Example #1
    def test_graph_disconnected_to_dot(self):
        dependencies_expected = (
            ('towel-stuff', 'bacon', 'bacon (<=0.2)'),
            ('grammar', 'bacon', 'truffles (>=1.2)'),
            ('choxie', 'towel-stuff', 'towel-stuff (0.1)'),
            ('banana', 'strawberry', 'strawberry (>=0.5)'),
        )
        disconnected_expected = ('cheese', 'bacon', 'strawberry')

        dists = []
        for name in self.DISTROS_DIST + self.DISTROS_EGG:
            dist = get_distribution(name, use_egg_info=True)
            self.assertNotEqual(dist, None)
            dists.append(dist)

        graph = depgraph.generate_graph(dists)
        buf = StringIO()
        depgraph.graph_to_dot(graph, buf, skip_disconnected=False)
        buf.seek(0)
        lines = buf.readlines()

        dependencies_lines = []
        disconnected_lines = []

        # First sort output lines into dependencies and disconnected lines.
        # We also skip the attribute lines, and don't include the "{" and "}"
        # lines.
        disconnected_active = False
        for line in lines[1:-1]:  # Skip first and last line
            if line.startswith('subgraph disconnected'):
                disconnected_active = True
                continue
            if line.startswith('}') and disconnected_active:
                disconnected_active = False
                continue

            if disconnected_active:
                # Skip the 'label = "Disconnected"', etc. attribute lines.
                if ' = ' not in line:
                    disconnected_lines.append(line)
            else:
                dependencies_lines.append(line)

        dependencies_matches = []
        for line in dependencies_lines:
            if line[-1] == '\n':
                line = line[:-1]
            match = self.EDGE.match(line.strip())
            self.assertIsNot(match, None)
            dependencies_matches.append(match.groups())

        disconnected_matches = []
        for line in disconnected_lines:
            if line[-1] == '\n':
                line = line[:-1]
            line = line.strip('"')
            disconnected_matches.append(line)

        self.checkLists(dependencies_matches, dependencies_expected)
        self.checkLists(disconnected_matches, disconnected_expected)
Example #2
 def render_POST(self, request):
   text = request.args.get("feedback")
   if text is None:
     raise FeedbackException("No text.")
   if len(text) > 50000:
     raise FeedbackException("Too much text.")
     
   text = text[0]
   
   # basic checksum to stop really lame kiddies spamming, see feedback.js for js version
   checksum = 0
   text = text.decode("utf-8", "ignore")
   for x in text:
     checksum = ((checksum + 1) % 256) ^ (ord(x) % 256)
   
   sentchecksum = int(request.args.get("c", [0])[0])
   if checksum != sentchecksum:
     raise FeedbackException("Bad checksum: %d vs. %d" % (sentchecksum, checksum))
     
   msg = MIMEText(text.encode("utf-8"), _charset="utf-8")
   msg["Subject"] = "qwebirc feedback from %s" % request.getclientIP()
   msg["From"] = config.feedbackengine["from"]
   msg["To"] = config.feedbackengine["to"]
   email = StringIO(msg.as_string())
   email.seek(0, 0)
   
   factorytype = SMTPSenderFactory
   factory = factorytype(fromEmail=config.feedbackengine["from"], toEmail=config.feedbackengine["to"], file=email, deferred=defer.Deferred())
   reactor.connectTCP(config.feedbackengine["smtp_host"], config.feedbackengine["smtp_port"], factory)
   self.__hit()
   return "1"
Example #3
 def checkRoundtrip(self, t):
     s = StringIO()
     t.write(s)
     s.seek(0)
     t2 = xpt.Typelib.read(s)
     self.assert_(t2 is not None)
     self.assertEqualTypelibs(t, t2)
Example #4
    def test_graph_bad_version_to_dot(self):
        expected = (
            ('towel-stuff', 'bacon', 'bacon (<=0.2)'),
            ('grammar', 'bacon', 'truffles (>=1.2)'),
            ('choxie', 'towel-stuff', 'towel-stuff (0.1)'),
            ('banana', 'strawberry', 'strawberry (>=0.5)'),
        )

        dists = []
        for name in self.DISTROS_DIST + self.DISTROS_EGG + self.BAD_EGGS:
            dist = get_distribution(name, use_egg_info=True)
            self.assertNotEqual(dist, None)
            dists.append(dist)

        graph = depgraph.generate_graph(dists)
        buf = StringIO()
        depgraph.graph_to_dot(graph, buf)
        buf.seek(0)
        matches = []
        lines = buf.readlines()
        for line in lines[1:-1]:  # skip the first and the last lines
            if line[-1] == '\n':
                line = line[:-1]
            match = self.EDGE.match(line.strip())
            self.assertIsNot(match, None)
            matches.append(match.groups())

        self.checkLists(matches, expected)
Example #5
 def loop(self, filename, format):
     original_records = list(SeqIO.parse(open(filename, "rU"), format))
     # now open a connection to load the database
     server = BioSeqDatabase.open_database(driver = DBDRIVER,
                                           user = DBUSER, passwd = DBPASSWD,
                                           host = DBHOST, db = TESTDB)
     db_name = "test_loop_%s" % filename  # new namespace!
     db = server.new_database(db_name)
     count = db.load(original_records)
     self.assertEqual(count, len(original_records))
     server.commit()
     #Now read them back...
     biosql_records = [db.lookup(name=rec.name)
                       for rec in original_records]
     #And check they agree
     self.assertTrue(compare_records(original_records, biosql_records))
     #Now write to a handle...
     handle = StringIO()
     SeqIO.write(biosql_records, handle, "gb")
     #Now read them back...
     handle.seek(0)
     new_records = list(SeqIO.parse(handle, "gb"))
     #And check they still agree
     self.assertEqual(len(new_records), len(original_records))
     for old, new in zip(original_records, new_records):
          #TODO - remove this hack because we don't write these yet:
         for key in ["comment", "references", "db_source"]:
             if key in old.annotations and key not in new.annotations:
                 del old.annotations[key]
         self.assertTrue(compare_record(old, new))
     #Done
     server.close()
Example #6
def deepCopy(obj):
    stream = StringIO()
    p = Pickler(stream, 1)
    p.dump(obj)
    stream.seek(0)
    u = Unpickler(stream)
    return u.load()
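
A quick round-trip check of deepCopy (assuming Pickler and Unpickler are the standard pickle module's classes, as the names suggest):

from pickle import Pickler, Unpickler
from StringIO import StringIO

original = {'a': [1, 2, 3]}
clone = deepCopy(original)
clone['a'].append(4)
print original['a']  # [1, 2, 3] - the clone is fully independent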
Example #7
 def browse(self, max_lines=None, headers=None):
   """Try reading specified number of lines from the CSV object.
   Args:
     max_lines: max number of lines to read. If None, the whole file is read
     headers: a list of strings as column names. If None, it will use "col0, col1..."
   Returns:
     A pandas DataFrame with the schema inferred from the data.
    Raises:
      Exception if the CSV object cannot be read, there are not enough lines
      to read, or the number of headers does not match the number of columns.
   """
   if self.path.startswith('gs://'):
     lines = Csv._read_gcs_lines(self.path, max_lines)
   else:
     lines = Csv._read_local_lines(self.path, max_lines)
   if len(lines) == 0:
     return pd.DataFrame(columns=headers)
   columns_size = len(next(csv.reader([lines[0]], delimiter=self._delimiter)))
   if headers is None:
     headers = ['col' + newstr(e) for e in range(columns_size)]
   if len(headers) != columns_size:
      raise Exception('Number of columns in CSV does not match number of headers')
   buf = StringIO()
   for line in lines:
     buf.write(line)
     buf.write('\n')
   buf.seek(0)
   df = pd.read_csv(buf, names=headers, delimiter=self._delimiter)
   for key, col in df.iteritems():
     if self._is_probably_categorical(col):
       df[key] = df[key].astype('category')
   return df
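
The buffering trick in browse - writing raw lines into a StringIO and handing it to pandas with explicit column names - also works on its own; a minimal sketch with made-up data:

import pandas as pd
from StringIO import StringIO

lines = ['1,alice', '2,bob']  # pretend these came from GCS or local disk
buf = StringIO()
for line in lines:
    buf.write(line)
    buf.write('\n')
buf.seek(0)
df = pd.read_csv(buf, names=['col0', 'col1'], delimiter=',')
print df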
Example #8
class RawHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
	def setup(self):
		self.socket = self.request
		self.socket.handler = self
		self.rfile = StringIO()
		self.rfilePos = 0
		self.wfile = SingleSocketWriter(self.socket)

	def handle(self):
		pass

	def finish(self):
		pass

	def data_came_in(self, socket, data):
		self.rfile.write(data)
		self.rfile.seek(self.rfilePos, 0)
		if self.rfile.read().find('\r\n\r\n') >= 0:
			self.rfile.seek(self.rfilePos, 0)
			self.done = False
			self.handle_one_request()
			if self.done:
				socket.close()
				return
			self.rfilePos = self.rfile.tell()
		# TODO: Check close_connection (a la BaseHTTPRequestHandler.handle())

	def connection_flushed(self, unused_socket):
		self.wfile.flushed()
Example #9
 def write_out(self, fde):
     """
     Write out full packet chain
     """
     # TODO: impl is just outright terrible.
     # Fix it in any way shape or form i don't care
     sio = StringIO()
     seq_id = self.start_seq_id
     net_total_written = 0
     total_written = 0
     last_total_written = 0xffffff
     for label, field in self.fields:
         written = field.write_out(sio, label='\t%s'  % label)
         total_written += written
         net_total_written += written
         if total_written >= 0xffffff:
             self._write_packet_header(0xffffff, seq_id, fde)
             sio.seek(0)  # rewind before reading back the buffered bytes
             fde.write(sio.read(0xffffff))
             remaining_bytes = sio.read()
             sio.close()
             sio = StringIO(remaining_bytes)
             sio.seek(0, 2)  # continue appending after the leftover bytes
             last_total_written = total_written
             total_written -= 0xffffff
             seq_id += 1
     if last_total_written == 0xffffff:
         self._write_packet_header(total_written, seq_id, fde)
         sio.seek(0)
         fde.write(sio.read(total_written))
         net_total_written += total_written
     return (net_total_written, seq_id)
Example #10
    def get(self,filename):
        name,sep,ext = filename.rpartition(".")
        if not sep:
            img_name = ext
        else:
            img_name = name
        try:
            img_file = self.fs.get_version(filename=img_name)
            img = img_file.read()
        except gridfs.errors.NoFile:
            raise tornado.web.HTTPError(500, 'image not found')

        resize = self.get_argument('_re', None)
        if resize:
            width, resep, height = resize.rpartition("x")
            output = StringIO()
            output.write(img)
            output.seek(0)
            im = Image.open(output)
            format = im.format
#            size = im.size
#            logging.info("format is %s ,size is %s" %(format,size))
            im = im.resize((int(width),int(height)), Image.ANTIALIAS)
            tmp = StringIO()
            im.save(tmp, format)
            img = tmp.getvalue()
            tmp.close()
            output.close()

        self.add_header('Content-Type',img_file.content_type)
        self.write(img)
        self.finish()
Example #11
 def _base64_img(self, im):
     f = StringIO()
     im.save(f, format='png')
     f.seek(0)
     prefix = 'data:image/png;base64,'
     b64 = prefix+base64.encodestring(f.read())
     return '<img src="%s" />' % b64
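
The same save-to-buffer-and-encode move with a tiny generated image (assumes PIL's Image module; note that base64.encodestring inserts newlines into its output):

import base64
from PIL import Image
from StringIO import StringIO

im = Image.new('RGB', (1, 1))  # a 1x1 placeholder image
f = StringIO()
im.save(f, format='png')
f.seek(0)
print 'data:image/png;base64,' + base64.encodestring(f.read())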
Example #12
    def __init__(self, fh):
        super(OfxPreprocessedFile,self).__init__(fh)

        if self.fh is None:
            return

        ofx_string = self.fh.read()

        # find all closing tags as hints
        closing_tags = [ t.upper() for t in re.findall(r'(?i)</([a-z0-9_\.]+)>', ofx_string) ]

        # close all tags that don't have closing tags and
        # leave all other data intact
        last_open_tag = None
        tokens        = re.split(r'(?i)(</?[a-z0-9_\.]+>)', ofx_string)
        new_fh        = StringIO()
        for idx,token in enumerate(tokens):
            is_closing_tag = token.startswith('</')
            is_processing_tag = token.startswith('<?')
            is_cdata = token.startswith('<!')
            is_tag = token.startswith('<') and not is_cdata
            is_open_tag = is_tag and not is_closing_tag and not is_processing_tag
            if is_tag:
                if last_open_tag is not None:
                    new_fh.write("</%s>" % last_open_tag)
                    last_open_tag = None
            if is_open_tag:
                tag_name = re.findall(r'(?i)<([a-z0-9_\.]+)>', token)[0]
                if tag_name.upper() not in closing_tags:
                    last_open_tag = tag_name
            new_fh.write(token)
        new_fh.seek(0)
        self.fh = new_fh
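
To see what the preprocessing works with, here is the tokenizer on its own; the input is a made-up SGML-style fragment in which CODE and SEVERITY have no closing tags, so the loop above would emit </CODE> and </SEVERITY> before the next tag:

import re

ofx = "<STATUS><CODE>0<SEVERITY>INFO</STATUS>"
tokens = re.split(r'(?i)(</?[a-z0-9_\.]+>)', ofx)
print tokens
# ['', '<STATUS>', '', '<CODE>', '0', '<SEVERITY>', 'INFO', '</STATUS>', '']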
Example #13
    def test_irclike_badnick(self):
        """
        Management command irclike

        The nick is unknown
        """
        Song.objects.all().delete()
        HistoryEntry.objects.all().delete()

        song = Song.objects.create(
            artist="Lou Reed", album="Transformer", title="""song title""", genre="", score=0, family=0, global_score=0
        )

        HistoryEntry.objects.create(song=song)

        before = song.global_score

        nick = "this_is_not_a_real_nick"

        content = StringIO()
        call_command("irclike", nick, stderr=content)
        content.seek(0)
        output = content.read()

        upsong = Song.objects.get(pk=song.id)

        after = upsong.global_score

        self.assertEqual(before, 0)
        self.assertEqual(after, 0)
        self.assertEqual(output, "nick [%s] does not exist" % nick)
Example #14
def add_customer():
    from StringIO import StringIO
    if not session.get('logged_in'):
        abort(401)
    demand = generate_random_customer_data()
    image64 = generate_customer_demand_image(demand)

    new_customer = Customer(name=request.form['name'],
                            market_id=request.form['market_id'],
                            image64=image64)
    db.session.add(new_customer)
    db.session.commit()
    ids = np.array(range(len(demand)))
    ids.fill(new_customer.customer_id)
    demand_data = pd.DataFrame({'customer_id': ids,
                                'datetime': demand.index,
                                'value': demand.values})
    demand_buffer = StringIO()
    demand_data.to_csv(demand_buffer, header=False, index=False)
    demand_buffer.seek(0)
    cur = engine.raw_connection().cursor()
    cur.copy_from(demand_buffer, 'retail.customer_demand', sep=',')
    cur.connection.commit()
    flash('New customer was successfully added')
    return redirect(url_for('show_customers'))
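
The buffer-to-COPY hand-off reduced to its core (copy_from is psycopg2's COPY API; the DSN and table are placeholders taken from the example above):

import psycopg2
from StringIO import StringIO

conn = psycopg2.connect('dbname=retail')  # hypothetical connection
buf = StringIO()
buf.write('1,2013-01-01 00:00:00,42.0\n')  # rows in the table's column order
buf.seek(0)
cur = conn.cursor()
cur.copy_from(buf, 'retail.customer_demand', sep=',')
conn.commit()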
Example #15
    def test_irclike(self):
        """
        Management command irclike

        The last played song has its score increased by one
        """
        Song.objects.all().delete()
        HistoryEntry.objects.all().delete()
        userp = UserProfile.objects.get(user=self.user)

        song = Song.objects.create(
            artist="Lou Reed", album="Transformer", title="""song title""", genre="", score=0, family=0, global_score=0
        )

        HistoryEntry.objects.create(song=song)

        before = song.global_score

        content = StringIO()
        call_command("irclike", userp.ircnick, stdout=content)
        content.seek(0)

        upsong = Song.objects.get(pk=song.id)

        after = upsong.global_score

        self.assertEqual(before, 0)
        self.assertEqual(after, 1)
Example #16
    def test_irclike_nonick(self):
        """
        Management command irclike

        The nick is missing, no action
        """
        Song.objects.all().delete()
        HistoryEntry.objects.all().delete()

        song = Song.objects.create(
            artist="Lou Reed", album="Transformer", title="""song title""", genre="", score=0, family=0, global_score=0
        )

        HistoryEntry.objects.create(song=song)

        before = song.global_score

        content = StringIO()
        call_command("irclike", stderr=content)
        content.seek(0)
        output = content.read()

        upsong = Song.objects.get(pk=song.id)

        after = upsong.global_score

        self.assertEqual(before, 0)
        self.assertEqual(after, 0)
        self.assertEqual(output, "Erreur, vous devez indiquez un nick irc en option")
Example #17
  def test_multiple_objects(self):
    """Test saving multiple distinct, objects"""

    sio = StringIO()
    m   = Mirror(sio, mode='record')

    # constructor arguments should have no effect on SomeService.hello()
    i1  = m(SomeService(""), id="name1")
    i2  = m(SomeService(""), id="name2")

    r1 = i1.hello("other1")
    r2 = i2.hello("other2")

    m.save()
    sio.seek(0)

    m2  = Mirror(sio, mode='replay', strict=True)
    i1_ = m2(SomeService("name1"), id="name1")
    i2_ = m2(SomeService("name2"), id="name2")

    self.assertEqual(r1, i1_.hello("other1"))
    self.assertEqual(r2, i2_.hello("other2"))
    self.assertEqual(i1_.count, 0)
    self.assertEqual(i2_.count, 0)

    self.assertRaises(KeyError, i1_.hello, "other2")
    self.assertRaises(KeyError, i2_.hello, "other1")
Example #18
def check_cpaste(code, should_fail=False):
    """Execute code via 'cpaste' and ensure it was executed, unless
    should_fail is set.
    """
    _ip.user_ns['code_ran'] = False

    src = StringIO()
    if not hasattr(src, 'encoding'):
        # IPython expects stdin to have an encoding attribute
        src.encoding = None
    src.write('\n')
    src.write(code)
    src.write('\n--\n')
    src.seek(0)

    stdin_save = sys.stdin
    sys.stdin = src
    
    try:
        context = tt.AssertPrints if should_fail else tt.AssertNotPrints
        with context("Traceback (most recent call last)"):
            _ip.magic('cpaste')
        
        if not should_fail:
            assert _ip.user_ns['code_ran']
    finally:
        sys.stdin = stdin_save
Example #19
def read_bed(bed_file):
    """
    Parses the bed file specified into `pd.DataFrame`
    :param bed_file:
    :return:
    :rtype: `pd.DataFrame`
    """
    f = open(bed_file, 'r')
    try:
        s = StringIO()
        # Filter out all lines that do not start with "chr" as BED files are allowed to contain some junk
        for line in f:
            if line.startswith('chr'):
                s.write(line)
        s.seek(0)
        regions = pd.read_csv(s, sep="\t", header=None)
    finally:
        f.close()
        s.close()
    regions.columns = BED_COLUMNS[:len(regions.columns)]

    if 'name' in regions.columns:
        if len(regions.name) != len(regions.name.drop_duplicates()):
            raise Exception('Input BED file {0!r} contains duplicate values in name column. '
                            'Please ensure the names of the regions are unique'.format(bed_file))
        regions = regions.set_index('name')

    return Regions(regions)
Example #20
 def test_readAndWriteTextualFileHeader(self):
     """
     Reading and writing should not change the textual file header.
     """
     for file, attribs in self.files.iteritems():
         endian = attribs['endian']
         header_enc = attribs['textual_header_enc']
         file = os.path.join(self.path, file)
         # Read the file.
         f = open(file, 'rb')
         org_header = f.read(3200)
         f.seek(0, 0)
         # Initialize an empty SEGY object and set certain attributes.
         segy = SEGYFile()
         segy.endian = endian
         segy.file = f
         segy.textual_header_encoding = None
         # Read the textual header.
         segy._readTextualHeader()
         # Assert the encoding and compare with known values.
         self.assertEqual(segy.textual_header_encoding, header_enc)
         # Close the file.
         f.close()
         # The header writes to a file like object.
         new_header = StringIO()
         segy._writeTextualHeader(new_header)
         new_header.seek(0, 0)
         new_header = new_header.read()
         # Assert the correct length.
         self.assertEqual(len(new_header), 3200)
         # Assert the actual header.
         self.assertEqual(org_header, new_header)
Example #21
 def update_main_config(self, configobj):
     cfile = StringIO()
     configobj.write(cfile)
     cfile.seek(0)
     self.myconfig.readfp(cfile)
     self.save_main_config(self.mainconfigfilename)
     self.load_main_config()
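
The same StringIO hand-off feeds any in-memory text to ConfigParser.readfp; a minimal sketch with an invented section and option:

from ConfigParser import ConfigParser
from StringIO import StringIO

cfg = ConfigParser()
cfg.readfp(StringIO("[main]\nkey = value\n"))
print cfg.get("main", "key")  # value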
Example #22
 def test_packAndUnpackIBMSpecialCases(self):
     """
     Tests the packing and unpacking of several powers of 16 which are
     problematic because they need separate handling in the algorithm.
     """
     endians = ['>', '<']
     # Create the first 10 powers of 16, and their negatives.
     data = []
     for i in xrange(10):
         data.append(16 ** i)
         data.append(-16 ** i)
     data = np.array(data)
     # Convert to float64 in case native floats are different to be
     # able to utilize double precision.
     data = np.require(data, 'float64')
     # Loop over little and big endian.
     for endian in endians:
         # Pack.
         f = StringIO()
         DATA_SAMPLE_FORMAT_PACK_FUNCTIONS[1](f, data, endian)
         # Jump to beginning and read again.
         f.seek(0, 0)
         new_data = DATA_SAMPLE_FORMAT_UNPACK_FUNCTIONS[1](f,
                                 len(data), endian)
         f.close()
         # Test both.
         np.testing.assert_array_equal(new_data, data)
Example #23
 def test_packAndUnpackIBMFloat(self):
     """
     Packing and unpacking IBM floating points might yield some inaccuracies
     due to floating point rounding errors.
     This test tests a large number of random floating point numbers.
     """
     # Some random seeds.
     seeds = [1234, 592, 459482, 6901, 0, 7083, 68349]
     endians = ['<', '>']
     # Loop over all combinations.
     for seed in seeds:
         # Generate 50000 random floats from -100000 to +100000.
         np.random.seed(seed)
         data = 200000.0 * np.random.ranf(50000) - 100000.0
         # Convert to float64 in case native floats are different to be
         # able to utilize double precision.
         data = np.require(data, 'float64')
         # Loop over little and big endian.
         for endian in endians:
             # Pack.
             f = StringIO()
             DATA_SAMPLE_FORMAT_PACK_FUNCTIONS[1](f, data, endian)
             # Jump to beginning and read again.
             f.seek(0, 0)
             new_data = DATA_SAMPLE_FORMAT_UNPACK_FUNCTIONS[1](f,
                                     len(data), endian)
             f.close()
             # A relative tolerance of 1E-6 is considered good enough.
             rms1 = rms(data, new_data)
             self.assertEqual(True, rms1 < 1E-6)
Example #24
 def test_packAndUnpackVerySmallIBMFloats(self):
     """
     The same test as test_packAndUnpackIBMFloat just for small numbers
     because they might suffer more from the inaccuracies.
     """
     # Some random seeds.
     seeds = [123, 1592, 4482, 601, 1, 783, 6849]
     endians = ['<', '>']
     # Loop over all combinations.
     for seed in seeds:
         # Generate 50000 small random floats between 0 and 1E-5.
         np.random.seed(seed)
         data = 1E-5 * np.random.ranf(50000)
         # Convert to float64 in case native floats are different to be
         # able to utilize double precision.
         data = np.require(data, 'float64')
         # Loop over little and big endian.
         for endian in endians:
             # Pack.
             f = StringIO()
             DATA_SAMPLE_FORMAT_PACK_FUNCTIONS[1](f, data, endian)
             # Jump to beginning and read again.
             f.seek(0, 0)
             new_data = DATA_SAMPLE_FORMAT_UNPACK_FUNCTIONS[1](f,
                                     len(data), endian)
             f.close()
             # A relative tolerance of 1E-6 is considered good enough.
             rms1 = rms(data, new_data)
             self.assertEqual(True, rms1 < 1E-6)
Example #25
def test():
    from StringIO import StringIO
    class TestConfig(Config):
        s = String("default", "doc of s")
        i = Integer(0x29a, "doc of i")
        b = Boolean(False, "doc of b")
    config = TestConfig()
    assert TestConfig.s.__doc__ == "doc of s"
    assert config.s == "default"
    assert not config.b
    config.s = "not"
    config.s += " default"
    config.i = 0
    config.b = True
    f = StringIO()
    config.save(f)
    val = f.getvalue()
    assert "doc of s" in val
    assert "not default" in val
    config = TestConfig()
    assert config.s == "default"
    f.seek(0)
    config.load(f)
    assert config.s == "not default"
    assert config.i == 0
    assert config.b is True
    observable.test()
Example #26
    def _render_zip(self, req, filename, repos, diff):
        """ZIP archive with all the added and/or modified files."""
        new_rev = diff.new_rev
        req.send_response(200)
        req.send_header('Content-Type', 'application/zip')
        req.send_header('Content-Disposition', 'attachment;'
                        'filename=%s.zip' % filename)

        from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED

        buf = StringIO()
        zipfile = ZipFile(buf, 'w', ZIP_DEFLATED)
        for old_node, new_node, kind, change in repos.get_changes(**diff):
            if kind == Node.FILE and change != Changeset.DELETE:
                assert new_node
                zipinfo = ZipInfo()
                zipinfo.filename = new_node.path.encode('utf-8')
                # Note: unicode filenames are not supported by zipfile.
                # UTF-8 is not supported by all Zip tools either,
                # but as some do, I think UTF-8 is the best option here.
                zipinfo.date_time = time.gmtime(new_node.last_modified)[:6]
                zipinfo.compress_type = ZIP_DEFLATED
                zipfile.writestr(zipinfo, new_node.get_content().read())
        zipfile.close()

        buf.seek(0, 2) # be sure to be at the end
        req.send_header("Content-Length", buf.tell())
        req.end_headers()

        req.write(buf.getvalue())
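
The in-memory archive pattern in isolation - build a ZIP inside a StringIO, then measure it with tell() just as above (file name and payload are invented):

from StringIO import StringIO
from zipfile import ZipFile, ZIP_DEFLATED

buf = StringIO()
zf = ZipFile(buf, 'w', ZIP_DEFLATED)
zf.writestr('hello.txt', 'hello world')
zf.close()

buf.seek(0, 2)             # jump to the end...
print buf.tell()           # ...so tell() reports the archive's total size
data = buf.getvalue()      # the complete archive bytes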
Example #27
    def __patch_jenkins_config( self ):
        """
        A context manager that retrieves the Jenkins configuration XML, deserializes it into an
        XML ElementTree, yields the XML tree, then serializes the tree and saves it back to
        Jenkins.
        """
        config_file = StringIO( )
        if run( 'test -f ~/config.xml', quiet=True ).succeeded:
            fresh_instance = False
            get( remote_path='~/config.xml', local_path=config_file )
        else:
            # Get the in-memory config as the on-disk one may be absent on a fresh instance.
            # Luckily, a fresh instance won't have any configured security.
            fresh_instance = True
            config_url = 'http://localhost:8080/computer/(master)/config.xml'
            with hide( 'output' ):
                config_file.write( run( 'curl "%s"' % config_url ) )
        config_file.seek( 0 )
        config = ElementTree.parse( config_file )

        yield config

        config_file.truncate( 0 )
        config.write( config_file, encoding='utf-8', xml_declaration=True )
        if fresh_instance:
            self.__service_jenkins( 'stop' )
        try:
            put( local_path=config_file, remote_path='~/config.xml' )
        finally:
            if fresh_instance:
                self.__service_jenkins( 'start' )
            else:
                log.warn( 'Visit the Jenkins web UI and click Manage Jenkins - Reload '
                          'Configuration from Disk' )
Example #28
	def serialise(self, data):
		data = self._prepare(data)
		string = StringIO()
		writer = XMLWriter(string)

		if isinstance(data, dict):
			writer.start('result')
			for (key, subdata) in data.items():
				self._write(writer, key, subdata)
			writer.end('result')
		elif isinstance(data, (list, tuple)):
			writer.start('results')
			for d in data:
				self._write(writer, 'result', d)
			writer.end('results')
		elif isinstance(data, Iterable) and not isinstance(data, (str, unicode)):
			writer.start('results')
			for d in data:
				if isinstance(d, dict):
					writer.start('result')
					for (subkey, subdata) in d.items():
						if not subdata is None:
							self._write(writer, subkey, subdata)
					writer.end('result')
				else:
					writer.element('result', d)

			writer.end('results')
		elif not data is None:
			writer.element('result', unicode(data))

		string.seek(0)
		return string.read()
Example #29
def add_still(post_id):
    """
    访问权限:
    :param post_id:
    :return:
    """
    post = Post.query.get_or_404(post_id)
    redirect_url = request.args.get(u'redirect', url_for(u'.get_post', post_id=post_id))
    form = AddStillForm()

    if form.validate_on_submit():
        method = form.method.data
        try:
            still = Still(timeline=Still.timeline_str_to_int(form.time_min.data, form.time_sec.data),
                          comment=form.comment.data, post_id=post.id, private=bool(form.private.data))
            db.session.add(still)
            db.session.flush()
            if method == u'file':
                img = request.files[u'img_file']
            elif method == u'url':
                img = StringIO(urllib2.urlopen(form.img_url.data).read())
                img.seek(0)
            else:
                raise ValueError(u'Invalid image upload method')
            save_post_image(img, get_post_dir(post), str(still.id))
            db.session.commit()
        except Exception, e:
            db.session.rollback()
            flash(u'ERROR: %s' % e.message)
        else:
            flash(u'Still added successfully!')
Example #30
def add_post():
    """
    访问权限:
    :return:
    """
    form = AddPostForm()

    if form.validate_on_submit():
        method = form.method.data
        try:
            post = form.to_post(Post())
            db.session.add(post)
            db.session.flush()
            if method == u'file':
                img = request.files[u'img_file']
            elif method == u'url':
                img = StringIO(urllib2.urlopen(form.img_url.data).read())
                img.seek(0)
            else:
                raise ValueError(u'Invalid image upload method')
            save_post_image(img, get_post_dir(post), u'archive')
            db.session.commit()
        except Exception, e:
            db.session.rollback()
            flash(u'ERROR: %s' % e.message)
        else:
            flash(u'Poster added successfully!')
            return redirect(url_for(u'movie.get_post', post_id=post.id))
Example #31
    summary = AlignInfo.SummaryInfo(alignment)
    dumb_consensus = summary.dumb_consensus()
    #gap_consensus = summary.gap_consensus()
    if t_format != "nexus":
        #Hack for bug 2535
        pssm = summary.pos_specific_score_matrix()
        rep_dict = summary.replacement_dictionary()
        try:
            info_content = summary.information_content()
        except ValueError, e:
            if str(e) != "Error in alphabet: not Nucleotide or Protein, supply expected frequencies":
                raise

    if t_count==1 and t_format not in ["nexus","emboss","fasta-m10"]:
        #print " Trying to read a triple concatenation of the input file"
        data = open(t_filename,"r").read()
        handle = StringIO()
        handle.write(data + "\n\n" + data + "\n\n" + data)
        handle.seek(0)
        assert 3 == len(list(AlignIO.parse(handle=handle, format=t_format, seq_count=t_per)))

    #Some alignment file formats have magic characters which mean
    #use the letter in this position in the first sequence.
    #They should all have been converted by the parser, but if
    #not reversing the record order might expose an error.  Maybe.
    alignments.reverse()
    check_simple_write_read(alignments)

print "Finished tested reading files"
Example #32
            except KeyError, e:
                pass

            # now re-retrieve from confStore
            rec = confStore.fetch_record(session, target)
            dom = rec.get_dom(session)
            config = dom.childNodes[0]
            config.setAttributeNS(None, 'configStore', cfgStr)
            context.subConfigs[target] = config

        try:
            xml = config.toxml()
        except AttributeError, e:
            stream = StringIO()
            Print(config, stream)
            stream.seek(0)
            xml = stream.read()

        self.send_xml(xml, req)

    def handle_listObjects(self, req):
        # List existing objects in context

        storeHash = {}
        collHash = {}
        processHash = {}

        # get start obj from request
        store = FieldStorage(req)
        try:
            objid = store['contextId']
Example #33
 def test_download_db(self):
     with open('./spike/rules.db', 'rb') as f:
         expected = StringIO(str(f.read()))
     expected.seek(0)
     rv = self.app.get('/download')
     self.assertEqual(str(rv.data), expected.read())
Example #34
 def _run_dummy_command(self, *args, **kwargs):
     """Runs the test command's execute method directly, and outputs a dict of the current context."""
     out = StringIO()
     DummyCommand().execute(*args, stdout=out, **kwargs)
     out.seek(0)
     return json.loads(out.read())
Example #35
 def get_csv_response_rows(self, response):
     response_content = StringIO(response.content)
     response_content.seek(0)
     return [row for row in csv.reader(response_content)]
Example #36
    def list_directory(self, path):
        try:
            list = os.listdir(path)
            all_entries = len(list)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None

        if web_list_by_datetime:
            # Sort by most recent modified date/time first
            list.sort(key=lambda x: os.stat(os.path.join(path, x)).st_mtime,
                      reverse=web_list_sort_descending)
        else:
            # Sort by File Name
            list.sort(key=lambda a: a.lower(),
                      reverse=web_list_sort_descending)
        f = StringIO()
        displaypath = cgi.escape(urllib.unquote(self.path))
        # find index of first file or hyperlink

        file_found = False
        cnt = 0
        for entry in list:  # See if there is a file for initializing iframe
            fullname = os.path.join(path, entry)
            if os.path.islink(fullname) or os.path.isfile(fullname):
                file_found = True
                break
            cnt += 1

        # Start HTML formatting code
        f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write('<head>')
        # Setup Meta Tags and better viewing on small screen devices
        f.write(
            '<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1" />')
        f.write(
            '<meta name="viewport" content="width=device-width, initial-scale=1.0" />'
        )
        if web_page_refresh_on:
            f.write('<meta http-equiv="refresh" content="%s" />' %
                    web_page_refresh_sec)
        f.write('</head>')

        tpath, cur_folder = os.path.split(self.path)
        f.write("<html><title>%s %s</title>" % (web_page_title, self.path))
        f.write("<body>")
        # Start Left iframe Image Panel
        f.write('<iframe width="%s" height="%s" align="left"' %
                (web_iframe_width_usage, web_image_height))
        if file_found:  # a file was found, display it in the left pane
            f.write('src="%s" name="imgbox" id="imgbox" alt="%s">' %
                    (list[cnt], web_page_title))
        else:  # No files found so blank left pane
            f.write('src="%s" name="imgbox" id="imgbox" alt="%s">' %
                    ("about:blank", web_page_title))

        f.write('<p>iframes are not supported by your browser.</p></iframe>')
        # Start Right File selection List Panel
        list_style = '<div style="height: ' + web_list_height + 'px; overflow: auto; white-space: nowrap;">'
        f.write(list_style)
        # f.write('<center><b>%s</b></center>' % (self.path))
        # Show a refresh button
        refresh_button = (
            '''<FORM>&nbsp;&nbsp;<INPUT TYPE="button" onClick="history.go(0)"
VALUE="Refresh">&nbsp;&nbsp;<b>%s</b></FORM>''' % list_title)
        f.write('%s' % refresh_button)
        f.write(
            '<ul name="menu" id="menu" style="list-style-type:none; padding-left: 4px">'
        )
        # Create the formatted list of right panel hyper-links to files in the specified directory

        if not self.path is "/":  # Display folder Back arrow navigation if not in web root
            f.write('<li><a href="%s" >%s</a></li>\n' %
                    (urllib.quote(".."), cgi.escape("< BACK")))
        display_entries = 0
        file_found = False
        for name in list:
            display_entries += 1
            if web_max_list_entries > 1:
                if display_entries >= web_max_list_entries:
                    break
            fullname = os.path.join(path, name)
            displayname = linkname = name
            date_modified = time.strftime(
                '%H:%M:%S %d-%b-%Y',
                time.localtime(os.path.getmtime(fullname)))
            # Append / for directories or @ for symbolic links
            if os.path.islink(fullname):
                displayname = name + "@"  # symbolic link found
            if os.path.isdir(fullname):  # check if entry is a directory
                displayname = name + "/"
                linkname = os.path.join(displaypath, displayname)
                f.write('<li><a href="%s" >%s</a></li>\n' %
                        (urllib.quote(linkname), cgi.escape(displayname)))
            else:
                f.write('<li><a href="%s" target="imgbox">%s</a> - %s</li>\n' %
                        (urllib.quote(linkname), cgi.escape(displayname),
                         date_modified))

        # Display folder Back arrow navigation if not in web root
        if self.path != "/" and display_entries > 35:
            f.write('<li><a href="%s" >%s</a></li>\n' %
                    (urllib.quote(".."), cgi.escape("< BACK")))
        f.write('</ul></div><p><b>')
        f.write(
            '<div style="float: left; padding-left: 40px;">Web Root is [ %s ]</div>'
            % web_server_root)
        f.write('<div style="text-align: center;">%s</div>' % web_page_title)

        if web_page_refresh_on:
            f.write(
                '<div style="float: left; padding-left: 40px;">Auto Refresh [ %s sec ]</div>'
                % web_page_refresh_sec)

        if web_max_list_entries > 1:
            f.write(
                '<div style="text-align: right; padding-right: 40px;">Listing Only %i of %i Files in [ %s ]</div>'
                % (display_entries, all_entries, self.path))
        else:
            f.write(
                '<div style="text-align: right; padding-right: 50px;">Listing All %i Files in [ %s ]</div>'
                % (all_entries, self.path))
        # Display web refresh info only if setting is turned on
        f.write('</b></p>')
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        encoding = sys.getfilesystemencoding()
        self.send_header("Content-type", "text/html; charset=%s" % encoding)
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f
Example #37
def rewrite_spec(subj, run, root="/home/jtaylo/FIAC-HBM2009"):
    """
    Take a FIAC specification file and get two specifications
    (experiment, begin).

    This creates two new .csv files, one for the experimental
    conditions, the other for the "initial" confounding trials that
    are to be modelled out. 

    For the block design, the "initial" trials are the first
    trials of each block. For the event designs, the 
    "initial" trials are made up of just the first trial.

    """

    if exists(
            pjoin("%(root)s", "fiac%(subj)d",
                  "subj%(subj)d_evt_fonc%(run)d.txt") % {
                      'root': root,
                      'subj': subj,
                      'run': run
                  }):
        designtype = 'evt'
    else:
        designtype = 'bloc'

    # Fix the format of the specification so it is
    # more in the form of a 2-way ANOVA

    eventdict = {1: 'SSt_SSp', 2: 'SSt_DSp', 3: 'DSt_SSp', 4: 'DSt_DSp'}
    s = StringIO()
    w = csv.writer(s)
    w.writerow(['time', 'sentence', 'speaker'])

    specfile = pjoin("%(root)s", "fiac%(subj)d",
                     "subj%(subj)d_%(design)s_fonc%(run)d.txt") % {
                         'root': root,
                         'subj': subj,
                         'run': run,
                         'design': designtype
                     }
    d = np.loadtxt(specfile)
    for row in d:
        w.writerow([row[0]] + eventdict[row[1]].split('_'))
    s.seek(0)
    d = csv2rec(s)

    # Now, take care of the 'begin' event
    # This is due to the FIAC design

    if designtype == 'evt':
        b = np.array([(d[0]['time'], 1)],
                     np.dtype([('time', np.float), ('initial', np.int)]))
        d = d[1:]
    else:
        k = np.equal(np.arange(d.shape[0]) % 6, 0)
        b = np.array([(tt, 1) for tt in d[k]['time']],
                     np.dtype([('time', np.float), ('initial', np.int)]))
        d = d[~k]

    designtype = {'bloc': 'block', 'evt': 'event'}[designtype]

    fname = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s",
                  "experiment_%(run)02d.csv") % {
                      'root': root,
                      'subj': subj,
                      'run': run,
                      'design': designtype
                  }
    rec2csv(d, fname)
    experiment = csv2rec(fname)

    fname = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s",
                  "initial_%(run)02d.csv") % {
                      'root': root,
                      'subj': subj,
                      'run': run,
                      'design': designtype
                  }
    rec2csv(b, fname)
    initial = csv2rec(fname)

    return d, b
Example #38
    def build_ccm(self, ccm_name, current_image_width, current_image_height,
                  current_csv, dest_name):
        self.block0_nums = 0
        self.block1_nums = 0
        current_height = csv2data(current_csv)[0][-1]
        ccm_file = open(ccm_name, "rb")
        ccm_lines = ccm_file.readlines()
        ccm_file.close()
        char_nums = int(ccm_lines[0].split(":")[1].split("]")[0])
        ccm_lst = []
        for i in xrange(1, len(ccm_lines)):
            line = ccm_lines[i]
            if "," in line:
                (code_begin, code_end) = line.split(",")[:2]
                code_begin = int(code_begin, 16)
                code_end = int(code_end, 16)
                self.block1_nums += (code_end - code_begin + 1)
                ccm_lst.append((code_begin, code_end))
        self.block0_nums = len(ccm_lst)
        print("%d" % self.block0_nums)
        fontBuffer = StringIO()
        fontBuffer.seek(0)
        fontBuffer.write(struct.pack("I", 0x010000))
        fontBuffer.write(struct.pack("I", 0))  # total file length, patched below
        fontBuffer.write(struct.pack("I", current_height))
        fontBuffer.write(struct.pack("I", 0))
        fontBuffer.write(struct.pack("H", self.block0_nums))
        fontBuffer.write(struct.pack("H", self.block1_nums))
        fontBuffer.write(struct.pack("I", 0x20))  # block0 start offset
        fontBuffer.write(struct.pack("I", 0x20 +
                                     self.block0_nums * 0xc))  # block1 start offset
        fontBuffer.write(struct.pack("I", 0x010000))
        fontBuffer.write("\x00" * (self.block0_nums * 0xc))
        fontBuffer.write("\x00" * (self.block1_nums * 0x18))
        fontBuffer.seek(0, 2)
        end_offset = fontBuffer.tell()
        fontBuffer.seek(4)
        fontBuffer.write(struct.pack("I", end_offset))
        fontBuffer.seek(0x20)
        data = self._makeCodeBin(ccm_lst)
        fontBuffer.write(data)
        fontBuffer.seek(0x20 + self.block0_nums * 0xc)
        data = self._makeXYbin(current_image_width, current_image_height,
                               current_csv)
        fontBuffer.write(data)
        fontBuffer.seek(0)
        destdata = fontBuffer.getvalue()
        dest = open("build//%s" % dest_name, "wb")
        dest.write(destdata)
        end = dest.tell()
        if end % 0x10 > 0:
            dest.write("\x00" * (0x10 - end % 0x10))
        dest.close()

        return True
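
The write-placeholder-then-back-patch pattern used above for the total length field, in miniature:

import struct
from StringIO import StringIO

buf = StringIO()
buf.write(struct.pack("I", 0x010000))  # magic
buf.write(struct.pack("I", 0))         # placeholder for the total length
buf.write("payload")
buf.seek(0, 2)                         # jump to the end to measure
end_offset = buf.tell()
buf.seek(4)                            # back-patch the length field
buf.write(struct.pack("I", end_offset))
print repr(buf.getvalue())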
Example #39
    def export_report(self):
        data, pt_names, pc_names = self.prepare_data()
        # create workbook
        book = xlwt.Workbook(encoding='utf8')
        # create sheet
        report_name = _('Product compare report')
        sheet = book.add_sheet(report_name)

        # create report object
        report_excel_output = self.env[
            'report.excel.output.extend'].with_context(
                filename_prefix='ProductCompareReport',
                form_title=report_name).create({})

        rowx = 0
        colx = 0

        # define title and header
        title_list = [
            _('Code'),
            _('Product'),
            _('Sizes'),
            _('Quant'),
            _('Detail'),
            _('Non exist product')
        ]
        colx_number = len(title_list) - 1

        # create header
        report_swh = _('%s') % self.stock_warehouse.name
        sheet.write_merge(rowx, rowx, 1, colx_number, report_swh, style_filter)
        rowx += 1

        # create name
        sheet.write_merge(rowx, rowx, colx + 1, colx_number,
                          report_name.upper(), style_filter)
        rowx += 1

        # create filters
        if self.product_category:
            sheet.write_merge(rowx, rowx, 0, colx_number,
                              _("Category: ") + pc_names, style_filter)
        rowx += 1
        if self.product_template:
            sheet.write_merge(rowx, rowx, 0, colx_number,
                              _("Filtered by product: ") + pt_names,
                              style_filter)
        rowx += 1

        # create title
        sheet.write_merge(rowx, rowx, colx, colx + len(title_list) - 2,
                          _('Warehouse: ') + str(self.stock_warehouse.name),
                          style_title)
        sheet.write_merge(
            rowx, rowx, colx + len(title_list) - 1, colx + len(title_list) - 1,
            _('Warehouse compared: ') + str(self.stock_warehouse_compare.name),
            style_title)
        rowx += 1
        for i in xrange(0, len(title_list)):
            sheet.write_merge(rowx, rowx, i, i, title_list[i], style_title)
        rowx += 1

        if data:
            for d in data:
                sheet.write(rowx, colx, d['dc'], style_footer)
                sheet.write(rowx, colx + 1, d['tprod'], style_footer)
                sheet.write(rowx, colx + 2, d['size'], style_footer)
                sheet.write(rowx, colx + 3, d['qty'], style_footer)
                sheet.write(rowx, colx + 4, d['info'], style_footer)
                sheet.write(
                    rowx, colx + 5,
                    d['not_exist_prds'] if 'not_exist_prds' in d else '',
                    style_footer)
                rowx += 1
        # prepare file data
        io_buffer = StringIO()
        book.save(io_buffer)
        io_buffer.seek(0)
        filedata = base64.encodestring(io_buffer.getvalue())
        io_buffer.close()

        # set file data
        report_excel_output.filedata = filedata

        # call export function
        return report_excel_output.export_report()
Example #40
    def send_head(self):
        """Send response code and MIME header.

        This is common code for GET and HEAD commands.

        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.

        """
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            path_parts = list(self.path.partition('?'))
            if not path_parts[0].endswith('/'):
                # redirect browser - doing basically what apache does
                path_parts[0] += '/'
                self.send_response(301)
                self.send_header("Location", ''.join(path_parts))
                # begin no-cache patch
                # For redirects.  With redirects, caching is even worse and can
                # break more.  Especially with 301 Moved Permanently redirects,
                # like this one.
                self.send_header("Cache-Control", "no-cache, no-store, "
                                 "must-revalidate")
                self.send_header("Pragma", "no-cache")
                self.send_header("Expires", "0")
                # end no-cache patch
                self.end_headers()
                return None
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            # Always read in binary mode. Opening files in text mode may cause
            # newline translations, making the actual size of the content
            # transmitted *less* than the content-length!
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, "File not found: {}".format(path))
            return None

        filtered_bytes = None
        if ctype == 'text/html':
            # Comment out any <base> to allow local resolution of relative URLs.
            data = f.read().decode('utf8')
            f.close()
            data = re.sub(r'<base\s([^>]*)>',
                          r'<!--base \g<1>-->',
                          data,
                          flags=re.IGNORECASE)
            data = data.encode('utf8')
            f = StringIO()
            f.write(data)
            filtered_bytes = len(data)
            f.seek(0)

        self.send_response(200)
        if ctype.startswith('text/') or ctype.endswith('+xml'):
            self.send_header("Content-Type",
                             "{0}; charset=UTF-8".format(ctype))
        else:
            self.send_header("Content-Type", ctype)
        if os.path.splitext(path)[1] == '.svgz':
            # Special handling for svgz to make it work nice with browsers.
            self.send_header("Content-Encoding", 'gzip')

        if filtered_bytes is None:
            fs = os.fstat(f.fileno())
            self.send_header('Content-Length', str(fs[6]))
        else:
            self.send_header('Content-Length', filtered_bytes)

        # begin no-cache patch
        # For standard requests.
        self.send_header("Cache-Control", "no-cache, no-store, "
                         "must-revalidate")
        self.send_header("Pragma", "no-cache")
        self.send_header("Expires", "0")
        # end no-cache patch
        self.end_headers()
        return f
Example #41
class TestResult(object):
    """Holder for test result information.

    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.

    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    _previousTestClass = None
    _testRunEntered = False
    _moduleSetUpFailed = False

    def __init__(self, stream=None, descriptions=None, verbosity=None):
        self.failfast = False
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        self._mirrorOutput = False

    def printErrors(self):
        "Called by TestRunner after test run"

    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        self._setupStdout()

    def _setupStdout(self):
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = StringIO()
                self._stdout_buffer = StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer

    def startTestRun(self):
        """Called once before any tests are executed.

        See startTest for a method called before each test.
        """

    def stopTest(self, test):
        """Called when the given test has been run"""
        self._restoreStdout()
        self._mirrorOutput = False

    def _restoreStdout(self):
        if self.buffer:
            if self._mirrorOutput:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)

            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()

    def stopTestRun(self):
        """Called once after all tests are executed.

        See stopTest for a method called after each test.
        """

    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    @failfast
    def addFailure(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    def addSuccess(self, test):
        "Called when a test has completed successfully"
        pass

    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))

    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occurred."""
        self.expectedFailures.append(
            (test, self._exc_info_to_string(err, test)))

    @failfast
    def addUnexpectedSuccess(self, test):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)

    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return len(self.failures) == len(self.errors) == 0

    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True

    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next

        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)

        if self.buffer:
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)

    def _is_relevant_tb_level(self, tb):
        return '__unittest' in tb.tb_frame.f_globals

    def _count_relevant_tb_levels(self, tb):
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length

    def __repr__(self):
        return ("<%s run=%i errors=%i failures=%i>" %
                (util.strclass(self.__class__), self.testsRun, len(
                    self.errors), len(self.failures)))
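
The buffering that _setupStdout/_restoreStdout implement is just swapping sys.stdout for a StringIO and back; a minimal sketch:

import sys
from StringIO import StringIO

original_stdout = sys.stdout
sys.stdout = StringIO()
try:
    print "captured"                    # goes into the buffer, not the console
    captured = sys.stdout.getvalue()
finally:
    sys.stdout = original_stdout
print "got: %r" % captured              # got: 'captured\n'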
Example #42
    def test_dataset_workflow(self):
        from solariat_bottle.utils.predictor_events import translate_column

        acc = self.user.account

        # create
        with open(CSV_FILEPATH) as csv_file:
            post_data = self.get_post_data(csv_file)
            # test create
            resp = self.client.post('/dataset/create',
                                    buffered=True,
                                    content_type='multipart/form-data',
                                    data=post_data,
                                    base_url='https://localhost')

            self.assertEqual(resp.status_code, 201)
            data = json.loads(resp.data)
            self.assertTrue(data['ok'])
            self.assertEqual(data['data']['sync_status'], Dataset.OUT_OF_SYNC)
            self.assertTrue(data['data']['schema'])
            self.assertFalse(data['data']['is_locked'])
            dataset = acc.datasets.get_dataset(self.user,
                                               CREATE_UPDATE_DATASET_NAME)
            schema = dataset.schema
            DataClass = dataset.get_data_class()
            self.assertEqual(DataClass.objects.count(), 50)

        # test update schema
        # based on test data, just lets change one column type
        itx_col_name = translate_column('INTERACTION_ID')
        itx_col = [s for s in schema if s['name'] == itx_col_name][0]
        assert itx_col['type'] in ('integer', 'timestamp'), (itx_col['type'],
                                                             itx_col_name)
        itx_col['type'] = 'string'
        data = self._post('/dataset/update_schema/%s' %
                          CREATE_UPDATE_DATASET_NAME, {'schema': schema},
                          expected_code=201)
        dataset = acc.datasets.get_dataset(self.user,
                                           CREATE_UPDATE_DATASET_NAME)
        self.assertTrue(bool([1 for col in dataset.schema if col['name'] == itx_col_name \
                                                     and col['type'] == 'string']))

        # test invalid schema
        broken_schema = schema[1:]
        data = self._post('/dataset/update_schema/%s' %
                          CREATE_UPDATE_DATASET_NAME,
                          {'schema': broken_schema},
                          expected_result=False,
                          expected_code=500)

        # cannot accept the sync until it has happened
        data = self._post('/dataset/sync/accept/%s' %
                          CREATE_UPDATE_DATASET_NAME, {},
                          expected_result=False,
                          expected_code=500)

        # let's include the case when not all data could be synced
        FAIL_COL_NAME = 'STAT_INI_1'
        dataset.reload()
        col = [
            col for col in dataset.schema if col[KEY_NAME] == FAIL_COL_NAME
        ][0]
        self.assertEqual(col[KEY_TYPE], TYPE_INTEGER)
        raw_data = dataset.data_coll.find_one()
        dataset.data_coll.update({'_id': raw_data['_id']},
                                 {'$set': {
                                     FAIL_COL_NAME: 'fail'
                                 }})

        # test applying schema on dataset (synchronous mode for testing)
        data = self._post('/dataset/sync/apply/%s' %
                          CREATE_UPDATE_DATASET_NAME, {},
                          expected_code=201)

        self.assertEqual(data['data']['sync_status'], Dataset.SYNCED)
        self.assertTrue(data['data']['is_locked'])
        # we manually failed the sync of 1 row
        self.assertEqual(data['data']['items_synced'], 49)

        # until we accept/discard the last sync,
        # the original collection keeps the original data
        dataset = acc.datasets.get_dataset(self.user,
                                           CREATE_UPDATE_DATASET_NAME)
        DataClass = dataset.get_data_class()
        self.assertEqual(DataClass.objects.count(), 50)

        data = self._post('/dataset/sync/apply/%s' %
                          CREATE_UPDATE_DATASET_NAME, {},
                          expected_result=False,
                          expected_code=500)

        data = self._post('/dataset/sync/accept/%s' %
                          CREATE_UPDATE_DATASET_NAME, {},
                          expected_code=201)
        dataset = acc.datasets.get_dataset(self.user,
                                           CREATE_UPDATE_DATASET_NAME)
        DataClass = dataset.get_data_class()
        self.assertEqual(DataClass.objects.count(), 49)

        # test update, append 50 items again
        with open(CSV_FILEPATH) as csv_file:
            post_data = self.get_post_data(csv_file)
            resp = self.client.post('/dataset/update/%s' %
                                    CREATE_UPDATE_DATASET_NAME,
                                    buffered=True,
                                    content_type='multipart/form-data',
                                    data=post_data,
                                    base_url='https://localhost')

            data = json.loads(resp.data)
            self.assertEqual(resp.status_code, 201)
            self.assertTrue(data['ok'])
            self.assertEqual(data['data']['rows'], 99)
            dataset = acc.datasets.get_dataset(self.user,
                                               CREATE_UPDATE_DATASET_NAME)
            DataClass = dataset.get_data_class()
            self.assertEqual(DataClass.objects.count(), 99)

        data = self._post('/dataset/update_schema/%s' %
                          CREATE_UPDATE_DATASET_NAME, {'schema': schema},
                          expected_result=False,
                          expected_code=500)

        # prepare wrong schema for data update
        from StringIO import StringIO
        stream = StringIO()
        with open(CSV_FILEPATH) as csv_file:
            for row in csv_file:
                cols = row.split(CSV_SEPARATOR)
                if len(cols) > 1:
                    row = CSV_SEPARATOR.join(cols[1:])
                stream.write(row)
        stream.seek(0)
        post_data = self.get_post_data(stream)
        resp = self.client.post('/dataset/update/%s' %
                                CREATE_UPDATE_DATASET_NAME,
                                buffered=True,
                                content_type='multipart/form-data',
                                data=post_data,
                                base_url='https://localhost')

        self.assertEqual(resp.status_code, 500)
        data = json.loads(resp.data)
        self.assertFalse(data['ok'])
        dataset.drop_data()