Example #1
    def test_sending_crap_ujson(self):
        test_dir = self._get_dir()
        os.chdir(os.path.dirname(__file__))

        data = StringIO()
        filepath = 'test_here.py'
        zf = zipfile.ZipFile(data, "w", compression=zipfile.ZIP_DEFLATED)
        info = zipfile.ZipInfo('test_here.py')
        info.external_attr = os.stat(filepath).st_mode << 16L

        with open(filepath) as f:
            zf.writestr(info, f.read())

        zf.close()
        data = data.getvalue()

        args = get_runner_args(
            fqn='test_here.TestWebSite.test_something',
            agents=1,
            users=1,
            hits=1,
            test_dir=test_dir,
            include_file=['test_here.py'])

        args['crap'] = data
        self.assertRaises(ValueError, start_runner, args)
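The pattern above builds a zip archive entirely in memory. A minimal self-contained sketch of the same round trip, using only the standard library (Python 2, matching the examples here):

import zipfile
from cStringIO import StringIO

# Write a zip into an in-memory buffer...
buf = StringIO()
zf = zipfile.ZipFile(buf, "w", compression=zipfile.ZIP_DEFLATED)
zf.writestr("hello.txt", "hello, zip\n")
zf.close()

# ...then read it back from the raw bytes to verify the archive.
data = buf.getvalue()
zf = zipfile.ZipFile(StringIO(data))
assert zf.namelist() == ["hello.txt"]
assert zf.read("hello.txt") == "hello, zip\n"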
Example #2
 def download_tweets_csv(self, request, queryset):
     f = StringIO()
     w = unicodecsv.writer(f, encoding='utf-8')
     for tweet in queryset:
         w.writerow((
             tweet['data']['id'],
             tweet['data']['text'],
             tweet['data']['timestamp'],
             tweet['data']['retweet_count'],
             tweet['data']['favorite_count'],
             tweet['data']['in_reply_to_status_id'],
             tweet['data']['in_reply_to_user_id'],
             tweet['data']['retweeted_status_id'],
             tweet['data']['coords'],
             tweet['data']['user']['screen_name'],
             tweet['data']['user']['id'],
             tweet['data']['user']['name'],
         ))
     f.seek(0)
     response = HttpResponse(
         f.read(),
         content_type='text/csv'
     )
     response['Content-Disposition'] = 'attachment;filename=export.csv'
     return response
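The CSV is assembled in memory and shipped as a download. A small round-trip sketch of the same buffer technique, assuming the third-party unicodecsv package is installed:

from cStringIO import StringIO
import unicodecsv

f = StringIO()
w = unicodecsv.writer(f, encoding='utf-8')
w.writerow((u'id', u'text'))
w.writerow((1, u'caf\xe9'))  # non-ASCII round-trips as UTF-8 bytes

f.seek(0)
rows = list(unicodecsv.reader(f, encoding='utf-8'))
assert rows[1][1] == u'caf\xe9'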
Example #3
def test_latex_units():
    """
    Check to make sure that Latex and AASTex writers attempt to fall
    back on the **unit** attribute of **Column** if the supplied
    **latexdict** does not specify units.
    """
    t = table.Table([table.Column(name='date', data=['a','b']),
               table.Column(name='NUV exp.time', data=[1,2])])
    latexdict = copy.deepcopy(ascii.latexdicts['AA'])
    latexdict['units'] = {'NUV exp.time':'s'}
    out = StringIO()
    expected = '''\
\\begin{table}{cc}
\\tablehead{\\colhead{date} & \\colhead{NUV exp.time}\\\\ \\colhead{ } & \\colhead{s}}
\\startdata
a & 1 \\\\
b & 2 \\\\
\\enddata
\\end{table}
'''
    ascii.write(t, out, format='aastex', latexdict=latexdict)
    assert out.getvalue() == expected
    # use unit attribute instead
    t['NUV exp.time'].unit = units.s
    t['date'].unit = units.yr
    out = StringIO()
    ascii.write(t, out, format='aastex', latexdict=ascii.latexdicts['AA'])
    assert out.getvalue() == expected.replace(
        'colhead{s}', 'colhead{$\mathrm{s}$}').replace(
        'colhead{ }', 'colhead{$\mathrm{yr}$}')
Example #4
class SFTPStorageFile(File):

    def __init__(self, name, storage, mode):
        self._name = name
        self._storage = storage
        self._mode = mode
        self._is_dirty = False
        self.file = StringIO()
        self._is_read = False

    @property
    def size(self):
        if not hasattr(self, '_size'):
            self._size = self._storage.size(self._name)
        return self._size

    def read(self, num_bytes=None):
        if not self._is_read:
            self.file = self._storage._read(self._name)
            self._is_read = True

        return self.file.read(num_bytes)

    def write(self, content):
        if 'w' not in self._mode:
            raise AttributeError("File was opened for read-only access.")
        self.file = StringIO(content)
        self._is_dirty = True
        self._is_read = True

    def close(self):
        if self._is_dirty:
            self._storage._save(self._name, self.file.getvalue())
        self.file.close()
Example #5
        def wrapped_func(*args, **kwargs):
            try:
                import cProfile
                profiler = cProfile.Profile()
                profiler.enable()
                log.debug("Agent profiling is enabled")
            except Exception:
                log.warn("Cannot enable profiler")

            # Catch any return value before disabling profiler
            ret_val = func(*args, **kwargs)

            # disable profiler and printout stats to stdout
            try:
                profiler.disable()
                import pstats
                from cStringIO import StringIO
                s = StringIO()
                ps = pstats.Stats(profiler, stream=s).sort_stats("cumulative")
                ps.print_stats(AgentProfiler.PSTATS_LIMIT)
                log.info(s.getvalue())
            except Exception:
                log.warn("Cannot disable profiler")

            return ret_val
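Example #5 shows only the inner wrapper of a profiling decorator. A complete, standalone sketch of the same idea (PSTATS_LIMIT stands in for AgentProfiler.PSTATS_LIMIT, which is not shown above):

import cProfile
import functools
import pstats
from cStringIO import StringIO

PSTATS_LIMIT = 20  # assumed stand-in for AgentProfiler.PSTATS_LIMIT

def profiled(func):
    """Run func under cProfile and print the top cumulative stats."""
    @functools.wraps(func)
    def wrapped_func(*args, **kwargs):
        profiler = cProfile.Profile()
        profiler.enable()
        try:
            return func(*args, **kwargs)
        finally:
            profiler.disable()
            s = StringIO()
            stats = pstats.Stats(profiler, stream=s)
            stats.sort_stats("cumulative").print_stats(PSTATS_LIMIT)
            print s.getvalue()
    return wrapped_func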
Example #6
def test():
    import sys
    base = ''
    if sys.argv[1:]:
        fn = sys.argv[1]
        if fn == '-':
            fp = sys.stdin
        else:
            fp = open(fn)
    else:
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        fp = StringIO(test_input)
    while 1:
        line = fp.readline()
        if not line: break
        words = line.split()
        if not words:
            continue
        url = words[0]
        parts = urlparse(url)
        print '%-10s : %s' % (url, parts)
        abs = urljoin(base, url)
        if not base:
            base = abs
        wrapped = '<URL:%s>' % abs
        print '%-10s = %s' % (url, wrapped)
        if len(words) == 3 and words[1] == '=':
            if wrapped != words[2]:
                print 'EXPECTED', words[2], '!!!!!!!!!!'
Example #7
 def create_thumbnail(self, size):
     try:
         orig = self.avatar.storage.open(self.avatar.name, 'rb').read()
         image = Image.open(StringIO(orig))
     except IOError:
         return # What should we do here?  Render a "sorry, didn't work" img?
     (w, h) = image.size
     if w != size or h != size:
         if w > h:
             diff = (w - h) / 2
             image = image.crop((diff, 0, w - diff, h))
         else:
             diff = (h - w) / 2
             image = image.crop((0, diff, w, h - diff))
         image = image.resize((size, size), AVATAR_RESIZE_METHOD)
         if image.mode != "RGB":
             image = image.convert("RGB")
         thumb = StringIO()
         image.save(thumb, "JPEG")
         thumb_file = ContentFile(thumb.getvalue())
     else:
         thumb_file = ContentFile(orig)
     thumb = self.avatar.storage.save(
         upload_avatar_file_path(instance=self, size=size),
         thumb_file)
Example #8
    def _iter(self, sparql_results_type, fields, bindings, boolean, triples):
        if sparql_results_type not in ('resultset', 'boolean'):
            raise TypeError("Unexpected results type: {0}".format(sparql_results_type))

        # We'll spool to a buffer, and only yield when it gets a bit big.
        buffer = StringIO()

        # Do these attribute lookups only once.
        json_dumps, json_dump, buffer_write = json.dumps, json.dump, buffer.write

        buffer_write('{\n')
        if sparql_results_type == 'boolean':
            buffer_write('  "head": {},\n')
            buffer_write('  "boolean": %s' % ('true' if boolean else 'false'))
        elif sparql_results_type == 'resultset':
            buffer_write('  "head": {\n')
            buffer_write('    "vars": [ %s ]\n' % ', '.join(json_dumps(field) for field in fields))
            buffer_write('  },\n')
            buffer_write('  "results": {\n')
            buffer_write('    "bindings": [\n')
            for i, binding in enumerate(bindings):
                buffer_write('      {' if i == 0 else ',\n      {')
                j = 0
                for field in fields:
                    value = binding.get(field)
                    if value is None:
                        continue
                    buffer_write(',\n        ' if j > 0 else '\n        ')
                    json_dump(field, buffer)
                    if isinstance(value, rdflib.URIRef):
                        buffer_write(': { "type": "uri"')
                    elif isinstance(value, rdflib.BNode):
                        buffer_write(': { "type": "bnode"')
                    elif value.datatype is not None:
                        buffer_write(': { "type": "typed-literal", "datatype": ')
                        json_dump(value.datatype, buffer)
                    elif value.language is not None:
                        buffer_write(': { "type": "literal", "xml:lang": ')
                        json_dump(value.language, buffer)
                    else:
                        buffer_write(': { "type": "literal"')
                    buffer_write(', "value": ')
                    json_dump(value, buffer)
                    buffer_write(' }')

                    j += 1

                buffer_write('\n      }')

                # Flush to the consumer whenever the spool approaches 64k,
                # as the comment above intends; checking only after the loop
                # would never flush incrementally.
                if buffer.tell() > 65000: # Almost 64k
                    yield buffer.getvalue()
                    buffer.seek(0)
                    buffer.truncate()

            buffer_write('\n    ]')
            buffer_write('\n  }')

        buffer_write('\n}')
        yield buffer.getvalue()
        buffer.close()
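The spool-and-flush idiom above is worth isolating. A standalone sketch with illustrative names, accumulating small writes and yielding a chunk whenever the buffer nears 64k:

from cStringIO import StringIO

def spooled_chunks(pieces, flush_at=65000):
    buf = StringIO()
    for piece in pieces:
        buf.write(piece)
        if buf.tell() > flush_at:
            # Hand the chunk to the consumer and reuse the buffer.
            yield buf.getvalue()
            buf.seek(0)
            buf.truncate()
    if buf.tell():
        yield buf.getvalue()
    buf.close()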
Example #9
def createZip(path):

    def walktree(top=".", depthfirst=True):
        names = os.listdir(top)
        if not depthfirst:
            yield top, names
        for name in names:
            try:
                st = os.lstat(os.path.join(top, name))
            except os.error:
                continue
            if stat.S_ISDIR(st.st_mode):
                for (newtop, children) in walktree (os.path.join(top, name),
                                                    depthfirst):
                    yield newtop, children
        if depthfirst:
            yield top, names

    files = []
    for (basepath, children) in walktree(path, False):
        for child in children:
            f = os.path.join(basepath, child)
            if os.path.isfile(f):
                f = f.encode(sys.getfilesystemencoding())
                files.append(f)

    buf = StringIO()
    zf = zipfile.ZipFile(buf, "w")
    for fname in files:
        nfname = os.path.join(os.path.basename(path), fname[len(path) + 1:])
        zf.write(fname, nfname, zipfile.ZIP_DEFLATED)
    zf.close()

    buf.seek(0)
    return buf
Example #10
def load_url(url, referrer=None):
    """Attempt to load the url using pycurl and return the data
    (which is None if unsuccessful)"""

    data = None
    databuffer = StringIO()

    curl = pycurl.Curl()
    curl.setopt(pycurl.URL, url)
    curl.setopt(pycurl.FOLLOWLOCATION, 1)
    curl.setopt(pycurl.CONNECTTIMEOUT, 5)
    curl.setopt(pycurl.TIMEOUT, 8)
    curl.setopt(pycurl.WRITEFUNCTION, databuffer.write)
    curl.setopt(pycurl.USERAGENT, UA)
    curl.setopt(pycurl.COOKIEFILE, '')
    if referrer is not None:
        curl.setopt(pycurl.REFERER, referrer)
    try:
        curl.perform()
        data = databuffer.getvalue()
    except Exception:
        pass
    curl.close()

    return data
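A hedged usage sketch for load_url; the UA constant is defined by the surrounding module and is assumed here:

UA = 'Mozilla/5.0 (compatible; example-fetcher)'  # assumed user-agent string

html = load_url('http://example.com/', referrer='http://example.org/')
if html is None:
    print 'download failed'
else:
    print '%d bytes fetched' % len(html)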
Example #11
 def _get_store_info(self, value, min_compress_len):
     flags = 0
     if isinstance(value, unicode):
         value = value.encode("utf-8")
         min_compress_len = 0
     elif isinstance(value, str):
         pass
     elif isinstance(value, int):
         flags |= _FLAG_INTEGER
         value = "%d" % value
         min_compress_len = 0
     elif isinstance(value, long):
         flags |= _FLAG_LONG
         value = "%d" % value
     else:
         flags |= _FLAG_PICKLE
         f = StringIO()
         pickler = pickle.Pickler(f)
         pickler.dump(value)
         value = f.getvalue()
     lv = len(value)
     if min_compress_len and lv > min_compress_len:
         comp_val = zlib.compress(value)
         if len(comp_val) < lv:
             flags |= _FLAG_COMPRESSED
             value = comp_val
     return flags, value
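The _FLAG_* constants come from the surrounding memcache client and are not shown. A sketch of the inverse operation, assuming the bit layout used by python-memcached:

import pickle
import zlib
from cStringIO import StringIO

_FLAG_PICKLE = 1 << 0      # assumed values, matching python-memcached
_FLAG_INTEGER = 1 << 1
_FLAG_LONG = 1 << 2
_FLAG_COMPRESSED = 1 << 3

def decode_value(flags, value):
    """Inverse of _get_store_info: recover the original object."""
    if flags & _FLAG_COMPRESSED:
        value = zlib.decompress(value)
    if flags & _FLAG_INTEGER:
        return int(value)
    if flags & _FLAG_LONG:
        return long(value)
    if flags & _FLAG_PICKLE:
        return pickle.Unpickler(StringIO(value)).load()
    return value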
Example #12
 def zipAdded(self):
     "Add files to a zip until over SYNC_ZIP_SIZE. Return zip data."
     f = StringIO()
     z = zipfile.ZipFile(f, "w", compression=zipfile.ZIP_DEFLATED)
     sz = 0
     cnt = 0
     files = {}
     cur = self.db.execute(
         "select fname from log where type = ?", MEDIA_ADD)
     fnames = []
     while 1:
         fname = cur.fetchone()
         if not fname:
             # add a flag so the server knows it can clean up
             z.writestr("_finished", "")
             break
         fname = fname[0]
         fnames.append([fname])
         z.write(fname, str(cnt))
         files[str(cnt)] = fname
         sz += os.path.getsize(fname)
         if sz > SYNC_ZIP_SIZE:
             break
         cnt += 1
     z.writestr("_meta", simplejson.dumps(files))
     z.close()
     return f.getvalue(), fnames
Example #13
def scale_image(img_upload, img_max_size):
    """Crop and scale an image file."""
    try:
        img = Image.open(img_upload)
    except IOError:
        return None

    src_width, src_height = img.size
    src_ratio = float(src_width) / float(src_height)
    dst_width, dst_height = img_max_size
    dst_ratio = float(dst_width) / float(dst_height)

    if dst_ratio < src_ratio:
        crop_height = src_height
        crop_width = crop_height * dst_ratio
        x_offset = int(float(src_width - crop_width) / 2)
        y_offset = 0
    else:
        crop_width = src_width
        crop_height = crop_width / dst_ratio
        x_offset = 0
        y_offset = int(float(src_height - crop_height) / 3)

    img = img.crop(
        (x_offset, y_offset,
         x_offset + int(crop_width), y_offset + int(crop_height)))
    img = img.resize((dst_width, dst_height), Image.ANTIALIAS)

    if img.mode != "RGB":
        img = img.convert("RGB")
    new_img = StringIO()
    img.save(new_img, "JPEG")
    img_data = new_img.getvalue()

    return ContentFile(img_data)
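A quick in-memory exercise of scale_image, assuming the example's PIL and Django imports (Image, StringIO, ContentFile) are in scope:

# Build a 100x50 test image in memory and scale it to a 32x24 thumbnail.
src = StringIO()
Image.new('RGB', (100, 50), (200, 30, 30)).save(src, 'PNG')
src.seek(0)

content = scale_image(src, (32, 24))
assert content is not None
assert Image.open(StringIO(content.read())).size == (32, 24)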
Example #14
def make_thumbnail(record):
    """Make small and medium thumbnails of given record."""
    id = record.get_header().subject_uri.split('/')[-1].split('.')[0]
    id = "%010d" % int(id)
    path = "/".join([id[0:3], id[3:6], id[6:9]])

    data = record.get_data()
    image = Image.open(StringIO(data))

    sizes = dict(S=(116, 58), M=(180, 360), L=(500, 500))

    yield id + "-O.jpg", data

    for size in "SML":
        imgpath = "%s-%s.jpg" % (id, size)
        try:
            if image.mode != 'RGB':
                image = image.convert('RGB')

            thumbnail = StringIO()
            image.resize(sizes[size], resample=Image.ANTIALIAS).save(thumbnail, format='jpeg')
            yield imgpath, thumbnail.getvalue()
        except Exception, e:
            print 'ERROR:', id, str(e)
            sys.stdout.flush()
Example #15
    def downloadmessage(self, msgidx, foldername):
        ''' downloads one message and returns the converted mbox-style mail '''
        pageurl = "%sGetMessageSource.aspx?msgid=%s" % (self.baseurl, msgidx)
        r = self.getpage(pageurl)
        messageblock = r.read()

        try:
            pre = self.findvar(messageblock, 'messageblock', "<pre>(.*)</pre>")
        except ValueError:
            return None
        try:
            unescapedmsg = self.htmlparser.unescape(pre).encode('latin1')
        except:
            logger.error("Unable to unescape html of message\n%s", pre)
            return None
        # create a message object to convert it to mbox format
        try:
            msg = email.message_from_string(unescapedmsg)
        except:
            logger.error(
                "Unable to create message object from text\n%s", unescapedmsg)
            return None
        # add headers
        msg.add_header("X-GetOutlook-Version", self.version())
        msg.add_header("X-GetOutlook-msgidx", msgidx)
        msg.add_header("X-GetOutlook-Folder", foldername)
        # make flat
        msg_out = StringIO()
        msg_gen = Generator(msg_out, mangle_from_=True)
        msg_gen.flatten(msg, unixfrom=True)
        return msg_out.getvalue()
Example #16
    def test_error_when_getinfo_false(self):

        # Command.process should complain if supports_getinfo == False
        # We support dynamic configuration, not static

        # The exception line number may change, so we're using a regex match instead
        expected_pattern = re.compile(
            '\r\n'
            'ERROR' \
            '\r\n' \
            '"NotImplementedError at ' \
            '\"\"' + \
            os.path.abspath(os.path.join(os.path.dirname(__file__),
                                         "../../splunklib/searchcommands/search_command.py")) + \
            '\"\"' \
            ', line \d\d\d : ' \
            'Command search appears to be statically configured and static configuration is unsupported by splunklib.searchcommands. Please ensure that default/commands.conf contains this stanza:\n\[search\]\nfilename = foo.py\nsupports_getinfo = true\nsupports_rawargs = true\noutputheader = true"' \
            '\r\n'
        )

        command = SearchCommand()
        result = StringIO()

        self.assertRaises(
            SystemExit, command.process, ['foo.py'], output_file=result)

        result.reset()
        observed = result.read()
        self.assertTrue(expected_pattern.match(observed))
Example #17
def error(component, message, stdout=False):
    """log an error message"""
    # In case of error print also the stacktrace
    stacktrace = StringIO()
    print_exc(file=stacktrace)
    emessage = "%s\n%s" % (message, stacktrace.getvalue())
    logger('error', component, emessage, stdout=stdout)
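The core trick in error() is capturing the active traceback into a string. A minimal self-contained sketch:

from cStringIO import StringIO
from traceback import print_exc

def current_traceback():
    # print_exc writes the active exception's traceback to any file object.
    buf = StringIO()
    print_exc(file=buf)
    return buf.getvalue()

try:
    1 / 0
except ZeroDivisionError:
    print current_traceback()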
Example #18
def get_build_info():
    """Returns a string containing the build info."""
    global __build_info__
    if __build_info__ is not None:
        return __build_info__

    build_info_buffer = StringIO()
    original_dir = os.getcwd()

    try:
        # We need to execute the git command in the source root.
        os.chdir(__source_root__)
        # Add in the e-mail address of the user building it.
        (_, packager_email) = run_command('git config user.email', exit_on_fail=True, command_name='git')
        print >>build_info_buffer, 'Packaged by: %s' % packager_email.strip()

        # Determine the last commit from the log.
        (_, commit_id) = run_command('git log --summary -1 | head -n 1 | cut -d \' \' -f 2',
                                     exit_on_fail=True, command_name='git')
        print >>build_info_buffer, 'Latest commit: %s' % commit_id.strip()

        # Include the branch just for safety sake.
        (_, branch) = run_command('git branch | cut -d \' \' -f 2', exit_on_fail=True, command_name='git')
        print >>build_info_buffer, 'From branch: %s' % branch.strip()

        # Add a timestamp.
        print >>build_info_buffer, 'Build time: %s' % strftime("%Y-%m-%d %H:%M:%S UTC", gmtime())

        __build_info__ = build_info_buffer.getvalue()
        return __build_info__
    finally:
        os.chdir(original_dir)

        if build_info_buffer is not None:
            build_info_buffer.close()
Example #19
    def test_exit_error_on_parser_error(self):
        # Command.process should produce an error message and exit on parser
        # errors, if invoked to execute. Same error message as expected_pattern
        # defined above

        expected_pattern = re.compile(
            '\r\n' \
            'ERROR\r\n' \
            '"ValueError at ""' + \
            os.path.abspath(os.path.join(os.path.dirname(__file__),
                                         "../../splunklib/searchcommands/search_command_internals.py")) + \
            '"", line \d\d\d : ' \
            'Unrecognized option: undefined_option = value"\r\n'
        )

        command = SearchCommand()
        result = StringIO()

        try:
            command.process(
                args=['foo.py', '__EXECUTE__', 'undefined_option=value'],
                input_file=StringIO('\r\n'), output_file=result)
        except SystemExit as e:
            result.reset()
            observed = result.read()
            self.assertNotEqual(e.code, 0)
            self.assertTrue(expected_pattern.match(observed))
        except BaseException as e:
            self.fail("Expected SystemExit, but caught %s" % type(e))
        else:
            self.fail("Expected SystemExit, but no exception was raised")
Example #20
    def __resize(self, display):
        #resize and resample photo
        original_id = self._getDisplayId()
        string_image = StringIO(str(self.get_data(original_id)))
        if display == 'Original':
            return string_image

        crop = False
        width, height = self.displays.get(display, (0, 0))
        # Calculate image width, size
        if not (width and height):
            size = LISTING_DISPLAYS.get(display, self.width())
            width, height = self.__get_crop_aspect_ratio_size(size)
            crop = True
        else:
            width, height = self.__get_aspect_ratio_size(width, height)
        
        # Resize image
        newimg = StringIO()
        img = Image.open(string_image)
        fmt = img.format
        try:
            img = img.resize((width, height), Image.ANTIALIAS)
        except AttributeError:
            img = img.resize((width, height))
        
        # Crop if needed
        if crop:
            box = self.__get_crop_box(width, height)
            img = img.crop(box)
            #img.load()
        quality = self._photo_quality(string_image)
        img.save(newimg, fmt, quality=quality)
        newimg.seek(0)
        return newimg
Example #21
 def _apply_watermark(self, datafile):
     text = self.aq_parent.watermark_text
     FONT = os.path.join(os.path.dirname(__file__), 'fonts', 'VeraSeBd.ttf')
     img = Image.open(datafile)
     newimg = StringIO()
     fmt = img.format
     watermark = Image.new("RGBA", (img.size[0], img.size[1]))
     draw = ImageDraw.ImageDraw(watermark, "RGBA")
     size = 0
     while True:
         size += 1
         nextfont = ImageFont.truetype(FONT, size)
         nexttextwidth, nexttextheight = nextfont.getsize(text)
         if nexttextwidth+nexttextheight/3 > watermark.size[0]:
             break
         font = nextfont
         textwidth, textheight = nexttextwidth, nexttextheight
     draw.setfont(font)
     draw.text(((watermark.size[0]-textwidth)/2,
                (watermark.size[1]-textheight)/2), text)
     watermark = watermark.rotate(degrees(atan(float(img.size[1])/img.size[0])),
                              Image.BICUBIC)
     mask = watermark.convert("L").point(lambda x: min(x, 88))
     watermark.putalpha(mask)
     img.paste(watermark, None, watermark)
     quality = self._photo_quality(datafile)
     img.save(newimg, fmt, quality=quality)
     newimg.seek(0)
     return newimg
Example #22
 def testXReadLines(self):
     # "Test BZ2File.xreadlines()"
     self.createTempFile()
     bz2f = BZ2File(self.filename)
     sio = StringIO(self.TEXT)
     self.assertEqual(list(bz2f.xreadlines()), sio.readlines())
     bz2f.close()
Example #23
 def next(self):
     resultBuilder = StringIO()
     for _ in range(self.length):
         pos = self.rv.randint(0, len(self.alphabet) - 1)
         resultBuilder.write(self.alphabet[pos])
     return resultBuilder.getvalue()
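For comparison, the same random string can be produced without the manual write loop; a sketch with illustrative names:

import random
import string

def next_token(length, alphabet=string.ascii_lowercase):
    # Same output shape as the StringIO builder above.
    return ''.join(random.choice(alphabet) for _ in range(length))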
Example #24
def fetch_image_from_url(file_url):
    """Returns an UploadedFile object after retrieving the file at the given URL."""
    inStream = urllib2.urlopen(file_url)

    parser = ImageFile.Parser()
    file_size = 0
    max_file_size = 20 * 1024 * 1024 # 20 megabytes
    read_size = 1024
    while True:
        s = inStream.read(read_size)
        file_size += len(s)
        if not s:
            break
        if file_size > max_file_size:
            raise Exception("file size exceeded max size: %s bytes" % max_file_size)
        parser.feed(s)

    inImage = parser.close()
    # convert to RGB to avoid error with png and tiffs
    #if inImage.mode != "RGB":
    #    inImage = inImage.convert("RGB")

    img_temp = StringIO()
    inImage.save(img_temp, 'PNG')
    img_temp.seek(0)

    file_object = File(img_temp, 'img_temp.png')
    uploaded_file = UploadedFile(file=file_object, name=file_object.name, content_type='image/png', size=file_size, charset=None)

    return uploaded_file
Example #25
def email_as_string(mail):
    """
    Converts the given message to a string, without mangling "From" lines
    (like as_string() does).

    :param mail: email to convert to string
    :rtype: str
    """
    fp = StringIO()
    g = Generator(fp, mangle_from_=False, maxheaderlen=78)
    g.flatten(mail)
    as_string = RFC3156_canonicalize(fp.getvalue())

    if isinstance(mail, MIMEMultipart):
        # Get the boundary for later
        boundary = mail.get_boundary()

        # Workaround for http://bugs.python.org/issue14983:
        # Insert a newline before the outer mail boundary so that other mail
        # clients can verify the signature when sending an email which contains
        # attachments.
        as_string = re.sub(r'--(\r\n)--' + boundary,
                           '--\g<1>\g<1>--' + boundary,
                           as_string, flags=re.MULTILINE)

    return as_string
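The unmangled flattening itself needs only the standard library; RFC3156_canonicalize comes from the surrounding project and is skipped in this sketch:

from cStringIO import StringIO
from email.generator import Generator
from email.mime.text import MIMEText

msg = MIMEText('From here to there.\n')
fp = StringIO()
Generator(fp, mangle_from_=False, maxheaderlen=78).flatten(msg)
text = fp.getvalue()
assert '>From here' not in text  # body line was not mangled to ">From"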
Example #26
    def _download_manifest(self):
        """
        Download the manifest file, and process it to return an ISOManifest.

        :return: manifest of available ISOs
        :rtype:  pulp_rpm.plugins.db.models.ISOManifest
        """
        manifest_url = urljoin(self._repo_url, models.ISOManifest.FILENAME)
        # I probably should have called this manifest destination, but I couldn't help myself
        manifest_destiny = StringIO()
        manifest_request = request.DownloadRequest(manifest_url, manifest_destiny)
        self.downloader.download([manifest_request])
        # We can inspect the report status to see if we had an error when retrieving the manifest.
        if self.progress_report.state == self.progress_report.STATE_MANIFEST_FAILED:
            raise IOError(_("Could not retrieve %(url)s") % {'url': manifest_url})

        manifest_destiny.seek(0)
        try:
            manifest = models.ISOManifest(manifest_destiny, self._repo_url)
        except ValueError:
            self.progress_report.error_message = _('The PULP_MANIFEST file was not in the ' +
                                                   'expected format.')
            self.progress_report.state = self.progress_report.STATE_MANIFEST_FAILED
            raise ValueError(self.progress_report.error_message)

        return manifest
Example #27
def create_zip_deck_file(deck):
    """Creates a zipped file containing the contents of the deck (XLS and media objects."""

    # create the string buffer to hold the contents of the zip file
    s = StringIO()

    # create the zipfile object
    zfile = zipfile.ZipFile(s, "w")

    # write the deck XLS file to the zip
    deck_file_output = utils.create_deck_file(deck.id)
    temp_dirpath = tempfile.mkdtemp()
    temp_filepath = os.path.join(temp_dirpath, "deck.xls")
    deck_file_output.save(temp_filepath)
    zfile.write(temp_filepath, arcname=os.path.split(temp_filepath)[1])
    shutil.rmtree(temp_dirpath) # must delete temp dir when we're done

    # lookup the unique field values in the deck of cards,
    # where the field values are the media object names
    card_list = queries.getDeckCardsList(deck.id)
    field_set = set()
    for c in card_list:
        for f in c['fields']:
            if f['type'] not in ('T', 'M'):
                field_set.add(f['value'])

    # add each media object ot the zip file
    for file_name in field_set:
        file_contents = MediaStoreService.readFileContents(file_name)
        if file_contents is not None:
            zfile.writestr(file_name, file_contents)

    zfile.close()

    return s.getvalue()
Example #28
 def testReadLines(self):
     # "Test BZ2File.readlines()"
     self.createTempFile()
     with BZ2File(self.filename) as bz2f:
         self.assertRaises(TypeError, bz2f.readlines, None)
         sio = StringIO(self.TEXT)
         self.assertEqual(bz2f.readlines(), sio.readlines())
Example #29
def log_error(message, filename, action=None, label='Error'):
    """Writer error message to log file.

    Helper function for :func:`flush_log`, :func:`process_error`.

    :param message: error message
    :type message: string
    :param filename: image filename
    :type filename: string
    :param label: ``'Error'`` or ``'Warning'``
    :type label: string
    :returns: error log details
    :rtype: string
    """
    global ERROR_LOG_COUNTER
    details = ''
    if action:
        details += os.linesep + 'Action:' + \
                    pprint.pformat(action.dump())
    ERROR_LOG_FILE.write(os.linesep.join([
        u'%s %d:%s' % (label, ERROR_LOG_COUNTER, message),
        details,
        os.linesep,
    ]))
    try:
        traceback.print_exc(file=ERROR_LOG_FILE)
    except UnicodeDecodeError:
        stringio = StringIO()
        traceback.print_exc(file=stringio)
        traceb = stringio.getvalue()
        ERROR_LOG_FILE.write(unicode(traceb, ENCODING, 'replace'))
    ERROR_LOG_FILE.write('*' + os.linesep)
    ERROR_LOG_FILE.flush()
    ERROR_LOG_COUNTER += 1
    return details
Example #30
	def pack(self):
		"""Pack data for transfer."""
		s = StringIO()
		p = pickle.Pickler(s)
		p.dump(self)
		data = s.getvalue()
		return struct.pack('!HHI', VERSION[0], VERSION[1], len(data)) + data
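A sketch of the matching receive side, reversing the header/payload layout that pack() produces:

import pickle
import struct

HEADER = struct.Struct('!HHI')  # (major, minor, payload length)

def unpack(data):
    """Inverse of pack(): check the header, unpickle the payload."""
    major, minor, length = HEADER.unpack(data[:HEADER.size])
    payload = data[HEADER.size:HEADER.size + length]
    assert len(payload) == length, "truncated transfer"
    return (major, minor), pickle.loads(payload)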
Example #31
def bytes_to_file(bytes_image):

    return StringIO(bytes_image)
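Under Python 3 this helper would need io.BytesIO, since image data is bytes. One possible version-agnostic port (an assumption, not part of the original):

try:
    from cStringIO import StringIO as BytesBuffer  # Python 2
except ImportError:
    from io import BytesIO as BytesBuffer  # Python 3

def bytes_to_file(bytes_image):
    return BytesBuffer(bytes_image)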
Example #32
 def __str__(self):
     from cStringIO import StringIO
     file_str = StringIO()
     for order in self:
         file_str.write("%s\n" % str(order))
     return file_str.getvalue()
Example #33
    def load_ora(self, filename, feedback_cb=None):
        """Loads from an OpenRaster file"""
        print 'load_ora:'
        t0 = time.time()
        z = zipfile.ZipFile(filename)
        print 'mimetype:', z.read('mimetype').strip()
        xml = z.read('stack.xml')
        image = ET.fromstring(xml)
        stack = image.find('stack')

        w = int(image.attrib['w'])
        h = int(image.attrib['h'])

        def round_up_to_n(value, n):
            assert value >= 0, "function undefined for negative numbers"

            residual = value % n
            if residual:
                value = value - residual + n
            return int(value)

        def get_pixbuf(filename):
            t1 = time.time()

            try:
                fp = z.open(filename, mode='r')
            except KeyError:
                # support for bad zip files (saved by old versions of the GIMP ORA plugin)
                fp = z.open(filename.encode('utf-8'), mode='r')
                print 'WARNING: bad OpenRaster ZIP file. There is an utf-8 encoded filename that does not have the utf-8 flag set:', repr(filename)

            res = self._pixbuf_from_stream(fp, feedback_cb)
            fp.close()
            print '  %.3fs loading %s' % (time.time() - t1, filename)
            return res

        def get_layers_list(root, x=0,y=0):
            res = []
            for item in root:
                if item.tag == 'layer':
                    if 'x' in item.attrib:
                        item.attrib['x'] = int(item.attrib['x']) + x
                    if 'y' in item.attrib:
                        item.attrib['y'] = int(item.attrib['y']) + y
                    res.append(item)
                elif item.tag == 'stack':
                    stack_x = int( item.attrib.get('x', 0) )
                    stack_y = int( item.attrib.get('y', 0) )
                    res += get_layers_list(item, stack_x, stack_y)
                else:
                    print 'Warning: ignoring unsupported tag:', item.tag
            return res

        self.clear() # this leaves one empty layer
        no_background = True
        # FIXME: don't require tile alignment for frame
        self.set_frame(width=round_up_to_n(w, N), height=round_up_to_n(h, N))

        for layer in get_layers_list(stack):
            a = layer.attrib

            if 'background_tile' in a:
                assert no_background
                try:
                    print a['background_tile']
                    self.set_background(get_pixbuf(a['background_tile']))
                    no_background = False
                    continue
                except backgroundsurface.BackgroundError, e:
                    print 'ORA background tile not usable:', e

            src = a.get('src', '')
            if not src.lower().endswith('.png'):
                print 'Warning: ignoring non-png layer'
                continue
            pixbuf = get_pixbuf(src)
            name = a.get('name', '')
            x = int(a.get('x', '0'))
            y = int(a.get('y', '0'))
            opac = float(a.get('opacity', '1.0'))
            compositeop = str(a.get('composite-op', DEFAULT_COMPOSITE_OP))
            if compositeop not in VALID_COMPOSITE_OPS:
                compositeop = DEFAULT_COMPOSITE_OP

            visible = not 'hidden' in a.get('visibility', 'visible')
            self.add_layer(insert_idx=0, name=name)
            last_pixbuf = pixbuf
            t1 = time.time()
            self.load_layer_from_pixbuf(pixbuf, x, y)
            layer = self.layers[0]

            self.set_layer_opacity(helpers.clamp(opac, 0.0, 1.0), layer)
            self.set_layer_compositeop(compositeop, layer)
            self.set_layer_visibility(visible, layer)
            print '  %.3fs converting pixbuf to layer format' % (time.time() - t1)
            # strokemap
            fname = a.get('mypaint_strokemap_v2', None)
            if fname:
                if x % N or y % N:
                    print 'Warning: dropping non-aligned strokemap'
                else:
                    sio = StringIO(z.read(fname))
                    layer.load_strokemap_from_file(sio, x, y)
                    sio.close()
Example #34
    def save_ora(self, filename, options=None, **kwargs):
        print 'save_ora:'
        t0 = time.time()
        tempdir = tempfile.mkdtemp('mypaint')
        # use .tmp extension, so we don't overwrite a valid file if there is an exception
        z = zipfile.ZipFile(filename + '.tmpsave', 'w', compression=zipfile.ZIP_STORED)
        # work around a permission bug in the zipfile library: http://bugs.python.org/issue3394
        def write_file_str(filename, data):
            zi = zipfile.ZipInfo(filename)
            zi.external_attr = 0100644 << 16
            z.writestr(zi, data)
        write_file_str('mimetype', 'image/openraster') # must be the first file
        image = ET.Element('image')
        stack = ET.SubElement(image, 'stack')
        x0, y0, w0, h0 = self.get_effective_bbox()
        a = image.attrib
        a['w'] = str(w0)
        a['h'] = str(h0)

        def store_pixbuf(pixbuf, name):
            tmp = join(tempdir, 'tmp.png')
            t1 = time.time()
            pixbuf.save(tmp, 'png')
            print '  %.3fs pixbuf saving %s' % (time.time() - t1, name)
            z.write(tmp, name)
            os.remove(tmp)

        def store_surface(surface, name, rect=[]):
            tmp = join(tempdir, 'tmp.png')
            t1 = time.time()
            surface.save_as_png(tmp, *rect, **kwargs)
            print '  %.3fs surface saving %s' % (time.time() - t1, name)
            z.write(tmp, name)
            os.remove(tmp)

        def add_layer(x, y, opac, surface, name, layer_name, visible=True, compositeop=DEFAULT_COMPOSITE_OP, rect=[]):
            layer = ET.Element('layer')
            stack.append(layer)
            store_surface(surface, name, rect)
            a = layer.attrib
            if layer_name:
                a['name'] = layer_name
            a['src'] = name
            a['x'] = str(x)
            a['y'] = str(y)
            a['opacity'] = str(opac)
            if compositeop not in VALID_COMPOSITE_OPS:
                compositeop = DEFAULT_COMPOSITE_OP
            a['composite-op'] = compositeop
            if visible:
                a['visibility'] = 'visible'
            else:
                a['visibility'] = 'hidden'
            return layer

        for idx, l in enumerate(reversed(self.layers)):
            if l.is_empty():
                continue
            opac = l.opacity
            x, y, w, h = l.get_bbox()
            el = add_layer(x-x0, y-y0, opac, l._surface, 'data/layer%03d.png' % idx, l.name, l.visible, l.compositeop, rect=(x, y, w, h))
            # strokemap
            sio = StringIO()
            l.save_strokemap_to_file(sio, -x, -y)
            data = sio.getvalue()
            sio.close()
            name = 'data/layer%03d_strokemap.dat' % idx
            el.attrib['mypaint_strokemap_v2'] = name
            write_file_str(name, data)

        # save background as layer (solid color or tiled)
        bg = self.background
        # save as fully rendered layer
        x, y, w, h = self.get_bbox()
        l = add_layer(x-x0, y-y0, 1.0, bg, 'data/background.png', 'background',
                      compositeop=DEFAULT_COMPOSITE_OP, rect=(x, y, w, h))
        x, y, w, h = bg.get_pattern_bbox()
        # save as single pattern (with corrected origin)
        store_surface(bg, 'data/background_tile.png', rect=(x+x0, y+y0, w, h))
        l.attrib['background_tile'] = 'data/background_tile.png'

        # preview (256x256)
        t2 = time.time()
        print '  starting to render full image for thumbnail...'

        thumbnail_pixbuf = self.render_thumbnail()
        store_pixbuf(thumbnail_pixbuf, 'Thumbnails/thumbnail.png')
        print '  total %.3fs spent on thumbnail' % (time.time() - t2)

        helpers.indent_etree(image)
        xml = ET.tostring(image, encoding='UTF-8')

        write_file_str('stack.xml', xml)
        z.close()
        os.rmdir(tempdir)
        if os.path.exists(filename):
            os.remove(filename) # windows needs that
        os.rename(filename + '.tmpsave', filename)

        print '%.3fs save_ora total' % (time.time() - t0)

        return thumbnail_pixbuf
Example #35
def task(ctx, config):
    """
    Test radosgw-admin functionality against a running rgw instance.
    """
    global log
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task s3tests only supports a list or dictionary for configuration"
    all_clients = [
        'client.{id}'.format(id=id_)
        for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')
    ]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    multi_region_run = rgw_utils.multi_region_enabled(ctx)

    client = clients[0]
    # default choice, multi-region code may overwrite this
    if multi_region_run:
        client = rgw_utils.get_master_client(ctx, clients)

    # once the client is chosen, pull the host name and  assigned port out of
    # the role_endpoints that were assigned by the rgw task
    (remote_host, remote_port) = ctx.rgw.role_endpoints[client]

    realm = ctx.rgw.realm
    log.debug('radosgw-admin: realm %r', realm)

    ##
    user1 = 'foo'
    user2 = 'fud'
    subuser1 = 'foo:foo1'
    subuser2 = 'foo:foo2'
    display_name1 = 'Foo'
    display_name2 = 'Fud'
    email = '*****@*****.**'
    email2 = '*****@*****.**'
    access_key = '9te6NH5mcdcq0Tc5i8i1'
    secret_key = 'Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
    access_key2 = 'p5YnriCv1nAtykxBrupQ'
    secret_key2 = 'Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
    swift_secret1 = 'gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
    swift_secret2 = 'ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'

    bucket_name = 'myfoo'
    bucket_name2 = 'mybar'

    # connect to rgw
    connection = boto.s3.connection.S3Connection(
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        is_secure=False,
        port=remote_port,
        host=remote_host,
        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
    )
    connection2 = boto.s3.connection.S3Connection(
        aws_access_key_id=access_key2,
        aws_secret_access_key=secret_key2,
        is_secure=False,
        port=remote_port,
        host=remote_host,
        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
    )

    # legend (test cases can be easily grep-ed out)
    # TESTCASE 'testname','object','method','operation','assertion'
    # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
    assert err

    # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
    (err, out) = rgwadmin(
        ctx,
        client, [
            'user', 'create', '--uid', user1, '--display-name', display_name1,
            '--email', email, '--access-key', access_key, '--secret',
            secret_key, '--max-buckets', '4'
        ],
        check_status=True)

    # TESTCASE 'duplicate email','user','create','existing user email','fails'
    (err, out) = rgwadmin(ctx, client, [
        'user',
        'create',
        '--uid',
        user2,
        '--display-name',
        display_name2,
        '--email',
        email,
    ])
    assert err

    # TESTCASE 'info-existing','user','info','existing user','returns correct info'
    (err, out) = rgwadmin(ctx,
                          client, ['user', 'info', '--uid', user1],
                          check_status=True)
    assert out['user_id'] == user1
    assert out['email'] == email
    assert out['display_name'] == display_name1
    assert len(out['keys']) == 1
    assert out['keys'][0]['access_key'] == access_key
    assert out['keys'][0]['secret_key'] == secret_key
    assert not out['suspended']

    # this whole block should only be run if regions have been configured
    if multi_region_run:
        rgw_utils.radosgw_agent_sync_all(ctx)
        # post-sync, validate that user1 exists on the sync destination host
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            dest_client = c_config['dest']
            (err, out) = rgwadmin(ctx, dest_client,
                                  ['metadata', 'list', 'user'])
            (err, out) = rgwadmin(ctx,
                                  dest_client,
                                  ['user', 'info', '--uid', user1],
                                  check_status=True)
            assert out['user_id'] == user1
            assert out['email'] == email
            assert out['display_name'] == display_name1
            assert len(out['keys']) == 1
            assert out['keys'][0]['access_key'] == access_key
            assert out['keys'][0]['secret_key'] == secret_key
            assert not out['suspended']

        # compare the metadata between different regions, make sure it matches
        log.debug(
            'compare the metadata between different regions, make sure it matches'
        )
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err1, out1) = rgwadmin(
                ctx,
                source_client,
                ['metadata', 'get', 'user:{uid}'.format(uid=user1)],
                check_status=True)
            (err2, out2) = rgwadmin(
                ctx,
                dest_client,
                ['metadata', 'get', 'user:{uid}'.format(uid=user1)],
                check_status=True)
            assert out1 == out2

        # suspend a user on the master, then check the status on the destination
        log.debug(
            'suspend a user on the master, then check the status on the destination'
        )
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err, out) = rgwadmin(ctx, source_client,
                                  ['user', 'suspend', '--uid', user1])
            rgw_utils.radosgw_agent_sync_all(ctx)
            (err, out) = rgwadmin(ctx,
                                  dest_client,
                                  ['user', 'info', '--uid', user1],
                                  check_status=True)
            assert out['suspended']

        # delete a user on the master, then check that it's gone on the destination
        log.debug(
            'delete a user on the master, then check that it\'s gone on the destination'
        )
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err, out) = rgwadmin(ctx,
                                  source_client,
                                  ['user', 'rm', '--uid', user1],
                                  check_status=True)
            rgw_utils.radosgw_agent_sync_all(ctx)
            (err, out) = rgwadmin(ctx, source_client,
                                  ['user', 'info', '--uid', user1])
            assert out is None
            (err, out) = rgwadmin(ctx, dest_client,
                                  ['user', 'info', '--uid', user1])
            assert out is None

            # then recreate it so later tests pass
            (err, out) = rgwadmin(
                ctx,
                client, [
                    'user', 'create', '--uid', user1, '--display-name',
                    display_name1, '--email', email, '--access-key',
                    access_key, '--secret', secret_key, '--max-buckets', '4'
                ],
                check_status=True)

        # now do the multi-region bucket tests
        log.debug('now do the multi-region bucket tests')

        # Create a second user for the following tests
        log.debug('Create a second user for the following tests')
        (err, out) = rgwadmin(
            ctx,
            client, [
                'user', 'create', '--uid', user2, '--display-name',
                display_name2, '--email', email2, '--access-key', access_key2,
                '--secret', secret_key2, '--max-buckets', '4'
            ],
            check_status=True)
        (err, out) = rgwadmin(ctx,
                              client, ['user', 'info', '--uid', user2],
                              check_status=True)
        assert out is not None

        # create a bucket and do a sync
        log.debug('create a bucket and do a sync')
        bucket = connection.create_bucket(bucket_name2)
        rgw_utils.radosgw_agent_sync_all(ctx)

        # compare the metadata for the bucket between different regions, make sure it matches
        log.debug(
            'compare the metadata for the bucket between different regions, make sure it matches'
        )
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err1, out1) = rgwadmin(
                ctx,
                source_client, [
                    'metadata', 'get',
                    'bucket:{bucket_name}'.format(bucket_name=bucket_name2)
                ],
                check_status=True)
            (err2, out2) = rgwadmin(
                ctx,
                dest_client, [
                    'metadata', 'get',
                    'bucket:{bucket_name}'.format(bucket_name=bucket_name2)
                ],
                check_status=True)
            log.debug('metadata 1 %r', out1)
            log.debug('metadata 2 %r', out2)
            assert out1 == out2

            # get the bucket.instance info and compare that
            src_bucket_id = out1['data']['bucket']['bucket_id']
            dest_bucket_id = out2['data']['bucket']['bucket_id']
            (err1, out1) = rgwadmin(
                ctx,
                source_client, [
                    'metadata', 'get',
                    'bucket.instance:{bucket_name}:{bucket_instance}'.format(
                        bucket_name=bucket_name2,
                        bucket_instance=src_bucket_id)
                ],
                check_status=True)
            (err2, out2) = rgwadmin(
                ctx,
                dest_client, [
                    'metadata', 'get',
                    'bucket.instance:{bucket_name}:{bucket_instance}'.format(
                        bucket_name=bucket_name2,
                        bucket_instance=dest_bucket_id)
                ],
                check_status=True)
            assert out1 == out2

        same_region = 0
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']

            source_region = rgw_utils.region_for_client(ctx, source_client)
            dest_region = rgw_utils.region_for_client(ctx, dest_client)

            # 301 is only returned for requests to something in a different region
            if source_region == dest_region:
                log.debug(
                    '301 is only returned for requests to something in a different region'
                )
                same_region += 1
                continue

            # Attempt to create a new connection with user1 to the destination RGW
            log.debug(
                'Attempt to create a new connection with user1 to the destination RGW'
            )
            # and use that to attempt a delete (that should fail)

            (dest_remote_host,
             dest_remote_port) = ctx.rgw.role_endpoints[dest_client]
            connection_dest = boto.s3.connection.S3Connection(
                aws_access_key_id=access_key,
                aws_secret_access_key=secret_key,
                is_secure=False,
                port=dest_remote_port,
                host=dest_remote_host,
                calling_format=boto.s3.connection.OrdinaryCallingFormat(),
            )

            # this should fail
            r, content = send_raw_http_request(connection_dest,
                                               'DELETE',
                                               bucket_name2,
                                               '',
                                               follow_redirects=False)
            assert r.status == 301

            # now delete the bucket on the source RGW and do another sync
            log.debug(
                'now delete the bucket on the source RGW and do another sync')
            bucket.delete()
            rgw_utils.radosgw_agent_sync_all(ctx)

        if same_region == len(ctx.radosgw_agent.config):
            bucket.delete()
            rgw_utils.radosgw_agent_sync_all(ctx)

        # make sure that the bucket no longer exists in either region
        log.debug(
            'make sure that the bucket no longer exists in either region')
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err1, out1) = rgwadmin(ctx, source_client, [
                'metadata', 'get',
                'bucket:{bucket_name}'.format(bucket_name=bucket_name2)
            ])
            (err2, out2) = rgwadmin(ctx, dest_client, [
                'metadata', 'get',
                'bucket:{bucket_name}'.format(bucket_name=bucket_name2)
            ])
            # Both of the previous calls should have errors due to requesting
            # metadata for non-existent buckets
            assert err1
            assert err2

        # create a bucket and then sync it
        log.debug('create a bucket and then sync it')
        bucket = connection.create_bucket(bucket_name2)
        rgw_utils.radosgw_agent_sync_all(ctx)

        # compare the metadata for the bucket between different regions, make sure it matches
        log.debug(
            'compare the metadata for the bucket between different regions, make sure it matches'
        )
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err1, out1) = rgwadmin(
                ctx,
                source_client, [
                    'metadata', 'get',
                    'bucket:{bucket_name}'.format(bucket_name=bucket_name2)
                ],
                check_status=True)
            (err2, out2) = rgwadmin(
                ctx,
                dest_client, [
                    'metadata', 'get',
                    'bucket:{bucket_name}'.format(bucket_name=bucket_name2)
                ],
                check_status=True)
            assert out1 == out2

        # Now delete the bucket and recreate it with a different user
        log.debug(
            'Now delete the bucket and recreate it with a different user')
        # within the same window of time and then sync.
        bucket.delete()
        bucket = connection2.create_bucket(bucket_name2)
        rgw_utils.radosgw_agent_sync_all(ctx)

        # compare the metadata for the bucket between different regions, make sure it matches
        log.debug(
            'compare the metadata for the bucket between different regions, make sure it matches'
        )
        # user2 should own the bucket in both regions
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err1, out1) = rgwadmin(
                ctx,
                source_client, [
                    'metadata', 'get',
                    'bucket:{bucket_name}'.format(bucket_name=bucket_name2)
                ],
                check_status=True)
            (err2, out2) = rgwadmin(
                ctx,
                dest_client, [
                    'metadata', 'get',
                    'bucket:{bucket_name}'.format(bucket_name=bucket_name2)
                ],
                check_status=True)
            assert out1 == out2
            assert out1['data']['owner'] == user2
            assert out1['data']['owner'] != user1

        # now we're going to use this bucket to test meta-data update propagation
        log.debug(
            'now we\'re going to use this bucket to test meta-data update propagation'
        )
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']

            # get the metadata so we can tweak it
            log.debug('get the metadata so we can tweak it')
            (err, orig_data) = rgwadmin(
                ctx,
                source_client, [
                    'metadata', 'get',
                    'bucket:{bucket_name}'.format(bucket_name=bucket_name2)
                ],
                check_status=True)

            # manually edit mtime for this bucket to be 300 seconds in the past
            log.debug(
                'manually edit mtime for this bucket to be 300 seconds in the past'
            )
            new_data = copy.deepcopy(orig_data)
            mtime = datetime.datetime.strptime(
                orig_data['mtime'],
                "%Y-%m-%d %H:%M:%S.%fZ") - datetime.timedelta(300)
            new_data['mtime'] = unicode(
                mtime.strftime("%Y-%m-%d %H:%M:%S.%fZ"))
            log.debug("new mtime ", mtime)
            assert new_data != orig_data
            (err, out) = rgwadmin(
                ctx,
                source_client, [
                    'metadata', 'put',
                    'bucket:{bucket_name}'.format(bucket_name=bucket_name2)
                ],
                stdin=StringIO(json.dumps(new_data)),
                check_status=True)

            # get the metadata and make sure that the 'put' worked
            log.debug('get the metadata and make sure that the \'put\' worked')
            (err, out) = rgwadmin(
                ctx,
                source_client, [
                    'metadata', 'get',
                    'bucket:{bucket_name}'.format(bucket_name=bucket_name2)
                ],
                check_status=True)
            assert out == new_data

            # sync to propagate the new metadata
            log.debug('sync to propagate the new metadata')
            rgw_utils.radosgw_agent_sync_all(ctx)

            # get the metadata from the dest and compare it to what we just set
            log.debug(
                'get the metadata from the dest and compare it to what we just set'
            )
            # and what the source region has.
            (err1, out1) = rgwadmin(
                ctx,
                source_client, [
                    'metadata', 'get',
                    'bucket:{bucket_name}'.format(bucket_name=bucket_name2)
                ],
                check_status=True)
            (err2, out2) = rgwadmin(
                ctx,
                dest_client, [
                    'metadata', 'get',
                    'bucket:{bucket_name}'.format(bucket_name=bucket_name2)
                ],
                check_status=True)
            # yay for the transitive property
            assert out1 == out2
            assert out1 == new_data

        # now we delete the bucket
        log.debug('now we delete the bucket')
        bucket.delete()

        log.debug('sync to propagate the deleted bucket')
        rgw_utils.radosgw_agent_sync_all(ctx)

        # Delete user2 as later tests do not expect it to exist.
        # Verify that it is gone on both regions
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            source_client = c_config['src']
            dest_client = c_config['dest']
            (err, out) = rgwadmin(ctx,
                                  source_client,
                                  ['user', 'rm', '--uid', user2],
                                  check_status=True)
            rgw_utils.radosgw_agent_sync_all(ctx)
            # The two 'user info' calls should fail and not return any data
            # since we just deleted this user.
            (err, out) = rgwadmin(ctx, source_client,
                                  ['user', 'info', '--uid', user2])
            assert out is None
            (err, out) = rgwadmin(ctx, dest_client,
                                  ['user', 'info', '--uid', user2])
            assert out is None

        # Test data sync

        # First create a bucket for data sync test purpose
        bucket = connection.create_bucket(bucket_name + 'data')

        # Create a tiny file and check if in sync
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            if c_config.get('metadata-only'):
                continue

            for full in (True, False):
                source_client = c_config['src']
                dest_client = c_config['dest']
                k = boto.s3.key.Key(bucket)
                k.key = 'tiny_file'
                k.set_contents_from_string("123456789")
                safety_window = rgw_utils.radosgw_data_log_window(
                    ctx, source_client)
                time.sleep(safety_window)
                rgw_utils.radosgw_agent_sync_all(ctx, data=True, full=full)
                (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
                dest_connection = boto.s3.connection.S3Connection(
                    aws_access_key_id=access_key,
                    aws_secret_access_key=secret_key,
                    is_secure=False,
                    port=dest_port,
                    host=dest_host,
                    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
                )
                dest_k = dest_connection.get_bucket(
                    bucket_name + 'data').get_key('tiny_file')
                assert k.get_contents_as_string() == dest_k.get_contents_as_string()

                # check that deleting it removes it from the dest zone
                k.delete()
                time.sleep(safety_window)
                # full sync doesn't handle deleted objects yet
                rgw_utils.radosgw_agent_sync_all(ctx, data=True, full=False)

                dest_bucket = dest_connection.get_bucket(bucket_name + 'data')
                dest_k = dest_bucket.get_key('tiny_file')
                assert dest_k is None, 'object not deleted from destination zone'

        # finally we delete the bucket
        bucket.delete()

        bucket = connection.create_bucket(bucket_name + 'data2')
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            if c_config.get('metadata-only'):
                continue

            for full in (True, False):
                source_client = c_config['src']
                dest_client = c_config['dest']
                (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
                dest_connection = boto.s3.connection.S3Connection(
                    aws_access_key_id=access_key,
                    aws_secret_access_key=secret_key,
                    is_secure=False,
                    port=dest_port,
                    host=dest_host,
                    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
                )
                for i in range(20):
                    k = boto.s3.key.Key(bucket)
                    k.key = 'tiny_file_' + str(i)
                    k.set_contents_from_string(str(i) * 100)

                safety_window = rgw_utils.radosgw_data_log_window(
                    ctx, source_client)
                time.sleep(safety_window)
                rgw_utils.radosgw_agent_sync_all(ctx, data=True, full=full)

                for i in range(20):
                    dest_k = dest_connection.get_bucket(
                        bucket_name + 'data2').get_key('tiny_file_' + str(i))
                    assert (str(i) * 100) == dest_k.get_contents_as_string()
                    k = boto.s3.key.Key(bucket)
                    k.key = 'tiny_file_' + str(i)
                    k.delete()

                # check that deleting removes the objects from the dest zone
                time.sleep(safety_window)
                # full sync doesn't delete deleted objects yet
                rgw_utils.radosgw_agent_sync_all(ctx, data=True, full=False)

                for i in range(20):
                    dest_bucket = dest_connection.get_bucket(bucket_name +
                                                             'data2')
                    dest_k = dest_bucket.get_key('tiny_file_' + str(i))
                    assert dest_k is None, 'object %d not deleted from destination zone' % i
        bucket.delete()

    # end of 'if multi_region_run:'

    # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
    (err, out) = rgwadmin(ctx,
                          client, ['user', 'suspend', '--uid', user1],
                          check_status=True)

    # TESTCASE 'info-suspended','user','info','suspended user','suspended flag is set'
    (err, out) = rgwadmin(ctx,
                          client, ['user', 'info', '--uid', user1],
                          check_status=True)
    assert out['suspended']

    # TESTCASE 're-enable','user','enable','suspended user','succeeds'
    (err, out) = rgwadmin(ctx,
                          client, ['user', 'enable', '--uid', user1],
                          check_status=True)

    # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
    (err, out) = rgwadmin(ctx,
                          client, ['user', 'info', '--uid', user1],
                          check_status=True)
    assert not out['suspended']

    # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
    (err, out) = rgwadmin(ctx,
                          client, [
                              'key',
                              'create',
                              '--uid',
                              user1,
                              '--access-key',
                              access_key2,
                              '--secret',
                              secret_key2,
                          ],
                          check_status=True)

    # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
    (err, out) = rgwadmin(ctx,
                          client, ['user', 'info', '--uid', user1],
                          check_status=True)
    assert len(out['keys']) == 2
    assert (out['keys'][0]['access_key'] == access_key2
            or out['keys'][1]['access_key'] == access_key2)
    assert (out['keys'][0]['secret_key'] == secret_key2
            or out['keys'][1]['secret_key'] == secret_key2)

    # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
    (err, out) = rgwadmin(ctx,
                          client, [
                              'key',
                              'rm',
                              '--uid',
                              user1,
                              '--access-key',
                              access_key2,
                          ],
                          check_status=True)
    assert len(out['keys']) == 1
    assert out['keys'][0]['access_key'] == access_key
    assert out['keys'][0]['secret_key'] == secret_key

    # TESTCASE 'add-subuser','subuser','create','subuser for existing user','succeeds'
    subuser_access = 'full'
    subuser_perm = 'full-control'

    (err, out) = rgwadmin(ctx,
                          client, [
                              'subuser', 'create', '--subuser', subuser1,
                              '--access', subuser_access
                          ],
                          check_status=True)

    # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
    (err, out) = rgwadmin(ctx,
                          client, [
                              'subuser',
                              'modify',
                              '--subuser',
                              subuser1,
                              '--secret',
                              swift_secret1,
                              '--key-type',
                              'swift',
                          ],
                          check_status=True)

    # TESTCASE 'subuser-perm-mask', 'subuser', 'info', 'test subuser perm mask durability', 'succeeds'
    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])

    assert out['subusers'][0]['permissions'] == subuser_perm

    # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
    (err, out) = rgwadmin(ctx,
                          client, ['user', 'info', '--uid', user1],
                          check_status=True)
    assert len(out['swift_keys']) == 1
    assert out['swift_keys'][0]['user'] == subuser1
    assert out['swift_keys'][0]['secret_key'] == swift_secret1

    # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
    (err, out) = rgwadmin(ctx,
                          client, [
                              'subuser',
                              'create',
                              '--subuser',
                              subuser2,
                              '--secret',
                              swift_secret2,
                              '--key-type',
                              'swift',
                          ],
                          check_status=True)

    # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
    (err, out) = rgwadmin(ctx,
                          client, ['user', 'info', '--uid', user1],
                          check_status=True)
    assert len(out['swift_keys']) == 2
    assert (out['swift_keys'][0]['user'] == subuser2
            or out['swift_keys'][1]['user'] == subuser2)
    assert (out['swift_keys'][0]['secret_key'] == swift_secret2
            or out['swift_keys'][1]['secret_key'] == swift_secret2)

    # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
    (err, out) = rgwadmin(ctx,
                          client, [
                              'key',
                              'rm',
                              '--subuser',
                              subuser1,
                              '--key-type',
                              'swift',
                          ],
                          check_status=True)
    assert len(out['swift_keys']) == 1

    # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
    (err, out) = rgwadmin(ctx,
                          client, [
                              'subuser',
                              'rm',
                              '--subuser',
                              subuser1,
                          ],
                          check_status=True)
    assert len(out['subusers']) == 1

    # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and key is removed'
    (err, out) = rgwadmin(ctx,
                          client, [
                              'subuser',
                              'rm',
                              '--subuser',
                              subuser2,
                              '--key-type',
                              'swift',
                              '--purge-keys',
                          ],
                          check_status=True)
    assert len(out['swift_keys']) == 0
    assert len(out['subusers']) == 0

    # TESTCASE 'bucket-stats','bucket','stats','no session/buckets','succeeds, empty list'
    (err, out) = rgwadmin(ctx,
                          client, ['bucket', 'stats', '--uid', user1],
                          check_status=True)
    assert len(out) == 0

    if multi_region_run:
        rgw_utils.radosgw_agent_sync_all(ctx)

    # TESTCASE 'bucket-list2','bucket','list','no buckets','succeeds, empty list'
    (err, out) = rgwadmin(ctx,
                          client, ['bucket', 'list', '--uid', user1],
                          check_status=True)
    assert len(out) == 0

    # create a first bucket
    bucket = connection.create_bucket(bucket_name)

    # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
    (err, out) = rgwadmin(ctx,
                          client, ['bucket', 'list', '--uid', user1],
                          check_status=True)
    assert len(out) == 1
    assert out[0] == bucket_name

    # TESTCASE 'bucket-list-all','bucket','list','all buckets','succeeds, expected list'
    (err, out) = rgwadmin(ctx, client, ['bucket', 'list'], check_status=True)
    assert len(out) >= 1
    assert bucket_name in out

    # TESTCASE 'max-bucket-limit','bucket','create','4 buckets','5th bucket fails due to max buckets == 4'
    bucket2 = connection.create_bucket(bucket_name + '2')
    bucket3 = connection.create_bucket(bucket_name + '3')
    bucket4 = connection.create_bucket(bucket_name + '4')
    # the 5th should fail.
    failed = False
    try:
        connection.create_bucket(bucket_name + '5')
    except Exception:
        failed = True
    assert failed

    # delete the buckets
    bucket2.delete()
    bucket3.delete()
    bucket4.delete()

    # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
    (err, out) = rgwadmin(ctx,
                          client, ['bucket', 'stats', '--bucket', bucket_name],
                          check_status=True)
    assert out['owner'] == user1
    bucket_id = out['id']

    # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
    (err, out) = rgwadmin(ctx,
                          client, ['bucket', 'stats', '--uid', user1],
                          check_status=True)
    assert len(out) == 1
    assert out[0]['id'] == bucket_id  # does it return the same ID twice in a row?

    # use some space
    key = boto.s3.key.Key(bucket)
    key.set_contents_from_string('one')

    # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
    (err, out) = rgwadmin(ctx,
                          client, ['bucket', 'stats', '--bucket', bucket_name],
                          check_status=True)
    assert out['id'] == bucket_id
    assert out['usage']['rgw.main']['num_objects'] == 1
    assert out['usage']['rgw.main']['size_kb'] > 0

    # reclaim it
    key.delete()

    # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
    (err, out) = rgwadmin(
        ctx,
        client, ['bucket', 'unlink', '--uid', user1, '--bucket', bucket_name],
        check_status=True)

    # create a second user to link the bucket to
    (err, out) = rgwadmin(ctx,
                          client, [
                              'user',
                              'create',
                              '--uid',
                              user2,
                              '--display-name',
                              display_name2,
                              '--access-key',
                              access_key2,
                              '--secret',
                              secret_key2,
                              '--max-buckets',
                              '1',
                          ],
                          check_status=True)

    # try creating an object with the first user before the bucket is relinked
    denied = False
    key = boto.s3.key.Key(bucket)

    try:
        key.set_contents_from_string('two')
    except boto.exception.S3ResponseError:
        denied = True

    assert not denied

    # delete the object
    key.delete()

    # fetch the bucket metadata so we can link it to another user
    (err, out) = rgwadmin(
        ctx,
        client,
        ['metadata', 'get', 'bucket:{n}'.format(n=bucket_name)],
        check_status=True)

    bucket_data = out['data']
    assert bucket_data['bucket']['name'] == bucket_name

    bucket_id = bucket_data['bucket']['bucket_id']

    # link the bucket to another user
    (err, out) = rgwadmin(ctx,
                          client, [
                              'bucket', 'link', '--uid', user2, '--bucket',
                              bucket_name, '--bucket-id', bucket_id
                          ],
                          check_status=True)

    # try to remove user, should fail (has a linked bucket)
    (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2])
    assert err

    # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
    (err, out) = rgwadmin(
        ctx,
        client, ['bucket', 'unlink', '--uid', user2, '--bucket', bucket_name],
        check_status=True)

    # relink the bucket to the first user and delete the second user
    (err, out) = rgwadmin(ctx,
                          client, [
                              'bucket', 'link', '--uid', user1, '--bucket',
                              bucket_name, '--bucket-id', bucket_id
                          ],
                          check_status=True)

    (err, out) = rgwadmin(ctx,
                          client, ['user', 'rm', '--uid', user2],
                          check_status=True)

    # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'

    # upload an object
    object_name = 'four'
    key = boto.s3.key.Key(bucket, object_name)
    key.set_contents_from_string(object_name)

    # now delete it
    (err, out) = rgwadmin(
        ctx,
        client,
        ['object', 'rm', '--bucket', bucket_name, '--object', object_name],
        check_status=True)

    # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
    (err, out) = rgwadmin(ctx,
                          client, ['bucket', 'stats', '--bucket', bucket_name],
                          check_status=True)
    assert out['id'] == bucket_id
    assert out['usage']['rgw.main']['num_objects'] == 0

    # list log objects
    # TESTCASE 'log-list','log','list','after activity','succeeds, lists log objects'
    (err, out) = rgwadmin(ctx, client, ['log', 'list'], check_status=True)
    assert len(out) > 0

    for obj in out:
        # TESTCASE 'log-show','log','show','after activity','returns expected info'
        if obj.startswith(('meta', 'data', 'obj_delete_at_hint')):
            continue

        (err, rgwlog) = rgwadmin(ctx,
                                 client, ['log', 'show', '--object', obj],
                                 check_status=True)
        assert len(rgwlog) > 0

        # exempt bucket_name2 from checking as it was only used for multi-region tests
        assert (rgwlog['bucket'].startswith(bucket_name)
                or rgwlog['bucket'].startswith(bucket_name2))
        assert (rgwlog['bucket'] != bucket_name
                or rgwlog['bucket_id'] == bucket_id)
        assert (rgwlog['bucket_owner'] == user1
                or rgwlog['bucket'] == bucket_name + '5'
                or rgwlog['bucket'] == bucket_name2)
        for entry in rgwlog['log_entries']:
            log.debug('checking log entry: %s', entry)
            assert entry['bucket'] == rgwlog['bucket']
            possible_buckets = [bucket_name + '5', bucket_name2]
            user = entry['user']
            assert user == user1 or user.endswith('system-user') or \
                rgwlog['bucket'] in possible_buckets

        # TESTCASE 'log-rm','log','rm','delete log objects','succeeds'
        (err, out) = rgwadmin(ctx,
                              client, ['log', 'rm', '--object', obj],
                              check_status=True)

    # TODO: show log by bucket+date

    # need to wait for all usage data to get flushed; this normally takes up
    # to 30 seconds, but allow up to 20 minutes before giving up
    timestamp = time.time()
    while time.time() - timestamp <= (20 * 60):  # wait up to 20 minutes
        # the last operation we did was delete obj; wait for it to flush
        (err, out) = rgwadmin(
            ctx, client,
            ['usage', 'show', '--categories', 'delete_obj'])
        if get_user_successful_ops(out, user1) > 0:
            break
        time.sleep(1)

    assert time.time() - timestamp <= (20 * 60)

    # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
    (err, out) = rgwadmin(ctx, client, ['usage', 'show'], check_status=True)
    assert len(out['entries']) > 0
    assert len(out['summary']) > 0

    user_summary = get_user_summary(out, user1)

    total = user_summary['total']
    assert total['successful_ops'] > 0

    # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
    (err, out) = rgwadmin(ctx,
                          client, ['usage', 'show', '--uid', user1],
                          check_status=True)
    assert len(out['entries']) > 0
    assert len(out['summary']) > 0
    user_summary = out['summary'][0]
    for entry in user_summary['categories']:
        assert entry['successful_ops'] > 0
    assert user_summary['user'] == user1

    # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
    test_categories = [
        'create_bucket', 'put_obj', 'delete_obj', 'delete_bucket'
    ]
    for cat in test_categories:
        (err, out) = rgwadmin(
            ctx,
            client, ['usage', 'show', '--uid', user1, '--categories', cat],
            check_status=True)
        assert len(out['summary']) > 0
        user_summary = out['summary'][0]
        assert user_summary['user'] == user1
        assert len(user_summary['categories']) == 1
        entry = user_summary['categories'][0]
        assert entry['category'] == cat
        assert entry['successful_ops'] > 0

    # the usage flush interval is 30 seconds; wait that much and then some
    # to make sure everything has been flushed
    time.sleep(35)

    # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
    (err, out) = rgwadmin(ctx,
                          client, ['usage', 'trim', '--uid', user1],
                          check_status=True)
    (err, out) = rgwadmin(ctx,
                          client, ['usage', 'show', '--uid', user1],
                          check_status=True)
    assert len(out['entries']) == 0
    assert len(out['summary']) == 0

    # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
    (err, out) = rgwadmin(ctx,
                          client, ['user', 'suspend', '--uid', user1],
                          check_status=True)

    # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
    try:
        key = boto.s3.key.Key(bucket)
        key.set_contents_from_string('five')
    except boto.exception.S3ResponseError as e:
        assert e.status == 403

    # TESTCASE 'user-re-enable2','user','enable','suspended user','succeeds'
    (err, out) = rgwadmin(ctx,
                          client, ['user', 'enable', '--uid', user1],
                          check_status=True)

    # TESTCASE 'user-re-enable3','user','enable','re-enabled user','can write objects'
    key = boto.s3.key.Key(bucket)
    key.set_contents_from_string('six')

    # TESTCASE 'gc-list', 'gc', 'list', 'get list of objects ready for garbage collection'

    # create an object large enough to be split into multiple parts
    test_string = 'foo' * 10000000

    big_key = boto.s3.key.Key(bucket)
    big_key.set_contents_from_string(test_string)

    # now delete the head
    big_key.delete()

    # wait a bit to give the garbage collector time to cycle
    time.sleep(15)

    (err, out) = rgwadmin(ctx, client, ['gc', 'list'])

    assert len(out) > 0

    # TESTCASE 'gc-process', 'gc', 'process', 'manually collect garbage'
    (err, out) = rgwadmin(ctx, client, ['gc', 'process'], check_status=True)

    # confirm the garbage was collected
    (err, out) = rgwadmin(ctx, client, ['gc', 'list'])

    assert len(out) == 0

    # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
    (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
    assert err

    # delete should fail because ``key`` still exists
    try:
        bucket.delete()
    except boto.exception.S3ResponseError as e:
        assert e.status == 409

    key.delete()
    bucket.delete()

    # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
    bucket = connection.create_bucket(bucket_name)

    # create an object
    key = boto.s3.key.Key(bucket)
    key.set_contents_from_string('seven')

    # should be private already but guarantee it
    key.set_acl('private')

    (err, out) = rgwadmin(
        ctx,
        client,
        ['policy', '--bucket', bucket.name, '--object', key.key],
        check_status=True,
        format='xml')

    acl = get_acl(key)

    assert acl == out.strip('\n')

    # add another grantee by making the object public read
    key.set_acl('public-read')

    (err, out) = rgwadmin(
        ctx,
        client,
        ['policy', '--bucket', bucket.name, '--object', key.key],
        check_status=True,
        format='xml')

    acl = get_acl(key)

    assert acl == out.strip('\n')

    # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
    bucket = connection.create_bucket(bucket_name)
    key_name = ['eight', 'nine', 'ten', 'eleven']
    for i in range(4):
        key = boto.s3.key.Key(bucket)
        key.set_contents_from_string(key_name[i])

    (err, out) = rgwadmin(
        ctx,
        client, ['bucket', 'rm', '--bucket', bucket_name, '--purge-objects'],
        check_status=True)

    # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
    caps = 'user=read'
    (err, out) = rgwadmin(ctx, client,
                          ['caps', 'add', '--uid', user1, '--caps', caps])

    assert out['caps'][0]['perm'] == 'read'

    # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
    (err, out) = rgwadmin(ctx, client,
                          ['caps', 'rm', '--uid', user1, '--caps', caps])

    assert not out['caps']

    # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
    bucket = connection.create_bucket(bucket_name)
    key = boto.s3.key.Key(bucket)

    (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
    assert err

    # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
    bucket = connection.create_bucket(bucket_name)
    key = boto.s3.key.Key(bucket)
    key.set_contents_from_string('twelve')

    (err, out) = rgwadmin(ctx,
                          client,
                          ['user', 'rm', '--uid', user1, '--purge-data'],
                          check_status=True)

    # TESTCASE 'rm-user3','user','rm','deleted user','fails'
    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
    assert err

    # TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds'

    if realm is None:
        (err, out) = rgwadmin(ctx, client,
                              ['zone', 'get', '--rgw-zone', 'default'])
    else:
        (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
    orig_placement_pools = len(out['placement_pools'])

    # the default-placement assertions below were removed: it is not correct
    # to assume the zone has a default placement rule, since that depends on
    # how the zone was set up beforehand
    #
    # assert len(out) > 0
    # assert len(out['placement_pools']) == 1

    # default_rule = out['placement_pools'][0]
    # assert default_rule['key'] == 'default-placement'

    rule = {
        'key': 'new-placement',
        'val': {
            'data_pool': '.rgw.buckets.2',
            'index_pool': '.rgw.buckets.index.2'
        }
    }

    out['placement_pools'].append(rule)

    (err, out) = rgwadmin(ctx,
                          client, ['zone', 'set'],
                          stdin=StringIO(json.dumps(out)),
                          check_status=True)

    if realm is None:
        (err, out) = rgwadmin(ctx, client,
                              ['zone', 'get', '--rgw-zone', 'default'])
    else:
        (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
    assert len(out) > 0
    assert len(out['placement_pools']) == orig_placement_pools + 1
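The usage assertions above rely on two helpers, get_user_summary() and get_user_successful_ops(). A minimal sketch of what they could look like, assuming the 'usage show' JSON layout implied by the assertions (the real helpers in the test module may differ):

def get_user_summary(out, user):
    """Return the 'usage show' summary entry for the given user (sketch)."""
    for summary in out['summary']:
        if summary.get('user') == user:
            return summary
    raise AssertionError('no summary entry found for user: %s' % user)


def get_user_successful_ops(out, user):
    """Total successful ops for a user; 0 if nothing has been flushed yet."""
    if not out['summary']:
        return 0
    return get_user_summary(out, user)['total']['successful_ops']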
Beispiel #36
0
 def dump(self):
     buf = StringIO()
     # Message id
     buf.write("{ID:%u" % self.msg.msgid)
     # Process message arguments
     while self.msg.buf.getPos() < len(self.msg.buf):
         # Read type, Rewind for further decoding
         datatype = self._readByte()
         self.msg.buf.setPos(self.msg.buf.getPos() - 1)
         if datatype == protocol.DATA_TYPE_I8:
             buf.write(", I8:%d" % self.readI8())
         elif datatype == protocol.DATA_TYPE_U8:
             buf.write(", U8:%u" % self.readU8())
         elif datatype == protocol.DATA_TYPE_I16:
             buf.write(", I16:%d" % self.readI16())
         elif datatype == protocol.DATA_TYPE_U16:
             buf.write(", U16:%u" % self.readU16())
         elif datatype == protocol.DATA_TYPE_I32:
             buf.write(", I32:%d" % self.readI32())
         elif datatype == protocol.DATA_TYPE_U32:
             buf.write(", U32:%u" % self.readU32())
         elif datatype == protocol.DATA_TYPE_I64:
             buf.write(", I64:%d" % self.readI64())
         elif datatype == protocol.DATA_TYPE_U64:
             buf.write(", U64:%u" % self.readU64())
         elif datatype == protocol.DATA_TYPE_STR:
             buf.write(", STR:'%s'" % self.readStr())
         elif datatype == protocol.DATA_TYPE_BUF:
             buf.write(", BUF:'%s'" % repr(self.readBuf()))
         elif datatype == protocol.DATA_TYPE_F32:
             buf.write(", F32:%s" % str(self.readF32()))
         elif datatype == protocol.DATA_TYPE_F64:
             buf.write(", F64:%s" % str(self.readF64()))
         else:
             raise DecodeException("decoder : unknown type: %d" % datatype)
     buf.write("}")
     return buf.getvalue()
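dump() peeks at each type byte and then rewinds one position so the matching read*() helper can consume it again. The peek-then-rewind pattern in isolation, using io.BytesIO as a stand-in for the message buffer (an assumption; the real buffer class exposes getPos()/setPos()):

from io import BytesIO

buf = BytesIO(b'\x07payload')
datatype = buf.read(1)           # read the type byte...
buf.seek(buf.tell() - 1)         # ...then rewind one byte
assert buf.read(1) == datatype   # the typed reader sees the same byte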
Beispiel #37
0
def view_file(repo, identifier, filename, username=None):
    """ Displays the content of a file or a tree for the specified repo.
    """
    repo = pagure.lib.get_project(SESSION, repo, user=username)

    if not repo:
        flask.abort(404, 'Project not found')

    reponame = pagure.get_repo_path(repo)

    repo_obj = pygit2.Repository(reponame)

    if repo_obj.is_empty:
        flask.abort(404, 'Empty repo cannot have a file')

    if identifier in repo_obj.listall_branches():
        branchname = identifier
        branch = repo_obj.lookup_branch(identifier)
        commit = branch.get_object()
    else:
        try:
            commit = repo_obj.get(identifier)
            branchname = identifier
        except ValueError:
            if 'master' not in repo_obj.listall_branches():
                flask.abort(404, 'Branch not found')
            # If it's not a commit id then it's part of the filename
            commit = repo_obj[repo_obj.head.target]
            branchname = 'master'

    if commit and not isinstance(commit, pygit2.Blob):
        content = __get_file_in_tree(
            repo_obj, commit.tree, filename.split('/'), bail_on_tree=True)
        if not content:
            flask.abort(404, 'File not found')
        content = repo_obj[content.oid]
    else:
        content = commit

    if not content:
        flask.abort(404, 'File not found')

    if isinstance(content, pygit2.Blob):
        if content.is_binary or not pagure.lib.could_be_text(content.data):
            ext = filename[filename.rfind('.'):]
            if ext in (
                    '.gif', '.png', '.bmp', '.tif', '.tiff', '.jpg',
                    '.jpeg', '.ppm', '.pnm', '.pbm', '.pgm', '.webp', '.ico'):
                try:
                    Image.open(StringIO(content.data))
                    output_type = 'image'
                except IOError as err:
                    LOG.debug(
                        'Failed to load image %s, error: %s', filename, err
                    )
                    output_type = 'binary'
            else:
                output_type = 'binary'
        else:
            try:
                lexer = guess_lexer_for_filename(
                    filename,
                    content.data
                )
            except (ClassNotFound, TypeError):
                lexer = TextLexer()

            content = highlight(
                content.data,
                lexer,
                HtmlFormatter(
                    noclasses=True,
                    style="tango",)
            )
            output_type = 'file'
    else:
        content = sorted(content, key=lambda x: x.filemode)
        output_type = 'tree'

    return flask.render_template(
        'file.html',
        select='tree',
        repo=repo,
        username=username,
        branchname=branchname,
        filename=filename,
        content=content,
        output_type=output_type,
        repo_admin=is_repo_admin(repo),
    )
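view_file() classifies a binary blob as 'image' or 'binary' simply by asking Pillow to parse it. The same probe in isolation, as a sketch (the function name is illustrative):

from io import BytesIO
from PIL import Image

def looks_like_image(data):
    """Return True if Pillow can parse the raw bytes as an image."""
    try:
        Image.open(BytesIO(data))
        return True
    except IOError:
        return False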
Beispiel #38
0
 def test_unicode_fieldnames(self):
     from fusionbox.unicode_csv import DictReader
     s = StringIO('"\xe2\x98\x83"')
     fb_reader = DictReader(s)
     self.assertEquals(fb_reader.fieldnames, [u'\u2603'])
Beispiel #39
0
 def test_readrow_unicode(self):
     s = StringIO('"\xe2\x98\x83"')
     fb_reader = UnicodeReader(s)
     self.assertEquals(fb_reader.next(), [u'\u2603'])
Beispiel #40
0
    def post(self, request, pk):
        """ Process download form to collect objects and create download file."""

        # get the story and associated facets no matter what options are selected
        story_id = request.POST.get('story')
        story = get_object_or_404(Story, id=pk)
        story_txt = story.get_story_download()
        select_all_images = story.get_story_images()
        select_all_documents = story.get_story_documents()
        select_all_audio = story.get_story_audio()
        select_all_video = story.get_story_video()
        image_txt = ""
        document_txt = ""
        audio_txt = ""
        video_txt = ""

        # Set up zip file
        fp = StringIO()
        z = ZipFile(fp, mode="w")
        # Always Zip up story meta
        z.writestr("story_details.txt", story_txt)

        # ------------------------------ #
        #          IF SELECT ALL         #
        # ------------------------------ #
        # if select_all is chosen, then all items will be downloaded
        story_sa_id = request.POST.get('select_all')

        if story_sa_id:
            story = get_object_or_404(Story, id=story_sa_id)

        if story_sa_id:
            # Zip up all facets and assets including story metadata
            for facet in story.facet_set.all():
                z.writestr("{name}.txt".format(name=facet.name),
                           facet.get_facet_download())

            for image in select_all_images:
                z.writestr("{image}.jpg".format(image=image.title),
                           image.photo.read())
                new_info = image.get_asset_download_info()
                image_txt += new_info
            for document in select_all_documents:
                if document.asset_type == "PDF":
                    z.writestr(
                        "{document}.pdf".format(document=document.title),
                        document.document.read())
                    new_info = document.get_asset_download_info()
                    document_txt += new_info
                if document.asset_type == "WORD DOC":
                    z.writestr(
                        "{document}.docx".format(document=document.title),
                        document.document.read())
                    new_info = document.get_asset_download_info()
                    document_txt += new_info
                if document.asset_type == "TEXT":
                    z.writestr(
                        "{document}.txt".format(document=document.title),
                        document.document.read())
                    new_info = document.get_asset_download_info()
                    document_txt += new_info
                if document.asset_type == "COMMA SEPARATED":
                    z.writestr(
                        "{document}.csv".format(document=document.title),
                        document.document.read())
                    new_info = document.get_asset_download_info()
                    document_txt += new_info
                if document.asset_type == "EXCEL":
                    z.writestr(
                        "{document}.xls".format(document=document.title),
                        document.document.read())
                    new_info = document.get_asset_download_info()
                    document_txt += new_info
            for audiofile in select_all_audio:
                if audiofile.asset_type == "MP3":
                    z.writestr(
                        "{audiofile}.mp3".format(audiofile=audiofile.title),
                        audiofile.audio.read())
                    new_info = audiofile.get_asset_download_info()
                    audio_txt += new_info
                if audiofile.asset_type == "WAV":
                    z.writestr(
                        "{audiofile}.wav".format(audiofile=audiofile.title),
                        audiofile.audio.read())
                    new_info = audiofile.get_asset_download_info()
                    audio_txt += new_info
            for video in select_all_video:
                if video.asset_type == "YOUTUBE":
                    # text = video.link.encode('utf-8')
                    # title = video.title.encode('utf-8')
                    # z.writestr("{title}_youtube_link.txt".format(title=title), text)
                    new_info = video.get_asset_download_info()
                    video_txt += new_info
                if video.asset_type == "VIMEO":
                    # text = video.link.encode('utf-8')
                    # title = video.title.encode('utf-8')
                    # z.writestr("{title}_vimeo_link.txt".format(title=title), text)
                    new_info = video.get_asset_download_info()
                    video_txt += new_info

        # user can also select download all items associated with specific facets
        # ------------------------------ #
        #        IF FACET ALL         #
        # ------------------------------ #
        facet_sa_id = request.POST.getlist('facet_select_all')

        if facet_sa_id:
            for facet in facet_sa_id:
                facet = get_object_or_404(Facet, id=facet)
                # Zip up story meta, facet content and facet images
                if facet:
                    z.writestr("{name}.txt".format(name=facet.name),
                               facet.get_facet_download())
                for image in facet.image_assets.all():
                    z.writestr("{image}.jpg".format(image=image.title),
                               image.photo.read())
                    new_info = image.get_asset_download_info()
                    image_txt += new_info
                for document in facet.document_assets.all():
                    if document.asset_type == "PDF":
                        z.writestr(
                            "{document}.pdf".format(document=document.title),
                            document.document.read())
                        new_info = document.get_asset_download_info()
                        document_txt += new_info
                    if document.asset_type == "WORD DOC":
                        z.writestr(
                            "{document}.docx".format(document=document.title),
                            document.document.read())
                        new_info = document.get_asset_download_info()
                        document_txt += new_info
                    if document.asset_type == "TEXT":
                        z.writestr(
                            "{document}.txt".format(document=document.title),
                            document.document.read())
                        new_info = document.get_asset_download_info()
                        document_txt += new_info
                    if document.asset_type == "COMMA SEPARATED":
                        z.writestr(
                            "{document}.csv".format(document=document.title),
                            document.document.read())
                        new_info = document.get_asset_download_info()
                        document_txt += new_info
                    if document.asset_type == "EXCEL":
                        z.writestr(
                            "{document}.xls".format(document=document.title),
                            document.document.read())
                        new_info = document.get_asset_download_info()
                        document_txt += new_info
                for audiofile in facet.audio_assets.all():
                    if audiofile.asset_type == "MP3":
                        z.writestr(
                            "{audiofile}.mp3".format(
                                audiofile=audiofile.title),
                            audiofile.audio.read())
                        new_info = audiofile.get_asset_download_info()
                        audio_txt += new_info
                    if audiofile.asset_type == "WAV":
                        z.writestr(
                            "{audiofile}.wav".format(
                                audiofile=audiofile.title),
                            audiofile.audio.read())
                        new_info = audiofile.get_asset_download_info()
                        audio_txt += new_info
                for video in facet.video_assets.all():
                    if video.asset_type == "YOUTUBE":
                        # text = video.link.encode('utf-8')
                        # title = video.title.encode('utf-8')
                        # z.writestr("{title}_youtube_link.txt".format(title=title), text)
                        new_info = video.get_asset_download_info()
                        video_txt += new_info
                    if video.asset_type == "VIMEO":
                        # text = video.link.encode('utf-8')
                        # title = video.title.encode('utf-8')
                        # z.writestr("{title}_vimeo_link.txt".format(title=title), text)
                        new_info = video.get_asset_download_info()
                        video_txt += new_info

        # if not select all OR facet select all, then user chooses the facet and the images
        # ------------------------------ #
        #      IF FACET SPECIFIC      #
        # ------------------------------ #
        facet_sp_id = request.POST.getlist('facet_specific_content')

        if facet_sp_id:
            for facet_id in facet_sp_id:
                facet = get_object_or_404(Facet, id=facet_id)
                z.writestr("{name}.txt".format(name=facet.name),
                           facet.get_facet_download())

        # ------------------------------ #
        #       IF SPECIFIC IMAGES       #
        # ------------------------------ #
        # if not select all or by facet, then user chooses specific images
        images = request.POST.getlist('images')

        images = ImageAsset.objects.filter(pk__in=images)

        if images:
            for image in images:
                z.writestr("{image}.jpg".format(image=image.title),
                           image.photo.read())
                new_info = image.get_asset_download_info()
                image_txt += new_info

        # ------------------------------ #
        #     IF SPECIFIC DOCUMENTS      #
        # ------------------------------ #
        # if not select all or by facet, then user chooses specific documents
        documents = request.POST.getlist('documents')

        documents = DocumentAsset.objects.filter(pk__in=documents)

        if documents:
            for document in documents:
                if document.asset_type == "PDF":
                    z.writestr(
                        "{document}.pdf".format(document=document.title),
                        document.document.read())
                    new_info = document.get_asset_download_info()
                    document_txt += new_info
                if document.asset_type == "WORD DOC":
                    z.writestr(
                        "{document}.docx".format(document=document.title),
                        document.document.read())
                    new_info = document.get_asset_download_info()
                    document_txt += new_info
                if document.asset_type == "TEXT":
                    z.writestr(
                        "{document}.txt".format(document=document.title),
                        document.document.read())
                    new_info = document.get_asset_download_info()
                    document_txt += new_info
                if document.asset_type == "COMMA SEPARATED":
                    z.writestr(
                        "{document}.csv".format(document=document.title),
                        document.document.read())
                    new_info = document.get_asset_download_info()
                    document_txt += new_info
                if document.asset_type == "EXCEL":
                    z.writestr(
                        "{document}.xls".format(document=document.title),
                        document.document.read())
                    new_info = document.get_asset_download_info()
                    document_txt += new_info

        # ------------------------------ #
        #       IF SPECIFIC AUDIO        #
        # ------------------------------ #
        # if not select all or by facet, then user chooses specific audiofiles
        audiofiles = request.POST.getlist('audiofiles')

        audiofiles = AudioAsset.objects.filter(pk__in=audiofiles)

        if audiofiles:
            for audiofile in audiofiles:
                if audiofile.asset_type == "MP3":
                    z.writestr(
                        "{audiofile}.mp3".format(audiofile=audiofile.title),
                        audiofile.audio.read())
                    new_info = audiofile.get_asset_download_info()
                    audio_txt += new_info
                if audiofile.asset_type == "WAV":
                    z.writestr(
                        "{audiofile}.wav".format(audiofile=audiofile.title),
                        audiofile.audio.read())
                    new_info = audiofile.get_asset_download_info()
                    audio_txt += new_info

        # ------------------------------ #
        #       IF SPECIFIC VIDEO        #
        # ------------------------------ #
        # if not select all or by facet, then user chooses specific video files
        videos = request.POST.getlist('videofiles')

        videos = VideoAsset.objects.filter(pk__in=videos)

        if videos:
            for video in videos:
                if video.asset_type == "YOUTUBE":
                    # text = video.link.encode('utf-8')
                    # title = video.title.encode('utf-8')
                    # z.writestr("{title}_youtube_link.txt".format(title=title), text)
                    new_info = video.get_asset_download_info()
                    video_txt += new_info
                if video.asset_type == "VIMEO":
                    # text = video.link.encode('utf-8')
                    # title = video.title.encode('utf-8')
                    # z.writestr("{title}_vimeo_link.txt".format(title=title), text)
                    new_info = video.get_asset_download_info()
                    video_txt += new_info

        # ------------------------------ #
        #         Create download        #
        # ------------------------------ #
        #Take the final version of asset_txts and write it.
        if image_txt:
            z.writestr("image_details.txt", image_txt)
        if document_txt:
            z.writestr("document_details.txt", document_txt)
        if audio_txt:
            z.writestr("audio_details.txt", audio_txt)
        if video_txt:
            z.writestr("video_details.txt", video_txt)

        z.close()
        fp.seek(0)
        response = HttpResponse(fp, content_type='application/zip')
        fp.close()

        return response
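The view builds the whole archive in memory: write everything into a ZipFile backed by StringIO, rewind, and hand the bytes to HttpResponse. A minimal standalone sketch of that pattern (names are illustrative; it also sets the Content-Disposition header, which the view above omits):

from StringIO import StringIO
from zipfile import ZipFile
from django.http import HttpResponse

def zip_response(named_payloads, archive_name='download.zip'):
    fp = StringIO()
    z = ZipFile(fp, mode='w')
    for name, payload in named_payloads:
        z.writestr(name, payload)  # add each in-memory file to the archive
    z.close()
    fp.seek(0)
    response = HttpResponse(fp.read(), content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename=%s' % archive_name
    return response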
Beispiel #41
0
 def _decode(self,obj,context):
     return _strarray._parse(StringIO(obj),context)
Beispiel #42
0
 def test_readrow_ascii(self):
     s = StringIO('"foo"')
     fb_result = UnicodeReader(s).next()
     s.seek(0)
     csv_result = csv.reader(s).next()
     self.assertEquals(fb_result, csv_result)
Beispiel #43
0
    def __init__(self, *args, **kwargs):
        self.log_file = StringIO()
        self._log_buffer_list = []

        super(ERP5BenchmarkResult, self).__init__(*args, **kwargs)
Beispiel #44
0
 def test_readrow_unicode(self):
     from fusionbox.unicode_csv import DictReader
     s = StringIO('"test"\r\n"\xe2\x98\x83"')
     fb_reader = DictReader(s)
     self.assertEquals(fb_reader.next(), {'test': u'\u2603'})
Beispiel #45
0
 def encode(self):
     buf = BytesIO()
     buf.write(planar_lidar_t._get_packed_fingerprint())
     self._encode_one(buf)
     return buf.getvalue()
Beispiel #46
0
 def _encode(self,obj,context):
     return _strarray._build(StringIO(obj),context)
Beispiel #47
0
 def get(self, *args, **kwargs):
     self.set_header("Content-Type", "image/png")
     img, chars = Captcha.get(self)
     buf = StringIO()
     img.save(buf, 'PNG', quality=70)
     self.write(buf.getvalue())
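The handler renders a PIL image straight into an in-memory PNG. The same round trip in isolation (PNG output ignores the JPEG-style quality option, so passing quality=70 above is harmless but has no effect):

from StringIO import StringIO
from PIL import Image

img = Image.new('RGB', (120, 40), 'white')  # stand-in for the captcha image
buf = StringIO()
img.save(buf, 'PNG')
png_bytes = buf.getvalue()
assert png_bytes.startswith('\x89PNG')  # PNG magic number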
Beispiel #48
0
 def get_image(self, source):
     buf = StringIO(source.read())
     return Image.open(buf)
Beispiel #49
0
def skip_leading_wsp(f):
    "Works on a file, returns a file-like object"
    return StringIO("\n".join(map(string.strip, f.readlines())))
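Despite its name, the helper strips whitespace from both ends of every line (string.strip removes the trailing newline too). A quick check using the function defined above (its module-level import of string is assumed):

from StringIO import StringIO

f = StringIO("  leading\n\ttabbed  \n")
assert skip_leading_wsp(f).read() == "leading\ntabbed"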
Beispiel #50
0
def decodeVersion(payload):

    msg = {}
    decodeData = StringIO(payload)

    msg['version'] = struct.unpack("<i", decodeData.read(4))
    msg['services'] = struct.unpack("<Q", decodeData.read(8))
    msg['timestamp'] = struct.unpack("<Q", decodeData.read(8))

    msg['addr_recv_services'] = struct.unpack("<Q", decodeData.read(8))
    msg['addr_recv_ipv6'] = decodeData.read(12)
    msg['addr_recv_ipv4'] = decodeData.read(4)
    msg['addr_recv_port'] = struct.unpack(">H", decodeData.read(2))

    msg['addr_from_services'] = struct.unpack("<Q", decodeData.read(8))
    msg['addr_from_ipv6'] = decodeData.read(12)
    msg['addr_from_ipv4'] = decodeData.read(4)
    msg['addr_from_port'] = struct.unpack(">H", decodeData.read(2))

    msg['nonce'] = struct.unpack("<Q", decodeData.read(8))

    # Need to add user agent, height, and relay

    logger.info('Version Payload')
    logger.debug('----------------')
    logger.debug('Version: %s', msg['version'][0])
    logger.debug('Services: %s', msg['services'][0])
    logger.debug('Timestamp: %s', datetime.datetime.fromtimestamp(msg['timestamp'][0]).strftime('%Y-%m-%d %H:%M:%S'))

    logger.debug('Addr Services (Recv): %s', msg['addr_recv_services'][0])
    logger.debug('Addr IPv6 (Recv): %s', hexlify(msg['addr_recv_ipv6']))
    #print("Addr IPv4 (Recv): " + str(socket.inet_ntoa(msg['addr_recv_ipv4'])))
    logger.debug('Addr IPv4 (Recv): %s', socket.inet_ntoa(msg['addr_recv_ipv4']))
    logger.debug('Addr Port (Recv): %s', msg['addr_recv_port'][0])

    logger.debug('Addr Services (From): %s', msg['addr_from_services'][0])
    logger.debug('Addr IPv6 (From): %s', hexlify(msg['addr_from_ipv6']))
    #print("Addr IPv4 (From): " + str(socket.inet_ntoa(msg['addr_from_ipv4'])))
    logger.debug('Addr IPv4 (From): %s', socket.inet_ntoa(msg['addr_from_ipv4']))
    logger.debug('Addr Port (From): %s', msg['addr_from_port'][0])

    logger.debug('Nonce : %s', msg['nonce'][0])

    return msg
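For reference, a payload satisfying the reads above can be assembled with struct.pack. This sketch builds exactly the 80 bytes that decodeVersion() consumes; the field values are arbitrary:

import socket
import struct

payload = struct.pack("<iQQ", 70015, 1, 1500000000)  # version, services, timestamp
for _ in range(2):                                   # addr_recv, then addr_from
    payload += struct.pack("<Q", 1)                  # services
    payload += '\x00' * 10 + '\xff' * 2              # IPv6 prefix (12 bytes, IPv4-mapped)
    payload += socket.inet_aton('127.0.0.1')         # IPv4 (4 bytes)
    payload += struct.pack(">H", 8333)               # port, big-endian
payload += struct.pack("<Q", 42)                     # nonce
assert len(payload) == 80                            # exactly what decodeVersion() reads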
Beispiel #51
0
 def resolveEntity(self, publicId, systemId):
     return StringIO("<?xml version='1.0' encoding='UTF-8'?>")
Beispiel #52
0
 def encode(self):
     buf = BytesIO()
     buf.write(another_type_t._get_packed_fingerprint())
     self._encode_one(buf)
     return buf.getvalue()
Beispiel #53
0
 def process_message(self, peer, mailfrom, rcpttos, data):
     from cStringIO import StringIO
     from Mailman import Utils
     from Mailman import Message
     from Mailman import MailList
     # If the message is to a Mailman mailing list, then we'll invoke the
     # Mailman script directly, without going through the real smtpd.
     # Otherwise we'll forward it to the local proxy for disposition.
     listnames = []
     for rcpt in rcpttos:
         local = rcpt.lower().split('@')[0]
         # We allow the following variations on the theme
         #   listname
         #   listname-admin
         #   listname-owner
         #   listname-request
         #   listname-join
         #   listname-leave
         parts = local.split('-')
         if len(parts) > 2:
             continue
         listname = parts[0]
         if len(parts) == 2:
             command = parts[1]
         else:
             command = ''
         if not Utils.list_exists(listname) or command not in (
                 '', 'admin', 'owner', 'request', 'join', 'leave'):
             continue
         listnames.append((rcpt, listname, command))
     # Remove all list recipients from rcpttos and forward what we're not
     # going to take care of ourselves.  Linear removal should be fine
     # since we don't expect a large number of recipients.
     for rcpt, listname, command in listnames:
         rcpttos.remove(rcpt)
     # If there's any non-list destined recipients left,
     print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
     if rcpttos:
         refused = self._deliver(mailfrom, rcpttos, data)
         # TBD: what to do with refused addresses?
         print >> DEBUGSTREAM, 'we got refusals:', refused
     # Now deliver directly to the list commands
     mlists = {}
     s = StringIO(data)
     msg = Message.Message(s)
     # These headers are required for the proper execution of Mailman.  All
     # MTAs in existence seem to add these if the original message doesn't
     # have them.
     if not msg.getheader('from'):
         msg['From'] = mailfrom
     if not msg.getheader('date'):
         msg['Date'] = time.ctime(time.time())
     for rcpt, listname, command in listnames:
         print >> DEBUGSTREAM, 'sending message to', rcpt
         mlist = mlists.get(listname)
         if not mlist:
             mlist = MailList.MailList(listname, lock=0)
             mlists[listname] = mlist
         # dispatch on the type of command
         if command == '':
             # post
             msg.Enqueue(mlist, tolist=1)
         elif command == 'admin':
             msg.Enqueue(mlist, toadmin=1)
         elif command == 'owner':
             msg.Enqueue(mlist, toowner=1)
         elif command == 'request':
             msg.Enqueue(mlist, torequest=1)
         elif command in ('join', 'leave'):
             # TBD: this is a hack!
             if command == 'join':
                 msg['Subject'] = 'subscribe'
             else:
                 msg['Subject'] = 'unsubscribe'
             msg.Enqueue(mlist, torequest=1)
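The recipient parsing at the top of process_message() reduces to a split on '@' and '-'. In isolation:

rcpt = 'mylist-request@example.com'
local = rcpt.lower().split('@')[0]   # drop the domain part
parts = local.split('-')
listname = parts[0]
command = parts[1] if len(parts) == 2 else ''
assert (listname, command) == ('mylist', 'request')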
Beispiel #54
0
def create_summary(result, packages):
    buff = StringIO()

    buff.write('-' * 80 + '\n')
    buff.write('\033[1m[AGGREGATED TEST RESULTS SUMMARY]\033[0m\n\n')

    errors_failures = [
        r for r in result.test_case_results if r.errors or r.failures
    ]
    if errors_failures:
        buff.write('ERRORS/FAILURES:\n')
        for tc_result in errors_failures:
            buff.write(tc_result.description)

    buff.write("PACKAGES: \n%s\n\n" %
               '\n'.join([" * %s" % p for p in packages]))

    buff.write('\nSUMMARY\n')
    if (result.num_errors + result.num_failures) == 0:
        buff.write("\033[32m * RESULT: SUCCESS\033[0m\n")
    else:
        buff.write("\033[1;31m * RESULT: FAIL\033[0m\n")

    # TODO: still some issues with the numbers adding up if tests fail to launch

    # Number of errors from the inner tests, plus the count of tests that
    # didn't run properly (tracked on the 'result' object).
    buff.write(" * TESTS: %s\n" % result.num_tests)
    if result.num_errors:
        buff.write("\033[1;31m * ERRORS: %s\033[0m\n" % result.num_errors)
    else:
        buff.write(" * ERRORS: 0\n")
    if result.num_failures:
        buff.write("\033[1;31m * FAILURES: %s\033[0m\n" % result.num_failures)
    else:
        buff.write(" * FAILURES: 0\n")
    return buff.getvalue()
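Since create_summary only reads a handful of attributes from result, it is easy to try out with a stub. A rough usage sketch; the StubResult shape below is an assumption matching the attribute names used above, not the real result class:

    class StubResult(object):
        # minimal stand-in exposing only what create_summary reads
        test_case_results = []   # no per-case errors or failures
        num_tests = 5
        num_errors = 0
        num_failures = 0

    print(create_summary(StubResult(), ['pkg_a', 'pkg_b']))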
Example #55
0
    def changelogDiff(self):
        """
        Returns a diff with package changes
        (obtained from the .changes file) of the whole
        group. The 2 newer versions are used to compare
        """
        config = self.config
        view = self.name

        diffidx = None
        if config.has_option(view, 'repodiff'):
            raw = config.get(view, 'repodiff')
            idx = raw.split(',')
            if len(idx) == 2:
                i0 = atoi(idx[0]) - 1
                i1 = atoi(idx[1]) - 1
                if i0 == i1 or i0 < 0 or i1 < 0 or i0 >= len(self.repos) or i1 >= len(self.repos):
                    raise Exception("repodiff index out of range got '%s'" % raw)
                diffidx = (i0, i1)
            else:
                raise Exception("malformed repodiff expecting '<NUMBER>,<NUMBER>' got '%s'" % raw)

        if not self.changelog:
            file_str = StringIO()
            file_str.write("Looking for changes...\n")

            # Find the highest version of each package and which repo it comes
            # from, i.e. map each package to its newest version.
            for package, repovers in self.versions_rev.items():
                res = []
                if diffidx:
                    for e in repovers.items():
                        if e[0] == self.repos[diffidx[0]]:
                            res.insert(0, e)
                        elif e[0] == self.repos[diffidx[1]]:
                            res.append(e)
                else:
                    res = sorted(repovers.items(), lambda x, y: self.packageCompare(package, x, y))
                # Now we have a list of (repo, version) tuples for this package.
                # Find the last two and ask each for its changes file, making
                # sure the package version is not None and the changelog isn't
                # either; a changelog of None means the repo does not provide
                # changes (obssr://).
                if len(res) >= 2:
                    idx = len(res) - 1
                    changesnew = None
                    reponew = None
                    while idx >= 0:
                        if res[idx][1]:
                            reponew = res[idx][0]
                            changesnew = self.data[reponew].changelog(package)
                        idx -= 1
                        if changesnew is not None:
                            break
                    if changesnew is None:
                        continue

                    changesold = None
                    repoold = None
                    while idx >= 0:
                        if res[idx][1]:
                            repoold = res[idx][0]
                            changesold = self.data[repoold].changelog(package)
                        idx -= 1
                        if changesold is not None:
                            break
                    if changesold is None:
                        continue

                    self.changelog_packages.append(package)
                    changesdiff = oscpluginoverview.diff.diff_strings(changesold, changesnew)
                    if not changesdiff:
                        # suppress empty diffs
                        continue

                    from oscpluginoverview.texttable import Texttable
                    table = Texttable()
                    table.set_color(self.colorize)
                    file_str.write(table.colorize_text('B', "+--------------------------------------------------------------------------+\n"))
                    file_str.write(table.colorize_text('B', "------- %s ( %s vs %s )\n" % (package, reponew, repoold)))
                    file_str.write(table.colorize_text('B', "+--------------------------------------------------------------------------+\n"))
                    file_str.write(changesdiff)
                    file_str.write("\n")

            self.changelog = file_str.getvalue()
        # if the changelog was cached
        # just return it
        return self.changelog
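The actual comparison is delegated to oscpluginoverview.diff.diff_strings. A sketch of what such a helper could look like using the standard difflib module; the body below is an assumption, not the plugin's real implementation:

    import difflib

    def diff_strings(old, new):
        # unified diff of two changelog texts; empty string when identical
        lines = difflib.unified_diff(old.splitlines(keepends=True),
                                     new.splitlines(keepends=True),
                                     fromfile='old', tofile='new')
        return ''.join(lines)

    print(diff_strings("- fix build\n", "- fix build\n- add feature\n"))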
Example #56
0
    def test_usage_basic(self):
        old_tools_dir = os.environ.get('IDF_TOOLS_PATH') or os.path.expanduser(idf_tools.IDF_TOOLS_PATH_DEFAULT)

        mirror_prefix_map = None
        if os.path.exists(old_tools_dir):
            mirror_prefix_map = 'https://dl.espressif.com/dl/toolchains/preview,file://' + os.path.join(old_tools_dir, 'dist')
            mirror_prefix_map += ';https://dl.espressif.com/dl,file://' + os.path.join(old_tools_dir, 'dist')
            mirror_prefix_map += ';https://github.com/espressif/.*/releases/download/.*/,file://' + os.path.join(old_tools_dir, 'dist', '')
        if mirror_prefix_map:
            print('Using IDF_MIRROR_PREFIX_MAP={}'.format(mirror_prefix_map))
            os.environ['IDF_MIRROR_PREFIX_MAP'] = mirror_prefix_map

        temp_tools_dir = tempfile.mkdtemp(prefix='idf_tools_tmp')
        print('Using IDF_TOOLS_PATH={}'.format(temp_tools_dir))
        os.environ['IDF_TOOLS_PATH'] = temp_tools_dir

        self.addCleanup(shutil.rmtree, temp_tools_dir)

        output_stream = StringIO()
        with redirect_stdout(output_stream):
            idf_tools.main(['list'])
        output = output_stream.getvalue()

        xtensa_esp32_elf_version = 'esp-2020r3-8.4.0'
        esp32ulp_version = '2.28.51-esp-20191205'

        self.assertIn('* xtensa-esp32-elf:', output)
        self.assertIn('- %s (recommended)' % xtensa_esp32_elf_version, output)
        self.assertIn('* esp32ulp-elf', output)
        self.assertIn('- %s (recommended)' % esp32ulp_version, output)

        output_stream = StringIO()
        with redirect_stdout(output_stream):
            idf_tools.main(['install'])
        output = output_stream.getvalue()

        self.assertIn('Installing esp32ulp-elf@' + esp32ulp_version, output)
        self.assertIn('Downloading binutils-esp32ulp', output)
        self.assertIn('Installing xtensa-esp32-elf@' + xtensa_esp32_elf_version, output)
        self.assertIn('Downloading xtensa-esp32-elf', output)
        self.assertIn('to ' + os.path.join(temp_tools_dir, 'dist'), output)

        output_stream = StringIO()
        with redirect_stdout(output_stream):
            idf_tools.main(['check'])
        output = output_stream.getvalue()

        self.assertIn('version installed in tools directory: ' + esp32ulp_version, output)
        self.assertIn('version installed in tools directory: ' + xtensa_esp32_elf_version, output)

        output_stream = StringIO()
        with redirect_stdout(output_stream):
            idf_tools.main(['export'])
        output = output_stream.getvalue()

        self.assertIn('%s/tools/esp32ulp-elf/%s/esp32ulp-elf-binutils/bin' %
                      (temp_tools_dir, esp32ulp_version), output)
        self.assertIn('%s/tools/xtensa-esp32-elf/%s/xtensa-esp32-elf/bin' %
                      (temp_tools_dir, xtensa_esp32_elf_version), output)
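The test repeats the same capture dance four times: swap sys.stdout for a StringIO around a call, then assert on the text. One possible refactor is a small helper; the name run_captured is my own invention:

    import io
    from contextlib import redirect_stdout

    def run_captured(func, *args):
        # run func with stdout redirected into an in-memory buffer
        stream = io.StringIO()
        with redirect_stdout(stream):
            func(*args)
        return stream.getvalue()

    # usage: output = run_captured(idf_tools.main, ['list'])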
Example #57
0
 def open(self, name, mode='r'):
     if isinstance(self.archive, LocalZipFile):
         return self.archive.open(name)
     return StringIO(self.archive.read(name))
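ZipFile.read returns the whole member at once, so the StringIO wrapper only recreates the file-like interface. In modern Python the copy can be skipped entirely with ZipFile.open, which yields a file-like object directly; a small sketch with invented archive and member names:

    import zipfile

    with zipfile.ZipFile("archive.zip") as zf:   # hypothetical archive
        with zf.open("member.txt") as fp:        # file-like, no full-read copy
            data = fp.read()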
Example #58
0
 def format_exc(limit=None):
     strbuf = StringIO()
     traceback.print_exc(limit=limit, file=strbuf)
     return strbuf.getvalue()
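For reference, the standard library has shipped an equivalent traceback.format_exc() since Python 2.4, so the buffer-based helper mainly matters on very old interpreters. A quick check that the two agree:

    import traceback
    from io import StringIO

    def format_exc(limit=None):
        strbuf = StringIO()
        traceback.print_exc(limit=limit, file=strbuf)
        return strbuf.getvalue()

    try:
        1 / 0
    except ZeroDivisionError:
        assert format_exc() == traceback.format_exc()  # identical text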
Example #59
0
 def reset(self):
     self._line = 0
     self._buffer = StringIO()
     self._properly_finished = False
Example #60
0
            print "Error while converting ", file, " to PDF"
    elif file[-3:] == 'pdf':
        try:
            command = "cp " + str(file) + " Consumed/"
            subprocess.call(command, shell=True)
        except:
            pass

to_merge = os.listdir(path)
to_merge.sort()
for file in to_merge:
    time.sleep(1)
    file = "ToMerge/" + str(file.replace("  ", "\ "))
    if file[-3:] == 'pdf' and file[9] != '~':
        with open(file, 'rb') as input_file:
            input_buffer = StringIO(input_file.read())
        try:
            print "   appending file...   :", file
            append_pdf(PdfFileReader(input_buffer), output)
        except utils.PdfReadError:
            try:
                print "decompressing pdf  : ", file
                append_pdf(PdfFileReader(decompress_pdf(input_buffer)), output)
            except:
                pass

# Writing all the collected pages to a file
hash = random.getrandbits(128)
output_file = "Output/Combined" + str(hash)[:6] + str(".pdf")
with open(output_file, "wb") as out_f:
    output.write(out_f)
print "   Generated PDF  : ", output_file