Example #1
    def test_merged0(self):
        wb = Workbook()
        ws0 = wb.add_sheet("0")

        fnt = Font()
        fnt.name = "Arial"
        fnt.colour_index = 4
        fnt.bold = True

        borders = Borders()
        borders.left = 6
        borders.right = 6
        borders.top = 6
        borders.bottom = 6

        style = XFStyle()
        style.font = fnt
        style.borders = borders

        ws0.write_merge(3, 3, 1, 5, "test1", style)
        ws0.write_merge(4, 10, 1, 5, "test2", style)
        ws0.col(1).width = 0x0D00

        stream = BytesIO()
        wb.save(stream)
        md5 = hashlib.md5()
        md5.update(stream.getvalue())
        self.assertEqual("b55f49cfd1fb786bd611ed5b02b9d16c", md5.hexdigest())
Example #2
def gzip_text(text):
    contents = BytesIO()
    f = gzip.GzipFile(fileobj=contents, mode='wb')
    f.write(util.encode_text(text))
    f.flush()
    f.close()
    return contents.getvalue()
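
A minimal, self-contained round trip of the pattern above; the project's util.encode_text is replaced here by plain UTF-8 encoding (an assumption in this sketch, Python 3 stdlib only):

import gzip
from io import BytesIO

def gzip_text_demo(text):
    contents = BytesIO()
    # GzipFile writes a complete gzip member into the in-memory buffer
    with gzip.GzipFile(fileobj=contents, mode='wb') as f:
        f.write(text.encode('utf-8'))  # stand-in for util.encode_text
    return contents.getvalue()

assert gzip.decompress(gzip_text_demo(u'hello world')) == b'hello world'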
Example #3
  def _serve_compressed_histograms(self, query_params):
    """Given a tag and single run, return an array of compressed histograms."""
    tag = query_params.get('tag')
    run = query_params.get('run')
    compressed_histograms = self._multiplexer.CompressedHistograms(run, tag)
    if query_params.get('format') == _OutputFormat.CSV:
      string_io = BytesIO()
      writer = csv.writer(string_io)

      # Build the headers; we have two columns for timing and two columns for
      # each compressed histogram bucket.
      headers = ['Wall time', 'Step']
      if compressed_histograms:
        bucket_count = len(compressed_histograms[0].compressed_histogram_values)
        for i in xrange(bucket_count):
          headers += ['Edge %d basis points' % i, 'Edge %d value' % i]
      writer.writerow(headers)

      for compressed_histogram in compressed_histograms:
        row = [compressed_histogram.wall_time, compressed_histogram.step]
        for value in compressed_histogram.compressed_histogram_values:
          row += [value.rank_in_bps, value.value]
        writer.writerow(row)
      self._send_csv_response(string_io.getvalue())
    else:
      self._send_json_response(compressed_histograms)
Example #4
    def test_dates(self):
        w = Workbook()
        ws = w.add_sheet("Hey, Dude")

        fmts = [
            "M/D/YY",
            "D-MMM-YY",
            "D-MMM",
            "MMM-YY",
            "h:mm AM/PM",
            "h:mm:ss AM/PM",
            "h:mm",
            "h:mm:ss",
            "M/D/YY h:mm",
            "mm:ss",
            "[h]:mm:ss",
            "mm:ss.0",
        ]

        i = 0
        for fmt in fmts:
            ws.write(i, 0, fmt)

            style = XFStyle()
            style.num_format_str = fmt

            ws.write(i, 4, datetime(2013, 5, 10), style)

            i += 1

        stream = BytesIO()
        w.save(stream)
        md5 = hashlib.md5()
        md5.update(stream.getvalue())
        self.assertEqual("82fed69d4f9ea0444d159fa30080f0a3", md5.hexdigest())
Example #5
    def OnFileViewHTML(self, evt):
        # Get an instance of the HTML file handler, use it to save the
        # document to a BytesIO stream, and then display the
        # resulting HTML text in a dialog with a HtmlWindow.
        handler = rt.RichTextHTMLHandler()
        handler.SetFlags(rt.RICHTEXT_HANDLER_SAVE_IMAGES_TO_MEMORY)
        handler.SetFontSizeMapping([7,9,11,12,14,22,100])

        stream = BytesIO()
        if not handler.SaveStream(self.rtc.GetBuffer(), stream):
            return

        import wx.html
        dlg = wx.Dialog(self, title="HTML", style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
        html = wx.html.HtmlWindow(dlg, size=(500,400), style=wx.BORDER_SUNKEN)
        html.SetPage(stream.getvalue())
        btn = wx.Button(dlg, wx.ID_CANCEL)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(html, 1, wx.ALL|wx.EXPAND, 5)
        sizer.Add(btn, 0, wx.ALL|wx.CENTER, 10)
        dlg.SetSizer(sizer)
        sizer.Fit(dlg)

        dlg.ShowModal()

        handler.DeleteTemporaryImages()
Example #6
 def clone(self):
     f = BytesIO()
     self.save(f)
     c = Config()
     f.seek(0, 0)
     c.load(f)
     return c
Example #7
    def visit_immutation(self, node, children):
        context = self._final_context()
        child_type = children[0].expr_name

        if child_type == 'preview':
            if self.tool == 'httpie':
                command = ['http'] + context.httpie_args(self.method,
                                                         quote=True)
            else:
                assert self.tool == 'curl'
                command = ['curl'] + context.curl_args(self.method, quote=True)
            click.echo(' '.join(command))
        elif child_type == 'action':
            output = BytesIO()
            try:
                env = Environment(stdout=output, is_windows=False)
                httpie_main(context.httpie_args(self.method), env=env)
                content = output.getvalue()
            finally:
                output.close()

            # XXX: Work around a bug in click.echo_via_pager(). When you pass
            # a bytestring to echo_via_pager(), it converts the bytestring with
            # str(b'abc'), which makes it "b'abc'".
            if six.PY2:
                content = unicode(content, 'utf-8')  # noqa
            else:
                content = str(content, 'utf-8')
            click.echo_via_pager(content)

        return node
Example #8
 def __iter__(self):
     if self.is_zipped:
         byte_stream = BytesIO(self.response.content)
         with zipfile.ZipFile(byte_stream) as self.zipfile:
             for name in self.zipfile.namelist():
                 with self.zipfile.open(name) as single_file:
                     if name[-3:] == 'csv':
                         reader = csv.reader(single_file, delimiter=self.delimiter)
                     else:
                         reader = single_file
                     reader_iterator = iter(reader)
                     if self.is_header_present:
                         next(reader_iterator)
                     for line in reader_iterator:
                         yield self._parse_line(line)
         byte_stream.close()
     else:
         stream = codecs.iterdecode(self.response.iter_lines(),
                                    self.response.encoding or self.response.apparent_encoding)
         reader = csv.reader(stream, delimiter=self.delimiter)
         reader_iterator = iter(reader)
         if self.is_header_present:
             next(reader_iterator)
         for line in reader_iterator:
             yield self._parse_line(line)
         stream.close()
Example #9
 def make_options_body(self):
     options_buf = BytesIO()
     write_stringmultimap(options_buf, {
         'CQL_VERSION': ['3.0.1'],
         'COMPRESSION': []
     })
     return options_buf.getvalue()
Example #10
    def test_disable_compression(self, *args):
        c = self.make_connection()
        c._callbacks = {0: c._handle_options_response}
        c.defunct = Mock()
        # disable compression
        c.compression = False

        locally_supported_compressions.pop('lz4', None)
        locally_supported_compressions.pop('snappy', None)
        locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress')
        locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress')

        # read in a SupportedMessage response
        header = self.make_header_prefix(SupportedMessage)

        # the server supports snappy and lz4
        options_buf = BytesIO()
        write_stringmultimap(options_buf, {
            'CQL_VERSION': ['3.0.3'],
            'COMPRESSION': ['snappy', 'lz4']
        })
        options = options_buf.getvalue()

        message = self.make_msg(header, options)
        c.process_msg(message, len(message) - 8)

        self.assertEqual(c.decompressor, None)
Example #11
def generate_glassbrain_image(image_pk):
    from neurovault.apps.statmaps.models import Image
    import neurovault
    import matplotlib as mpl
    mpl.rcParams['savefig.format'] = 'jpg'
    my_dpi = 50
    fig = plt.figure(figsize=(330.0/my_dpi, 130.0/my_dpi), dpi=my_dpi)
    
    img = Image.objects.get(pk=image_pk)    
    f = BytesIO()
    try:
        glass_brain = plot_glass_brain(img.file.path, figure=fig)
        glass_brain.savefig(f, dpi=my_dpi)
    except:
        # Glass brains that fail to render are given a dummy image
        this_path = os.path.abspath(os.path.dirname(__file__))
        f = open(os.path.abspath(os.path.join(this_path,
                                              "static","images","glass_brain_empty.jpg")), "rb")
        raise
    finally:
        plt.close('all')
        f.seek(0)
        content_file = ContentFile(f.read())
        img.thumbnail.save("glass_brain_%s.jpg" % img.pk, content_file)
        img.save()
Example #12
def test_encode_decode_empty_string():
    # This is a regression test for
    # https://github.com/luispedro/jug/issues/39
    s = BytesIO()
    jug.backends.encode.encode_to('', s)
    val = jug.backends.encode.decode_from(BytesIO(s.getvalue()))
    assert val == ''
Example #13
    def test_requested_compression_not_available(self, *args):
        c = self.make_connection()
        c._callbacks = {0: c._handle_options_response}
        c.defunct = Mock()
        # request lz4 compression
        c.compression = "lz4"

        locally_supported_compressions.pop('lz4', None)
        locally_supported_compressions.pop('snappy', None)
        locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress')
        locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress')

        # read in a SupportedMessage response
        header = self.make_header_prefix(SupportedMessage)

        # the server only supports snappy
        options_buf = BytesIO()
        write_stringmultimap(options_buf, {
            'CQL_VERSION': ['3.0.3'],
            'COMPRESSION': ['snappy']
        })
        options = options_buf.getvalue()

        message = self.make_msg(header, options)
        c.process_msg(message, len(message) - 8)

        # make sure it errored correctly
        c.defunct.assert_called_once_with(ANY)
        args, kwargs = c.defunct.call_args
        self.assertIsInstance(args[0], ProtocolError)
Example #14
def recvbytes(self, bytes_needed, sock_buf=None):
    """
        Atomic read of bytes_needed bytes.

        This function either returns exactly the number of bytes requested in a
        BytesIO buffer, returns None, or raises a socket error.

        If the return value is None, the socket was closed by the other side.
    """
    if sock_buf is None:
        sock_buf = BytesIO()
    bytes_count = 0
    while bytes_count < bytes_needed:
        chunk = self.recv(min(bytes_needed - bytes_count, 32768))
        part_count = len(chunk)

        if type(chunk) == str:
            chunk = b(chunk)

        if part_count < 1:
            return None

        bytes_count += part_count
        sock_buf.write(chunk)

    return sock_buf
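
The read loop above matters because a single recv() may return fewer bytes than requested. A hedged, self-contained demo of the same loop over a socketpair (the names here are illustrative, not from the source):

import socket
from io import BytesIO

def recv_exact(sock, n):
    buf = BytesIO()
    count = 0
    while count < n:
        chunk = sock.recv(min(n - count, 32768))
        if not chunk:
            return None  # peer closed the connection before n bytes arrived
        count += len(chunk)
        buf.write(chunk)
    return buf

a, b = socket.socketpair()
b.sendall(b'x' * 10)
assert recv_exact(a, 10).getvalue() == b'x' * 10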
Example #15
    def filter(self, im):
        if self.sharpness:
            im = sharpen(im, self.sharpness)
        buf = BytesIO()
        if self.palette:
            if im.mode in ('RGBA', 'LA'):
                alpha = im.split()[3]
                alpha = Image.eval(alpha, lambda a: 255 if a > 0 else 0)
                mask = Image.eval(alpha, lambda a: 255 if a == 0 else 0)

                matte = Image.new("RGBA", im.size, self.background)
                matte.paste(im, (0, 0), im)
                matte = matte.convert("RGB").convert(
                    'P', palette=Image.ADAPTIVE, colors=self.colors - 1)
                matte.paste(self.colors, mask)
                matte.save(buf, "PNG", transparency=self.colors)
            elif im.mode != 'P':
                im = im.convert('P', palette=Image.ADAPTIVE,
                                colors=self.colors)
                im.save(buf, 'PNG')
            else:
                im.save(buf, 'PNG')
        else:
            if not im.mode.startswith("RGB"):
                im = im.convert('RGB')
            im.save(buf, 'PNG')
        buf.seek(0)
        return buf
Example #16
def compress_string(s):

    # avg_block_size is actually the reciprocal of the average
    # intended interflush distance.

    rnd = Random(s)

    flushes_remaining = FLUSH_LIMIT

    if len(s) < AVERAGE_SPAN_BETWEEN_FLUSHES * APPROX_MIN_FLUSHES:
        avg_block_size = APPROX_MIN_FLUSHES / float(len(s) + 1)
    else:
        avg_block_size = 1.0 / AVERAGE_SPAN_BETWEEN_FLUSHES

    s = StringIO(s) if isinstance(s, six.text_type) else BytesIO(s)
    zbuf = BytesIO()
    zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
    chunk = s.read(MIN_INTERFLUSH_INTERVAL + int(rnd.expovariate(avg_block_size)))
    while chunk and flushes_remaining:
        zfile.write(chunk)
        zfile.flush()
        flushes_remaining -= 1
        chunk = s.read(MIN_INTERFLUSH_INTERVAL + int(rnd.expovariate(avg_block_size)))
    zfile.write(chunk)
    zfile.write(s.read())
    zfile.close()
    return zbuf.getvalue()
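
The interleaved flush() calls above insert sync points but still produce one valid gzip member, so standard tooling can decompress the result. A minimal check of that property (the module constants above are not needed for this sketch):

import gzip
from io import BytesIO
from gzip import GzipFile

payload = b'a' * 100000
zbuf = BytesIO()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
for i in range(0, len(payload), 4096):
    zfile.write(payload[i:i + 4096])
    zfile.flush()  # sync flush: a potential random-access point in the stream
zfile.close()
assert gzip.decompress(zbuf.getvalue()) == payload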
Example #17
 def test_get(self):
     image = Image.new('RGBA', (8, 8))
     image.paste((255, 0, 0, 0), (0, 0, 4, 4))
     image.paste((0, 255, 0, 0), (0, 4, 4, 8))
     image.paste((0, 0, 255, 0), (4, 0, 8, 4))
     image.paste((0, 0, 0, 255), (4, 4, 8, 8))
     string_io = StringIO()
     image.save(string_io, 'PNG')
     tile = Tile(TileCoord(1, 0, 0, 2), data=string_io.getvalue())
     tiles = list(self.mtsts.get([tile]))
     self.assertEqual(len(tiles), 4)
     self.assertEqual(tiles[0].tilecoord, TileCoord(1, 0, 0))
     image = Image.open(StringIO(tiles[0].data))
     self.assertEqual(image.size, (2, 2))
     self.assertEqual(image.getcolors(), [(4, (255, 0, 0, 0))])
     self.assertEqual(tiles[1].tilecoord, TileCoord(1, 0, 1))
     image = Image.open(StringIO(tiles[1].data))
     self.assertEqual(image.size, (2, 2))
     self.assertEqual(image.getcolors(), [(4, (0, 255, 0, 0))])
     self.assertEqual(tiles[2].tilecoord, TileCoord(1, 1, 0))
     image = Image.open(StringIO(tiles[2].data))
     self.assertEqual(image.size, (2, 2))
     self.assertEqual(image.getcolors(), [(4, (0, 0, 255, 0))])
     self.assertEqual(tiles[3].tilecoord, TileCoord(1, 1, 1))
     image = Image.open(StringIO(tiles[3].data))
     self.assertEqual(image.size, (2, 2))
     self.assertEqual(image.getcolors(), [(4, (0, 0, 0, 255))])
Example #18
    def test_simple_proguard_upload(self):
        project = self.create_project(name='foo')

        url = reverse('sentry-api-0-dsym-files', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
        })

        self.login_as(user=self.user)

        out = BytesIO()
        f = zipfile.ZipFile(out, 'w')
        f.writestr('proguard/%s.txt' % PROGUARD_UUID, PROGUARD_SOURCE)
        f.writestr('ignored-file.txt', b'This is just some stuff')
        f.close()

        response = self.client.post(url, {
            'file': SimpleUploadedFile('symbols.zip', out.getvalue(),
                                       content_type='application/zip'),
        }, format='multipart')

        assert response.status_code == 201, response.content
        assert len(response.data) == 1
        assert response.data[0]['headers'] == {
            'Content-Type': 'text/x-proguard+plain'
        }
        assert response.data[0]['sha1'] == 'e6d3c5185dac63eddfdc1a5edfffa32d46103b44'
        assert response.data[0]['uuid'] == PROGUARD_UUID
        assert response.data[0]['objectName'] == 'proguard-mapping'
        assert response.data[0]['cpuName'] == 'any'
        assert response.data[0]['symbolType'] == 'proguard'
Example #19
    def serialize(
            self, destination=None, encoding="utf-8", format='xml', **args):

        if self.type in ('CONSTRUCT', 'DESCRIBE'):
            return self.graph.serialize(
                destination, encoding=encoding, format=format, **args)

        """stolen wholesale from graph.serialize"""
        from rdflib import plugin
        serializer = plugin.get(format, ResultSerializer)(self)
        if destination is None:
            stream = BytesIO()
            stream2 = EncodeOnlyUnicode(stream)
            serializer.serialize(stream2, encoding=encoding, **args)
            return stream.getvalue()
        if hasattr(destination, "write"):
            stream = destination
            serializer.serialize(stream, encoding=encoding, **args)
        else:
            location = destination
            scheme, netloc, path, params, query, fragment = urlparse(location)
            if netloc != "":
                print("WARNING: not saving as location" +
                      "is not a local file reference")
                return
            fd, name = tempfile.mkstemp()
            stream = os.fdopen(fd, 'wb')
            serializer.serialize(stream, encoding=encoding, **args)
            stream.close()
            if hasattr(shutil, "move"):
                shutil.move(name, path)
            else:
                shutil.copy(name, path)
                os.remove(name)
Example #20
    def test_use_requested_compression(self, *args):
        c = self.make_connection()
        c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message)}
        c.defunct = Mock()
        # request snappy compression
        c.compression = "snappy"

        locally_supported_compressions.pop('lz4', None)
        locally_supported_compressions.pop('snappy', None)
        locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress')
        locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress')

        # read in a SupportedMessage response
        header = self.make_header_prefix(SupportedMessage)

        # the server supports snappy and lz4
        options_buf = BytesIO()
        write_stringmultimap(options_buf, {
            'CQL_VERSION': ['3.0.3'],
            'COMPRESSION': ['snappy', 'lz4']
        })
        options = options_buf.getvalue()

        c.process_msg(_Frame(version=4, flags=0, stream=0, opcode=SupportedMessage.opcode, body_offset=9, end_pos=9 + len(options)), options)

        self.assertEqual(c.decompressor, locally_supported_compressions['snappy'][1])
Example #21
    def test_xmlrunner_check_for_valid_xml_streamout(self):
        """
        This test checks if the xml document is valid if there are more than
        one testsuite and the output of the report is a single stream.
        """
        class DummyTestA(unittest.TestCase):

            def test_pass(self):
                pass

        class DummyTestB(unittest.TestCase):

            def test_pass(self):
                pass

        suite = unittest.TestSuite()
        suite.addTest(unittest.TestLoader().loadTestsFromTestCase(DummyTestA))
        suite.addTest(unittest.TestLoader().loadTestsFromTestCase(DummyTestB))
        outdir = BytesIO()
        runner = xmlrunner.XMLTestRunner(
            stream=self.stream, output=outdir, verbosity=self.verbosity,
            **self.runner_kwargs)
        runner.run(suite)
        outdir.seek(0)
        output = outdir.read()
        # Finally check if we have a valid XML document or not.
        try:
            minidom.parseString(output)
        except Exception as e:  # pragma: no cover
            # note: we could remove the try/except, but the bare traceback is cruder.
            self.fail(e)
Example #22
    def test_save_model_with_writable_caches(self):
        # If one or both cache elements are read-only, no saving.
        expected_mean_vec = numpy.array([1, 2, 3])
        expected_rotation = numpy.eye(3)

        expected_mean_vec_bytes = BytesIO()
        # noinspection PyTypeChecker
        numpy.save(expected_mean_vec_bytes, expected_mean_vec)
        expected_mean_vec_bytes = expected_mean_vec_bytes.getvalue()

        expected_rotation_bytes = BytesIO()
        # noinspection PyTypeChecker
        numpy.save(expected_rotation_bytes, expected_rotation)
        expected_rotation_bytes = expected_rotation_bytes.getvalue()

        itq = ItqFunctor()
        itq.mean_vec = expected_mean_vec
        itq.rotation = expected_rotation
        itq.mean_vec_cache_elem = DataMemoryElement(readonly=False)
        itq.rotation_cache_elem = DataMemoryElement(readonly=False)

        itq.save_model()
        self.assertEqual(itq.mean_vec_cache_elem.get_bytes(),
                         expected_mean_vec_bytes)
        self.assertEqual(itq.rotation_cache_elem.get_bytes(),
                         expected_rotation_bytes)
Example #23
def gzip(f, *args, **kwargs):
    """GZip Flask Response Decorator."""

    data = f(*args, **kwargs)

    if isinstance(data, Response):
        content = data.data
    else:
        content = data

    gzip_buffer = BytesIO()
    gzip_file = gzip2.GzipFile(
        mode='wb',
        compresslevel=4,
        fileobj=gzip_buffer
    )
    gzip_file.write(content)
    gzip_file.close()

    gzip_data = gzip_buffer.getvalue()

    if isinstance(data, Response):
        data.data = gzip_data
        data.headers['Content-Encoding'] = 'gzip'
        data.headers['Content-Length'] = str(len(data.data))

        return data

    return gzip_data
Example #24
class FakeResponse(object):
    """A fake HTTPResponse object for testing."""

    def __init__(self, code, body, headers=None):
        self.code = code
        self.msg = str(code)
        if headers is None:
            headers = {}
        self.headers = headers
        self.info = lambda: self.headers
        if isinstance(body, six.text_type):
            body = body.encode('utf-8')
        self.body_file = BytesIO(body)

    def read(self):
        """Read the entire response body."""
        return self.body_file.read()

    def readline(self):
        """Read a single line from the response body."""
        return self.body_file.readline()

    def close(self):
        """Close the connection."""
        pass
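
A short usage sketch for FakeResponse above (six assumed importable): text bodies are encoded to UTF-8, so read() always returns bytes.

resp = FakeResponse(200, u'{"ok": true}',
                    headers={'Content-Type': 'application/json'})
assert resp.code == 200
assert resp.read() == b'{"ok": true}'
assert resp.info()['Content-Type'] == 'application/json'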
Example #25
 def default(self, obj):
     try:
         return super(ObjectJSONEncoder, self).default(obj)
     except TypeError as e:
         if "not JSON serializable" not in str(e):
             raise
         if isinstance(obj, datetime.datetime):
             return {'ISO8601_datetime': obj.strftime('%Y-%m-%dT%H:%M:%S.%f%z')}
         if isinstance(obj, datetime.date):
             return {'ISO8601_date': obj.isoformat()}
         if numpy is not None and isinstance(obj, numpy.ndarray) and obj.ndim == 1:
             memfile = BytesIO()
             numpy.save(memfile, obj)
             memfile.seek(0)
             serialized = json.dumps(memfile.read().decode('latin-1'))
             d = {
                 '__ndarray__': serialized,
             }
             return d
         else:
             d = {
                 '__class__': obj.__class__.__qualname__,
                 '__module__': obj.__module__,
             }
             return d
Example #26
def is_zipstream(data):
    """
    Just like zipfile.is_zipfile, but works on buffers and streams
    rather than filenames.

    If data supports the read method, it will be treated as a stream
    and read from to test whether it is a valid ZipFile.

    If data also supports the tell and seek methods, it will be
    rewound after being tested.
    """

    if isinstance(data, (str, buffer)):
        data = BytesIO(data)

    if hasattr(data, "read"):
        tell = 0
        if hasattr(data, "tell"):
            tell = data.tell()

        try:
            result = bool(_EndRecData(data))
        except IOError:
            result = False

        if hasattr(data, "seek"):
            data.seek(tell)

    else:
        raise TypeError("requies str, buffer, or stream-like object")

    return result
Example #27
def deepCopy(obj):
    stream = BytesIO()
    p = Pickler(stream, 1)
    p.dump(obj)
    stream.seek(0)
    u = Unpickler(stream)
    return u.load()
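
Hedged usage of deepCopy above, assuming Pickler and Unpickler come from the standard pickle module: the pickle round trip through BytesIO yields an independent copy.

original = {'nums': [1, 2, 3]}
copy = deepCopy(original)
copy['nums'].append(4)
assert original['nums'] == [1, 2, 3]  # the original is untouched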
Example #28
    def serialize(self, xid=None):
        buff = BytesIO()

        formats = []
        data = []
        if xid is not None:
            formats.append(Int.fmt)
            data.append(xid)
        if self.opcode:
            formats.append(Int.fmt)
            data.append(self.opcode)

        for request in self.requests:
            header = MultiHeader(type=request.opcode, done=False, error=-1)
            header_format, header_data = header.render()
            formats.append(header_format)
            data.extend(header_data)

            payload_format, payload_data = request.render()
            formats.append(payload_format)
            data.extend(payload_data)

        footer = MultiHeader(type=-1, done=True, error=-1)
        footer_format, footer_data = footer.render()
        formats.append(footer_format)
        data.extend(footer_data)

        buff.write(struct.pack("!" + "".join(formats), *data))

        return buff.getvalue()
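
The serialize method above accumulates struct format fragments alongside their values, then packs everything in one call. The same pattern reduced to a runnable sketch (the field choices are illustrative):

import struct
from io import BytesIO

buff = BytesIO()
formats, data = [], []
formats.append('i')   # e.g. an xid
data.append(42)
formats.append('h')   # e.g. an opcode
data.append(7)
# one network-byte-order pack over the accumulated format string
buff.write(struct.pack('!' + ''.join(formats), *data))
assert buff.getvalue() == struct.pack('!ih', 42, 7)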
Example #29
def test_decode_response_gzip():
    body = b'gzip message'

    buf = BytesIO()
    f = gzip.GzipFile('a', fileobj=buf, mode='wb')
    f.write(body)
    f.close()

    compressed_body = buf.getvalue()
    buf.close()
    gzip_response = {
        'body': {'string': compressed_body},
        'headers': {
            'access-control-allow-credentials': ['true'],
            'access-control-allow-origin': ['*'],
            'connection': ['keep-alive'],
            'content-encoding': ['gzip'],
            'content-length': ['177'],
            'content-type': ['application/json'],
            'date': ['Wed, 02 Dec 2015 19:44:32 GMT'],
            'server': ['nginx']
        },
        'status': {'code': 200, 'message': 'OK'}
    }
    decoded_response = decode_response(gzip_response)
    assert decoded_response['body']['string'] == body
    assert decoded_response['headers']['content-length'] == [str(len(body))]
Example #30
    def test_requested_compression_not_available(self, *args):
        c = self.make_connection()
        c._requests = {0: (c._handle_options_response, ProtocolHandler.decode_message)}
        c.defunct = Mock()
        # request lz4 compression
        c.compression = "lz4"

        locally_supported_compressions.pop('lz4', None)
        locally_supported_compressions.pop('snappy', None)
        locally_supported_compressions['lz4'] = ('lz4compress', 'lz4decompress')
        locally_supported_compressions['snappy'] = ('snappycompress', 'snappydecompress')

        # read in a SupportedMessage response
        header = self.make_header_prefix(SupportedMessage)

        # the server only supports snappy
        options_buf = BytesIO()
        write_stringmultimap(options_buf, {
            'CQL_VERSION': ['3.0.3'],
            'COMPRESSION': ['snappy']
        })
        options = options_buf.getvalue()

        c.process_msg(_Frame(version=4, flags=0, stream=0, opcode=SupportedMessage.opcode, body_offset=9, end_pos=9 + len(options)), options)

        # make sure it errored correctly
        c.defunct.assert_called_once_with(ANY)
        args, kwargs = c.defunct.call_args
        self.assertIsInstance(args[0], ProtocolError)
Example #31
def validate_junit_report(text):
    document = etree.parse(BytesIO(text))
    JUnitSchema.assertValid(document)
Example #32
def test_video_events_on_download_create(api_app, webhooks, db, api_project,
                                         access_token, json_headers):
    """Test deposit events."""
    (project, video_1, video_2) = api_project
    video_1_depid = video_1['_deposit']['id']
    project_id = str(project.id)
    video_1_id = str(video_1.id)
    bucket_id = video_1._bucket.id

    with api_app.test_request_context():
        url = url_for('invenio_webhooks.event_list',
                      receiver_id='downloader',
                      access_token=access_token)

    with mock.patch('requests.get') as mock_request, \
            mock.patch('invenio_indexer.tasks.index_record.delay') \
            as mock_indexer, \
            api_app.test_client() as client:
        file_size = 1024 * 1024
        mock_request.return_value = type(
            'Response', (object, ), {
                'raw': BytesIO(b'\x00' * file_size),
                'headers': {
                    'Content-Length': file_size
                }
            })

        payload = dict(uri='http://example.com/test.pdf',
                       bucket_id=str(bucket_id),
                       deposit_id=video_1_depid,
                       key='test.pdf')

        resp = client.post(url, headers=json_headers, data=json.dumps(payload))
        assert resp.status_code == 201

        file_size = 1024 * 1024 * 6
        mock_request.return_value = type(
            'Response', (object, ), {
                'raw': BytesIO(b'\x00' * file_size),
                'headers': {
                    'Content-Length': file_size
                }
            })

        resp = client.post(url, headers=json_headers, data=json.dumps(payload))
        assert resp.status_code == 201

        deposit = deposit_video_resolver(video_1_depid)

        events = get_deposit_events(deposit['_deposit']['id'])

        assert len(events) == 2
        assert events[0].payload['deposit_id'] == video_1_depid
        assert events[1].payload['deposit_id'] == video_1_depid

        status = get_tasks_status_by_task(events)
        assert status == {'file_download': states.SUCCESS}

        # check if the states are inside the deposit
        res = client.get(url_for('invenio_deposit_rest.video_item',
                                 pid_value=video_1_depid,
                                 access_token=access_token),
                         headers=json_headers)
        assert res.status_code == 200
        data = json.loads(res.data.decode('utf-8'))['metadata']
        assert data['_cds']['state']['file_download'] == states.SUCCESS
        assert deposit._get_files_dump() == data['_files']

        # check the record is inside the indexer queue
        ids = set(get_indexed_records_from_mock(mock_indexer))
        assert len(ids) == 2
        assert video_1_id in ids
        assert project_id in ids
Example #33
def test_video_events_on_download_check_index(api_app, webhooks, db,
                                              api_project, access_token,
                                              json_headers, users):
    """Test deposit events."""
    (project, video_1, video_2) = api_project
    prepare_videos_for_publish([video_1, video_2])
    project_depid = project['_deposit']['id']
    video_1_depid = video_1['_deposit']['id']
    bucket_id = video_1._bucket.id

    with api_app.test_request_context():
        url = url_for('invenio_webhooks.event_list',
                      receiver_id='downloader',
                      access_token=access_token)

    with mock.patch('requests.get') as mock_request, \
            api_app.test_client() as client:
        login_user_via_session(client, email=User.query.get(users[0]).email)

        file_size = 1024 * 1024
        mock_request.return_value = type(
            'Response', (object, ), {
                'raw': BytesIO(b'\x00' * file_size),
                'headers': {
                    'Content-Length': file_size
                }
            })

        payload = dict(uri='http://example.com/test.pdf',
                       bucket_id=str(bucket_id),
                       deposit_id=video_1_depid,
                       key='test.pdf')

        resp = client.post(url, headers=json_headers, data=json.dumps(payload))
        assert resp.status_code == 201

        # run indexer
        RecordIndexer().process_bulk_queue()
        sleep(2)

        deposit = deposit_video_resolver(video_1_depid)
        file_dumps = deposit._get_files_dump()
        assert len(file_dumps) == 1

        def search_record(url):
            res = client.get(url, headers=json_headers)
            assert res.status_code == 200
            data = json.loads(
                res.data.decode('utf-8'))['hits']['hits'][0]['metadata']
            return data

        # check if the task states and files are inside elasticsearch
        # -> check video
        url_video_deposit = url_for('invenio_deposit_rest.video_list',
                                    q='_deposit.id:{0}'.format(video_1_depid),
                                    access_token=access_token)
        data = search_record(url_video_deposit)
        assert data['_cds']['state']['file_download'] == states.SUCCESS
        assert file_dumps == data['_files']
        # -> check project
        url_project_deposit = url_for(
            'invenio_deposit_rest.project_list',
            q='_deposit.id:{0}'.format(project_depid),
            access_token=access_token)
        search_record(url_project_deposit)
        assert data['_cds']['state']['file_download'] == states.SUCCESS

        # [[ EDIT VIDEO ]]
        deposit = deposit_video_resolver(video_1_depid)
        video_edited = deepcopy(deposit)
        del video_edited['_files']
        del video_edited['_cds']['state']
        reset_oauth2()
        res = client.put(url_for('invenio_deposit_rest.video_item',
                                 pid_value=video_1_depid),
                         data=json.dumps(video_edited),
                         headers=json_headers)
        assert res.status_code == 200

        # check if the task states and files are inside elasticsearch
        # -> check video
        data = search_record(url_video_deposit)
        assert data['_cds']['state']['file_download'] == states.SUCCESS
        assert file_dumps == data['_files']
        # -> check project
        url_project_deposit = url_for(
            'invenio_deposit_rest.project_list',
            q='_deposit.id:{0}'.format(project_depid),
            access_token=access_token)
        search_record(url_project_deposit)
        assert data['_cds']['state']['file_download'] == states.SUCCESS

        # [[ PUBLISH THE PROJECT ]]
        reset_oauth2()
        res = client.post(url_for(
            'invenio_deposit_rest.project_actions',
            pid_value=project['_deposit']['id'],
            action='publish',
        ),
                          headers=json_headers)
        assert res.status_code == 202

        # run indexer
        RecordIndexer().process_bulk_queue()
        sleep(2)

        deposit = deposit_video_resolver(video_1_depid)

        # check if the files are inside elasticsearch
        # -> check video deposit
        data = search_record(url_video_deposit)
        assert data['_cds']['state']['file_download'] == states.SUCCESS
        assert file_dumps == data['_files']
        # check video record
        pid, record = deposit.fetch_published()
        url = url_for('invenio_records_rest.recid_list',
                      q='_deposit.pid.value:{0}'.format(pid.pid_value))
        data = search_record(url)
        assert record['_files'] == data['_files']
Example #34
def test_bucket_create_publish(api_client, deposit, json_auth_headers,
                               deposit_url, get_json, license_record,
                               auth_headers, minimal_deposit):
    """Test bucket features on deposit publish."""
    client = api_client
    headers = json_auth_headers
    auth = auth_headers

    # Create deposit
    res = client.post(deposit_url,
                      data=json.dumps(minimal_deposit),
                      headers=headers)
    links = get_json(res, code=201)['links']
    current_search.flush_and_refresh(index='deposits')

    # Upload file
    res = client.put(
        links['bucket'] + '/test.txt',
        input_stream=BytesIO(b'testfile'),
        headers=auth,
    )
    assert res.status_code == 200

    # Publish deposit
    res = client.post(links['publish'], headers=auth)
    data = get_json(res, code=202)

    # Bucket should be locked.
    res = client.put(
        links['bucket'] + '/newfile.txt',
        input_stream=BytesIO(b'testfile'),
        headers=auth,
    )
    assert res.status_code == 403

    # Get deposit.
    res = client.get(links['self'], headers=auth)
    assert res.status_code == 200

    # Get record.
    res = client.get(data['links']['record'])
    data = get_json(res, code=200)

    # Assert that the record and deposit buckets are not identical.
    assert data['links']['bucket'] != links['bucket']

    # Get record bucket.
    res = client.get(data['links']['bucket'])
    assert res.status_code == 200

    # Get file in bucket.
    res = client.get(data['links']['bucket'] + '/test.txt')
    assert res.status_code == 200

    # Record bucket is also locked.
    res = client.put(
        data['links']['bucket'] + '/newfile.txt',
        input_stream=BytesIO(b'testfile'),
        headers=auth,
    )
    assert res.status_code == 404

    # Delete deposit not allowed
    res = client.delete(links['self'], headers=auth)
    assert res.status_code == 403
Example #35
def test_bucket_new_version(api_client, deposit, json_auth_headers,
                            deposit_url, get_json, license_record,
                            auth_headers, minimal_deposit):
    """Test bucket features on record new version."""
    client = api_client
    headers = json_auth_headers
    auth = auth_headers

    # Create deposit
    res = client.post(deposit_url,
                      data=json.dumps(minimal_deposit),
                      headers=headers)
    links = get_json(res, code=201)['links']
    current_search.flush_and_refresh(index='deposits')

    # Upload file
    res = client.put(
        links['bucket'] + '/test.txt',
        input_stream=BytesIO(b'testfile'),
        headers=auth,
    )
    assert res.status_code == 200

    # Publish deposit
    res = client.post(links['publish'], headers=auth)
    data = get_json(res, code=202)

    # Get record
    res = client.get(data['links']['record'])
    data = get_json(res, code=200)
    rec_v1_bucket = data['links']['bucket']

    # Get deposit
    res = client.get(links['self'], headers=auth)
    links = get_json(res, code=200)['links']
    dep_v1_bucket = links['bucket']

    # Create new version
    res = client.post(links['newversion'], headers=auth)
    data = get_json(res, code=201)

    # Get new version deposit
    res = client.get(data['links']['latest_draft'], headers=auth)
    data = get_json(res, code=200)
    dep_v2_publish = data['links']['publish']
    dep_v2_bucket = data['links']['bucket']

    # Assert that all the buckets are different
    assert len(set([rec_v1_bucket, dep_v1_bucket, dep_v2_bucket])) == 3

    # Get file from old version deposit bucket
    res = client.get(dep_v1_bucket + '/test.txt', headers=auth)
    dep_v1_file_data = res.get_data(as_text=True)

    # Get file from old version record bucket
    res = client.get(rec_v1_bucket + '/test.txt')
    rec_v1_file_data = res.get_data(as_text=True)

    # Get file from new version deposit bucket
    res = client.get(dep_v2_bucket + '/test.txt', headers=auth)
    dep_v2_file_data = res.get_data(as_text=True)

    # Assert that the file is the same in the new version
    assert rec_v1_file_data == dep_v1_file_data == dep_v2_file_data

    # The new version deposit bucket is unlocked.
    res = client.put(
        dep_v2_bucket + '/newfile.txt',
        input_stream=BytesIO(b'testfile2'),
        headers=auth,
    )
    assert res.status_code == 200

    # Deleting files in new version deposit bucket is allowed
    res = client.delete(dep_v2_bucket + '/newfile.txt', headers=auth)
    assert res.status_code == 204

    # Try to publish the new version
    # Should fail (400), since the bucket contents are the same
    res = client.post(dep_v2_publish, headers=auth)
    data = get_json(res, code=400)

    # Add another file, so that the bucket has a different content
    res = client.put(
        dep_v2_bucket + '/newfile2.txt',
        input_stream=BytesIO(b'testfile3'),
        headers=auth,
    )
    assert res.status_code == 200

    # Publish new version deposit
    res = client.post(dep_v2_publish, headers=auth)
    data = get_json(res, code=202)

    # Get record
    res = client.get(data['links']['record'])
    data = get_json(res, code=200)
    rec_v2_bucket = data['links']['bucket']

    # Assert that all the buckets are different
    assert len(
        set([rec_v1_bucket, rec_v2_bucket, dep_v1_bucket, dep_v2_bucket])) == 4

    # Create another new version
    res = client.post(links['newversion'], headers=auth)
    data = get_json(res, code=201)

    # Get new version deposit
    res = client.get(data['links']['latest_draft'], headers=auth)
    data = get_json(res, code=200)

    dep_v3_bucket = data['links']['bucket']
    dep_v3_publish = data['links']['publish']

    # Try to publish the new version without changes (should fail as before)
    res = client.post(dep_v3_publish, headers=auth)
    data = get_json(res, code=400)

    # Deleting the file from v2 should be possible, but publishing should
    # also fail since the contents will be the same as the very first version.
    res = client.delete(dep_v3_bucket + '/newfile2.txt', headers=auth)
    assert res.status_code == 204

    res = client.post(dep_v3_publish, headers=auth)
    data = get_json(res, code=400)
Example #36
 def test_run_sbmlexport(self):
     with redirected_stdout(BytesIO()):
         main(args=['--model', self._model_dir, 'sbmlexport'])
Example #37
 def setup(self):
     self.rfile = BytesIO()
     self.wfile = BytesIO()
     self.requestline = ''
Example #38
 def test_iter(self):
     data = to_bytes("A line\nAnother line\nA final line\n")
     input_wrapper = InputWrapper(BytesIO(data))
     self.assertEquals(to_bytes("").join(input_wrapper), data, '')
Example #39
def test_init():
    bio = BytesIO()
    shape = [2, 3, 4]
    dtype = np.int32
    arr = np.arange(24, dtype=dtype).reshape(shape)
    bio.seek(16)
    bio.write(arr.tostring(order='F'))
    hdr = FunkyHeader(shape)
    ap = ArrayProxy(bio, hdr)
    assert_true(ap.file_like is bio)
    assert_equal(ap.shape, shape)
    # shape should be read only
    assert_raises(AttributeError, setattr, ap, 'shape', shape)
    # Get the data
    assert_array_equal(np.asarray(ap), arr)
    # Check we can modify the original header without changing the ap version
    hdr.shape[0] = 6
    assert_not_equal(ap.shape, shape)
    # Data stays the same, also
    assert_array_equal(np.asarray(ap), arr)
    # C order also possible
    bio = BytesIO()
    bio.seek(16)
    bio.write(arr.tostring(order='C'))
    ap = CArrayProxy(bio, FunkyHeader((2, 3, 4)))
    assert_array_equal(np.asarray(ap), arr)
Example #40
def test_detect_encoding_exception(app):
    f = BytesIO(u'Γκρήκ Στρίνγκ'.encode('utf-8'))

    with patch('cchardet.detect', Exception):
        assert detect_encoding(f) is None
Example #41
 def encode(self, text_utf8, text_latex, inputenc=None, errors='strict'):
     encoding = 'latex+' + inputenc if inputenc else 'latex'
     stream = BytesIO()
     writer = codecs.getwriter(encoding)(stream, errors=errors)
     writer.write(text_utf8)
     self.assertEqual(text_latex, stream.getvalue())
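
The same getwriter-over-BytesIO pattern with a stdlib codec; the 'latex' codec above comes from the latexcodec package (an assumption about the source's dependencies):

import codecs
from io import BytesIO

stream = BytesIO()
writer = codecs.getwriter('utf-8')(stream)  # wraps the byte stream with an encoder
writer.write(u'Γκρήκ Στρίνγκ')
assert stream.getvalue() == u'Γκρήκ Στρίνγκ'.encode('utf-8')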
Example #42
def GetMondrianImage():
    stream = BytesIO(GetMondrianData())
    return wx.Image(stream)
Example #43
 def dumps(cls, env):
     # type: (BuildEnvironment) -> unicode
     io = BytesIO()
     cls.dump(env, io)
     return io.getvalue()
Example #44
def test_proxy_slicing():
    shapes = (15, 16, 17)
    for n_dim in range(1, len(shapes) + 1):
        shape = shapes[:n_dim]
        arr = np.arange(np.prod(shape)).reshape(shape)
        for offset in (0, 20):
            hdr = Nifti1Header()
            hdr.set_data_offset(offset)
            hdr.set_data_dtype(arr.dtype)
            hdr.set_data_shape(shape)
            for order, klass in ('F', ArrayProxy), ('C', CArrayProxy):
                fobj = BytesIO()
                fobj.write(b'\0' * offset)
                fobj.write(arr.tostring(order=order))
                prox = klass(fobj, hdr)
                for sliceobj in slicer_samples(shape):
                    assert_array_equal(arr[sliceobj], prox[sliceobj])
    # Check slicing works with scaling
    hdr.set_slope_inter(2.0, 1.0)
    fobj = BytesIO()
    fobj.write(b'\0' * offset)
    fobj.write(arr.tostring(order='F'))
    prox = ArrayProxy(fobj, hdr)
    sliceobj = (None, slice(None), 1, -1)
    assert_array_equal(arr[sliceobj] * 2.0 + 1.0, prox[sliceobj])
Example #45
    def _gen_request(self,
                     method,
                     url,
                     params=utils.NoDefault,
                     headers=None,
                     extra_environ=None,
                     status=None,
                     upload_files=None,
                     expect_errors=False,
                     content_type=None):
        """
        Do a generic request.
        """

        if method == 'DELETE' and params is not utils.NoDefault:
            warnings.warn(('You are not supposed to send a body in a '
                           'DELETE request. Most web servers will ignore it'),
                          lint.WSGIWarning)

        environ = self._make_environ(extra_environ)

        inline_uploads = []

        # this supports OrderedDict
        if isinstance(params, dict) or hasattr(params, 'items'):
            params = list(params.items())

        if isinstance(params, (list, tuple)):
            inline_uploads = [
                v for (k, v) in params
                if isinstance(v, (forms.File, forms.Upload))
            ]

        if len(inline_uploads) > 0:
            content_type, params = self.encode_multipart(
                params, upload_files or ())
            environ['CONTENT_TYPE'] = content_type
        else:
            params = utils.encode_params(params, content_type)
            if upload_files or \
                (content_type and
                 to_bytes(content_type).startswith(b'multipart')):
                params = cgi.parse_qsl(params, keep_blank_values=True)
                content_type, params = self.encode_multipart(
                    params, upload_files or ())
                environ['CONTENT_TYPE'] = content_type
            elif params:
                environ.setdefault('CONTENT_TYPE',
                                   str('application/x-www-form-urlencoded'))

        if content_type is not None:
            environ['CONTENT_TYPE'] = content_type
        environ['REQUEST_METHOD'] = str(method)
        url = str(url)
        url = self._remove_fragment(url)
        req = self.RequestClass.blank(url, environ)
        if isinstance(params, text_type):
            params = params.encode(req.charset or 'utf8')
        req.environ['wsgi.input'] = BytesIO(params)
        req.content_length = len(params)
        if headers:
            req.headers.update(headers)
        return self.do_request(req, status=status, expect_errors=expect_errors)
Example #46
 def decode(self, text_utf8, text_latex, inputenc=None):
     encoding = 'latex+' + inputenc if inputenc else 'latex'
     stream = BytesIO(text_latex)
     reader = codecs.getreader(encoding)(stream)
     self.assertEqual(text_utf8, reader.read())
Example #47
        def roundtrip_through_xml(a):
            xmlbytes = a.to_xml_bytes()

            buf = BytesIO(xmlbytes)
            return Analysis.from_xml(buf)
Example #48
 def loads(cls, string, app=None):
     # type: (unicode, Sphinx) -> BuildEnvironment
     io = BytesIO(string)
     return cls.load(io, app)
Example #49
    def method(self, **kwargs):
        # Don't bother with a docstring; it will be overwritten by createMethod.

        for name in six.iterkeys(kwargs):
            if name not in parameters.argmap:
                raise TypeError('Got an unexpected keyword argument "%s"' %
                                name)

        # Remove args that have a value of None.
        keys = list(kwargs.keys())
        for name in keys:
            if kwargs[name] is None:
                del kwargs[name]

        for name in parameters.required_params:
            if name not in kwargs:
                raise TypeError('Missing required parameter "%s"' % name)

        for name, regex in six.iteritems(parameters.pattern_params):
            if name in kwargs:
                if isinstance(kwargs[name], six.string_types):
                    pvalues = [kwargs[name]]
                else:
                    pvalues = kwargs[name]
                for pvalue in pvalues:
                    if re.match(regex, pvalue) is None:
                        raise TypeError(
                            'Parameter "%s" value "%s" does not match the pattern "%s"'
                            % (name, pvalue, regex))

        for name, enums in six.iteritems(parameters.enum_params):
            if name in kwargs:
                # We need to handle the case of a repeated enum
                # name differently, since we want to handle both
                # arg='value' and arg=['value1', 'value2']
                if (name in parameters.repeated_params
                        and not isinstance(kwargs[name], six.string_types)):
                    values = kwargs[name]
                else:
                    values = [kwargs[name]]
                for value in values:
                    if value not in enums:
                        raise TypeError(
                            'Parameter "%s" value "%s" is not an allowed value in "%s"'
                            % (name, value, str(enums)))

        actual_query_params = {}
        actual_path_params = {}
        for key, value in six.iteritems(kwargs):
            to_type = parameters.param_types.get(key, 'string')
            # For repeated parameters we cast each member of the list.
            if key in parameters.repeated_params and type(value) == type([]):
                cast_value = [_cast(x, to_type) for x in value]
            else:
                cast_value = _cast(value, to_type)
            if key in parameters.query_params:
                actual_query_params[parameters.argmap[key]] = cast_value
            if key in parameters.path_params:
                actual_path_params[parameters.argmap[key]] = cast_value
        body_value = kwargs.get('body', None)
        media_filename = kwargs.get('media_body', None)

        if self._developerKey:
            actual_query_params['key'] = self._developerKey

        model = self._model
        if methodName.endswith('_media'):
            model = MediaModel()
        elif 'response' not in methodDesc:
            model = RawModel()

        headers = {}
        headers, params, query, body = model.request(headers,
                                                     actual_path_params,
                                                     actual_query_params,
                                                     body_value)

        expanded_url = uritemplate.expand(pathUrl, params)
        url = _urljoin(self._baseUrl, expanded_url + query)

        resumable = None
        multipart_boundary = ''

        if media_filename:
            # Ensure we end up with a valid MediaUpload object.
            if isinstance(media_filename, six.string_types):
                (media_mime_type,
                 encoding) = mimetypes.guess_type(media_filename)
                if media_mime_type is None:
                    raise UnknownFileType(media_filename)
                if not mimeparse.best_match([media_mime_type],
                                            ','.join(accept)):
                    raise UnacceptableMimeTypeError(media_mime_type)
                media_upload = MediaFileUpload(media_filename,
                                               mimetype=media_mime_type)
            elif isinstance(media_filename, MediaUpload):
                media_upload = media_filename
            else:
                raise TypeError('media_filename must be str or MediaUpload.')

            # Check the maxSize
            if media_upload.size(
            ) is not None and media_upload.size() > maxSize > 0:
                raise MediaUploadSizeError("Media larger than: %s" % maxSize)

            # Use the media path uri for media uploads
            expanded_url = uritemplate.expand(mediaPathUrl, params)
            url = _urljoin(self._baseUrl, expanded_url + query)
            if media_upload.resumable():
                url = _add_query_parameter(url, 'uploadType', 'resumable')

            if media_upload.resumable():
                # This is all we need to do for resumable uploads: if the body
                # exists, it gets sent in the first request; otherwise an empty
                # body is sent.
                resumable = media_upload
            else:
                # A non-resumable upload
                if body is None:
                    # This is a simple media upload
                    headers['content-type'] = media_upload.mimetype()
                    body = media_upload.getbytes(0, media_upload.size())
                    url = _add_query_parameter(url, 'uploadType', 'media')
                else:
                    # This is a multipart/related upload.
                    msgRoot = MIMEMultipart('related')
                    # msgRoot should not write out its own headers
                    setattr(msgRoot, '_write_headers', lambda self: None)

                    # attach the body as one part
                    msg = MIMENonMultipart(*headers['content-type'].split('/'))
                    msg.set_payload(body)
                    msgRoot.attach(msg)

                    # attach the media as the second part
                    msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
                    msg['Content-Transfer-Encoding'] = 'binary'

                    payload = media_upload.getbytes(0, media_upload.size())
                    msg.set_payload(payload)
                    msgRoot.attach(msg)
                    # encode the body: note that we can't use `as_string`, because
                    # it plays games with `From ` lines.
                    fp = BytesIO()
                    g = _BytesGenerator(fp, mangle_from_=False)
                    g.flatten(msgRoot, unixfrom=False)
                    body = fp.getvalue()

                    multipart_boundary = msgRoot.get_boundary()
                    headers['content-type'] = (
                        'multipart/related; '
                        'boundary="%s"') % multipart_boundary
                    url = _add_query_parameter(url, 'uploadType', 'multipart')

        logger.info('URL being requested: %s %s' % (httpMethod, url))
        return self._requestBuilder(self._http,
                                    model.response,
                                    url,
                                    method=httpMethod,
                                    body=body,
                                    headers=headers,
                                    methodId=methodId,
                                    resumable=resumable)
Example #50
 def parse_xml_bytes(self, xmlbytes):
     f = BytesIO(xmlbytes)
     a = Analysis.from_xml(f)
     f.close()
     return a
Example #51
 def _compress(self, text):
     contents = BytesIO()
     gz_fh = gzip.GzipFile(mode='wb', fileobj=contents)
     gz_fh.write(text)
     gz_fh.close()
     return contents.getvalue()
Example #52
 def for_string(cls, string, modname, srcname='<string>'):
     # type: (unicode, unicode, unicode) -> ModuleAnalyzer
     if isinstance(string, bytes):
         return cls(BytesIO(string), modname, srcname)
     return cls(StringIO(string), modname, srcname, decoded=True)
Example #53
def put_contract_document(self):
    from six import BytesIO
    from urllib import quote

    response = self.app.patch_json(
        "/agreements/{}?acc_token={}".format(self.agreement_id,
                                             self.agreement_token),
        {"data": {
            "status": "active"
        }})
    self.assertEqual(response.status, "200 OK")

    body = u"""--BOUNDARY\nContent-Disposition: form-data; name="file"; filename={}\nContent-Type: application/msword\n\ncontent\n""".format(
        u"\uff07")
    environ = self.app._make_environ()
    environ["CONTENT_TYPE"] = "multipart/form-data; boundary=BOUNDARY"
    environ["REQUEST_METHOD"] = "POST"
    req = self.app.RequestClass.blank(
        self.app._remove_fragment("/agreements/{}/documents".format(
            self.agreement_id)), environ)
    req.environ["wsgi.input"] = BytesIO(body.encode("utf8"))
    req.content_length = len(body)
    response = self.app.do_request(req, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["errors"][0]["description"],
                     "could not decode params")

    body = u"""--BOUNDARY\nContent-Disposition: form-data; name="file"; filename*=utf-8''{}\nContent-Type: application/msword\n\ncontent\n""".format(
        quote("укр.doc"))
    environ = self.app._make_environ()
    environ["CONTENT_TYPE"] = "multipart/form-data; boundary=BOUNDARY"
    environ["REQUEST_METHOD"] = "POST"
    req = self.app.RequestClass.blank(
        self.app._remove_fragment(
            "/agreements/{}/documents?acc_token={}".format(
                self.agreement_id, self.agreement_token)),
        environ,
    )
    body_bytes = body.encode(req.charset or "utf8")
    req.environ["wsgi.input"] = BytesIO(body_bytes)
    req.content_length = len(body_bytes)
    response = self.app.do_request(req)
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(u"укр.doc", response.json["data"]["title"])
    doc_id = response.json["data"]["id"]
    dateModified = response.json["data"]["dateModified"]
    self.assertIn(doc_id, response.headers["Location"])

    response = self.app.put(
        "/agreements/{}/documents/{}?acc_token={}".format(
            self.agreement_id, doc_id, self.agreement_token),
        upload_files=[("file", "name  name.doc", "content2")],
    )
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(doc_id, response.json["data"]["id"])
    if self.docservice:
        self.assertIn("Signature=", response.json["data"]["url"])
        self.assertIn("KeyID=", response.json["data"]["url"])
        self.assertNotIn("Expires=", response.json["data"]["url"])
        key = response.json["data"]["url"].split("/")[-1].split("?")[0]
        contract = self.db.get(self.agreement_id)
        self.assertIn(key, contract["documents"][-1]["url"])
        self.assertIn("Signature=", contract["documents"][-1]["url"])
        self.assertIn("KeyID=", contract["documents"][-1]["url"])
        self.assertNotIn("Expires=", contract["documents"][-1]["url"])
    response = self.app.get("/agreements/{}/documents/{}".format(
        self.agreement_id, doc_id))
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(doc_id, response.json["data"]["id"])
    self.assertEqual("name name.doc", response.json["data"]["title"])
    dateModified2 = response.json["data"]["dateModified"]
    self.assertTrue(dateModified < dateModified2)
    self.assertEqual(
        dateModified,
        response.json["data"]["previousVersions"][0]["dateModified"])

    response = self.app.get("/agreements/{}/documents?all=true".format(
        self.agreement_id))
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(dateModified, response.json["data"][0]["dateModified"])
    self.assertEqual(dateModified2, response.json["data"][1]["dateModified"])

    response = self.app.post(
        "/agreements/{}/documents?acc_token={}".format(self.agreement_id,
                                                       self.agreement_token),
        upload_files=[("file", "name.doc", "content")],
    )
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    doc_id = response.json["data"]["id"]
    dateModified = response.json["data"]["dateModified"]
    self.assertIn(doc_id, response.headers["Location"])

    response = self.app.get("/agreements/{}/documents".format(
        self.agreement_id))
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(dateModified2, response.json["data"][0]["dateModified"])
    self.assertEqual(dateModified, response.json["data"][1]["dateModified"])
    response = self.app.put(
        "/agreements/{}/documents/{}?acc_token={}".format(
            self.agreement_id, doc_id, self.agreement_token),
        status=404,
        upload_files=[("invalid_name", "name.doc", "content")],
    )
    self.assertEqual(response.status, "404 Not Found")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(response.json["errors"], [{
        u"description": u"Not Found",
        u"location": u"body",
        u"name": u"file"
    }])
    response = self.app.put(
        "/agreements/{}/documents/{}?acc_token={}".format(
            self.agreement_id, doc_id, self.agreement_token),
        "content3",
        content_type="application/msword",
    )
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(doc_id, response.json["data"]["id"])
    if self.docservice:
        self.assertIn("Signature=", response.json["data"]["url"])
        self.assertIn("KeyID=", response.json["data"]["url"])
        self.assertNotIn("Expires=", response.json["data"]["url"])
        key = response.json["data"]["url"].split("/")[-1].split("?")[0]
        contract = self.db.get(self.agreement_id)
        self.assertIn(key, contract["documents"][-1]["url"])
        self.assertIn("Signature=", contract["documents"][-1]["url"])
        self.assertIn("KeyID=", contract["documents"][-1]["url"])
        self.assertNotIn("Expires=", contract["documents"][-1]["url"])
    else:
        key = response.json["data"]["url"].split("?")[-1].split("=")[-1]
    if self.docservice:
        response = self.app.get("/agreements/{}/documents/{}?download={}".format(
            self.agreement_id, doc_id, key))
        self.assertEqual(response.status, "200 OK")
        self.assertEqual(response.content_type, "application/json")

    response = self.app.get("/agreements/{}/documents".format(
        self.agreement_id))
    self.assertEqual(response.status, "200 OK")
    doc_id = response.json["data"][0]["id"]
    response = self.app.patch_json(
        "/agreements/{}/documents/{}?acc_token={}".format(
            self.agreement_id, doc_id, self.agreement_token),
        {"data": {
            "documentType": None
        }},
    )
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")

    response = self.app.patch_json(
        "/agreements/{}?acc_token={}".format(self.agreement_id,
                                             self.agreement_token),
        {"data": {
            "status": "terminated"
        }},
        content_type="application/json",
    )
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.json["data"]["status"], "terminated")
    response = self.app.put(
        "/agreements/{}/documents/{}?acc_token={}".format(
            self.agreement_id, doc_id, self.agreement_token),
        "contentX",
        content_type="application/msword",
        status=403,
    )
    self.assertEqual(response.status, "403 Forbidden")
    self.assertEqual(
        response.json["errors"],
        [{
            u"description":
            u"Can't update document in current (terminated) agreement status",
            u"location": u"body",
            u"name": u"data",
        }],
    )
    # can't update document in current (terminated) agreement status
    response = self.app.patch_json(
        "/agreements/{}/documents/{}?acc_token={}".format(
            self.agreement_id, doc_id, self.agreement_token),
        {"data": {
            "documentType": None
        }},
        status=403,
    )
    self.assertEqual(response.status, "403 Forbidden")
    self.assertEqual(
        response.json["errors"],
        [{
            u"description":
            u"Can't update document in current (terminated) agreement status",
            u"location": u"body",
            u"name": u"data",
        }],
    )
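
The second request above exercises RFC 2231 `filename*=` encoding for non-ASCII filenames. A standalone Python 3 illustration (the header string is hypothetical, not taken from the API):

from urllib.parse import quote, unquote

filename = u"укр.doc"
header = "filename*=utf-8''{}".format(quote(filename))
assert header == "filename*=utf-8''%D1%83%D0%BA%D1%80.doc"
assert unquote(header.split("''")[1]) == filename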
Example #54
    def get_qrcode(self, data):
        img = qrcode.make(data, image_factory=SvgPathImage)
        buf = BytesIO()
        img.save(buf)
        return buf.getvalue()
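
Hedged usage note: with no `image_factory`, `qrcode.make` returns a PIL-backed image, while `SvgPathImage` above comes from `qrcode.image.svg`. A minimal PNG variant:

import qrcode
from io import BytesIO

img = qrcode.make('https://example.com')  # default factory is PIL-backed
buf = BytesIO()
img.save(buf)  # qrcode's PIL wrapper writes PNG by default
assert buf.getvalue().startswith(b'\x89PNG')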
Example #55
    def test_extract_metadata(self):
        self.app.register('HEAD', '/v1/a/c?extract-archive=tar', HTTPNoContent,
                          {}, None)
        self.app.register('PUT', '/v1/a/c/obj1?extract-archive=tar',
                          HTTPCreated, {}, None)
        self.app.register('PUT', '/v1/a/c/obj2?extract-archive=tar',
                          HTTPCreated, {}, None)

        # It's a real pain to instantiate TarInfo objects directly; they
        # really want to come from a file on disk or a tarball. So, we write
        # out some files and add pax headers to them as they get placed into
        # the tarball.
        with open(os.path.join(self.testdir, "obj1"), "w") as fh1:
            fh1.write("obj1 contents\n")
        with open(os.path.join(self.testdir, "obj2"), "w") as fh2:
            fh2.write("obj2 contents\n")

        tar_ball = BytesIO()
        tar_file = tarfile.TarFile.open(fileobj=tar_ball,
                                        mode="w",
                                        format=tarfile.PAX_FORMAT)

        # With GNU tar 1.27.1 or later (possibly 1.27 as well), a file with
        # extended attribute user.thingy = dingy gets put into the tarfile
        # with pax_headers containing key/value pair
        # (SCHILY.xattr.user.thingy, dingy), both unicode strings (py2: type
        # unicode, not type str).
        #
        # With BSD tar (libarchive), you get key/value pair
        # (LIBARCHIVE.xattr.user.thingy, dingy), which strikes me as
        # gratuitous incompatibility.
        #
        # Still, we'll support uploads with both. Just heap more code on the
        # problem until you can forget it's under there.
        with open(os.path.join(self.testdir, "obj1")) as fh1:
            tar_info1 = tar_file.gettarinfo(fileobj=fh1, arcname="obj1")
            tar_info1.pax_headers[u'SCHILY.xattr.user.mime_type'] = \
                u'application/food-diary'
            tar_info1.pax_headers[u'SCHILY.xattr.user.meta.lunch'] = \
                u'sopa de albóndigas'
            tar_info1.pax_headers[
                u'SCHILY.xattr.user.meta.afternoon-snack'] = \
                u'gigantic bucket of coffee'
            tar_file.addfile(tar_info1, fh1)

        with open(os.path.join(self.testdir, "obj2")) as fh2:
            tar_info2 = tar_file.gettarinfo(fileobj=fh2, arcname="obj2")
            tar_info2.pax_headers[
                u'LIBARCHIVE.xattr.user.meta.muppet'] = u'bert'
            tar_info2.pax_headers[
                u'LIBARCHIVE.xattr.user.meta.cat'] = u'fluffy'
            tar_info2.pax_headers[
                u'LIBARCHIVE.xattr.user.notmeta'] = u'skipped'
            tar_file.addfile(tar_info2, fh2)

        tar_ball.seek(0)

        req = Request.blank('/v1/a/c?extract-archive=tar')
        req.environ['REQUEST_METHOD'] = 'PUT'
        req.environ['wsgi.input'] = tar_ball
        req.headers['transfer-encoding'] = 'chunked'
        req.headers['accept'] = 'application/json;q=1.0'

        resp = req.get_response(self.bulk)
        self.assertEqual(resp.status_int, 200)

        # sanity check to make sure the upload worked
        upload_status = utils.json.loads(resp.body)
        self.assertEqual(upload_status['Number Files Created'], 2)

        put1_headers = HeaderKeyDict(self.app.calls_with_headers[1][2])
        self.assertEqual(put1_headers.get('Content-Type'),
                         'application/food-diary')
        self.assertEqual(put1_headers.get('X-Object-Meta-Lunch'),
                         'sopa de alb\xc3\xb3ndigas')
        self.assertEqual(put1_headers.get('X-Object-Meta-Afternoon-Snack'),
                         'gigantic bucket of coffee')

        put2_headers = HeaderKeyDict(self.app.calls_with_headers[2][2])
        self.assertEqual(put2_headers.get('X-Object-Meta-Muppet'), 'bert')
        self.assertEqual(put2_headers.get('X-Object-Meta-Cat'), 'fluffy')
        self.assertEqual(put2_headers.get('Content-Type'), None)
        self.assertEqual(put2_headers.get('X-Object-Meta-Blah'), None)
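
The pax-header round trip this test depends on can be seen in isolation; a minimal stdlib sketch (no swift involved) writing and re-reading one extended attribute:

import tarfile
from io import BytesIO

buf = BytesIO()
with tarfile.TarFile.open(fileobj=buf, mode='w',
                          format=tarfile.PAX_FORMAT) as tf:
    info = tarfile.TarInfo('obj1')
    data = b'obj1 contents\n'
    info.size = len(data)
    info.pax_headers[u'SCHILY.xattr.user.meta.lunch'] = u'sopa de albóndigas'
    tf.addfile(info, BytesIO(data))

buf.seek(0)
with tarfile.TarFile.open(fileobj=buf) as tf:
    member = tf.getmember('obj1')
    # pax headers survive the archive round trip on the TarInfo
    assert member.pax_headers[u'SCHILY.xattr.user.meta.lunch'] == \
        u'sopa de albóndigas'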
Example #56
def download_search(info):
    dltype = info.get('Submit')
    delim = 'bracket'
    com = r'\\'  # single line comment start
    com1 = ''  # multiline comment start
    com2 = ''  # multiline comment end
    filename = 'fields.gp'
    mydate = time.strftime("%d %B %Y")
    if dltype == 'sage':
        com = '#'
        filename = 'fields.sage'
    if dltype == 'mathematica':
        com = ''
        com1 = '(*'
        com2 = '*)'
        delim = 'brace'
        filename = 'fields.ma'
    if dltype == 'magma':
        com = ''
        com1 = '/*'
        com2 = '*/'
        delim = 'magma'
        filename = 'fields.m'
    s = com1 + "\n"
    s += com + ' Global number fields downloaded from the LMFDB on %s\n' % mydate
    s += com + ' Below is a list called data. Each entry has the form:\n'
    s += com + '   [label, polynomial, discriminant, t-number, class group]\n'
    s += com + ' Here the t-number is for the Galois group\n'
    s += com + ' If a class group was not computed, the entry is [-1]\n'
    s += '\n' + com2
    s += '\n'
    if dltype == 'magma':
        s += 'data := ['
    else:
        s += 'data = ['
    s += '\\\n'
    Qx = PolynomialRing(QQ, 'x')
    # reissue saved query here
    res = db.nf_fields.search(ast.literal_eval(info["query"]))
    for f in res:
        pol = Qx(f['coeffs'])
        D = f['disc_abs'] * f['disc_sign']
        gal_t = int(f['galois_label'].split('T')[1])
        if 'class_group' in f:
            cl = f['class_group']
        else:
            cl = [-1]
        entry = ', '.join([
            '"' + str(f['label']) + '"',
            str(pol),
            str(D),
            str(gal_t),
            str(cl)
        ])
        s += '[' + entry + ']' + ',\\\n'
    s = s[:-3]
    if dltype == 'gp':
        s += '];\n'
    else:
        s += ']\n'
    if delim == 'brace':
        s = s.replace('[', '{')
        s = s.replace(']', '}')
    if delim == 'magma':
        s = s.replace('[', '[*')
        s = s.replace(']', '*]')
        s += ';'
    strIO = BytesIO()
    strIO.write(s.encode('utf-8'))
    strIO.seek(0)
    return send_file(strIO,
                     attachment_filename=filename,
                     as_attachment=True,
                     add_etags=False)
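
The closing pattern — write generated text into a BytesIO, rewind, hand it to Flask's `send_file` — recurs throughout these examples. A minimal sketch (assumes the pre-2.0 Flask keyword names used above; newer Flask renames `attachment_filename` to `download_name`):

from io import BytesIO
from flask import Flask, send_file

app = Flask(__name__)

@app.route('/download')
def download():
    strIO = BytesIO()
    strIO.write(u'data = [1, 2, 3]\n'.encode('utf-8'))
    strIO.seek(0)  # rewind so send_file reads from the start
    return send_file(strIO, attachment_filename='fields.gp',
                     as_attachment=True, add_etags=False)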
Example #57
    def test_tail_empty(self):
        """tail of empty object returns empty"""
        (actual, offset) = tail(BytesIO(), 1024, 1024)

        self.assertEqual(actual, b'')
        self.assertEqual(offset, 0)
Example #58
def input_fn(serialized_input_data, content_type):
    print('Deserializing the input data.')
    if content_type == CONTENT_TYPE:
        stream = BytesIO(serialized_input_data)
        return np.load(stream)
    raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
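
A hedged round trip for `input_fn` (assuming `CONTENT_TYPE` names the `.npy` format): `np.save` produces exactly the bytes `np.load` expects back.

import numpy as np
from io import BytesIO

arr = np.arange(6).reshape(2, 3)
buf = BytesIO()
np.save(buf, arr)  # serialize to the in-memory buffer
restored = np.load(BytesIO(buf.getvalue()))
assert (restored == arr).all()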
Example #59
def download_search(info):
    dltype = info['Submit']
    delim = 'bracket'
    com = r'\\'  # single line comment start
    com1 = ''  # multiline comment start
    com2 = ''  # multiline comment end
    filename = 'elliptic_curves.gp'
    mydate = time.strftime("%d %B %Y")
    if dltype == 'sage':
        com = '#'
        filename = 'elliptic_curves.sage'
    if dltype == 'magma':
        com = ''
        com1 = '/*'
        com2 = '*/'
        delim = 'magma'
        filename = 'elliptic_curves.m'
    s = com1 + "\n"
    s += com + ' Elliptic curves downloaded from the LMFDB on %s.\n' % mydate
    s += com + ' Below is a list called data. Each entry has the form:\n'
    s += com + '   [[field_poly],[Weierstrass Coefficients, constant first in increasing degree]]\n'
    s += '\n' + com2
    s += '\n'

    if dltype == 'magma':
        s += 'P<x> := PolynomialRing(Rationals()); \n'
        s += 'data := ['
    elif dltype == 'sage':
        s += 'R.<x> = QQ[]; \n'
        s += 'data = [ '
    else:
        s += 'data = [ '
    s += '\\\n'
    nf_dict = {}
    for f in db.ec_nfcurves.search(ast.literal_eval(info["query"]),
                                   ['field_label', 'ainvs']):
        nf = str(f['field_label'])
        # look up number field and see if we already have the min poly
        if nf in nf_dict:
            poly = nf_dict[nf]
        else:
            poly = str(WebNumberField(f['field_label']).poly())
            nf_dict[nf] = poly
        entry = str(f['ainvs'])
        entry = entry.replace('u', '')
        entry = entry.replace('\'', '')
        entry = entry.replace(';', '],[')
        s += '[[' + poly + '], [[' + entry + ']]],\\\n'
    s = s[:-3]
    s += ']\n'

    if delim == 'brace':
        s = s.replace('[', '{')
        s = s.replace(']', '}')
    if delim == 'magma':
        s = s.replace('[', '[*')
        s = s.replace(']', '*]')
        s += ';'
    strIO = BytesIO()
    strIO.write(s.encode('utf-8'))
    strIO.seek(0)
    return send_file(strIO,
                     attachment_filename=filename,
                     as_attachment=True,
                     add_etags=False)
Example #60
    def test_tail_noop(self):
        """tail returns all content if it fits in requested size"""
        (actual, offset) = tail(BytesIO(SAMPLE_STRING), 1024, 1024)

        self.assertEqual(actual, SAMPLE_STRING)
        self.assertEqual(offset, len(SAMPLE_STRING))
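
The real `tail` is not shown in these examples; one possible implementation consistent with both tests (an assumption, sketched for illustration): read at most `size` bytes from the end of the stream and report the end offset.

import os
from io import BytesIO

def tail(fileobj, size, block_size):
    # block_size is accepted for signature parity; this sketch reads in one go.
    fileobj.seek(0, os.SEEK_END)
    end = fileobj.tell()
    start = max(0, end - size)
    fileobj.seek(start)
    return fileobj.read(end - start), end

assert tail(BytesIO(), 1024, 1024) == (b'', 0)
assert tail(BytesIO(b'0123456789'), 4, 4) == (b'6789', 10)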