Example #1
def test_read_opts():
    # tests if read is seeing option sets, at initialization and after
    # initialization
    arr = np.arange(6).reshape(1,6)
    stream = BytesIO()
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    back_dict = rdr.get_variables()
    rarr = back_dict['a']
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, squeeze_me=True)
    assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))
    rdr.squeeze_me = False
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, byte_order=boc.native_code)
    assert_array_equal(rdr.get_variables()['a'], arr)
    # inverted byte code leads to error on read because of swapped
    # header etc
    rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)
    assert_raises(Exception, rdr.get_variables)
    rdr.byte_order = boc.native_code
    assert_array_equal(rdr.get_variables()['a'], arr)
    arr = np.array(['a string'])
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    assert_array_equal(rdr.get_variables()['a'], arr)
    rdr = MatFile5Reader(stream, chars_as_strings=False)
    carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))
    assert_array_equal(rdr.get_variables()['a'], carr)
    rdr.chars_as_strings = True
    assert_array_equal(rdr.get_variables()['a'], arr)
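Most of the snippets on this page reuse a single BytesIO by calling truncate(0) followed by seek(0), as the test above does before writing the string array. A minimal sketch of that reset idiom (plain standard library, nothing from the example assumed):

from io import BytesIO

buf = BytesIO()
buf.write(b'first payload')
buf.truncate(0)   # discard the stored bytes
buf.seek(0)       # move the cursor back; otherwise the next write leaves a gap of NUL bytes
buf.write(b'second payload')
assert buf.getvalue() == b'second payload'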
Example #2
def tar_stream(store, tree, mtime, format=''):
    """Generate a tar stream for the contents of a Git tree.

    Returns a generator that lazily assembles a .tar.gz archive, yielding it in
    pieces (bytestrings). To obtain the complete .tar.gz binary file, simply
    concatenate these chunks.

    :param store: Object store to retrieve objects from
    :param tree: Tree object for the tree root
    :param mtime: UNIX timestamp that is assigned as the modification time for
        all files
    :param format: Optional compression format for tarball
    :return: Bytestrings
    """
    buf = BytesIO()
    with closing(tarfile.open(None, "w:%s" % format, buf)) as tar:
        for entry_abspath, entry in _walk_tree(store, tree):
            try:
                blob = store[entry.sha]
            except KeyError:
                # Entry probably refers to a submodule, which we don't yet support.
                continue
            data = ChunkedBytesIO(blob.chunked)

            info = tarfile.TarInfo()
            info.name = entry_abspath.decode('ascii') # tarfile only works with ascii.
            info.size = blob.raw_length()
            info.mode = entry.mode
            info.mtime = mtime

            tar.addfile(info, data)
            yield buf.getvalue()
            buf.truncate(0)
            buf.seek(0)
    yield buf.getvalue()
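The docstring above describes streaming a tarball in pieces: after each member is added, the accumulated bytes are yielded and the buffer is reset with truncate(0)/seek(0). A minimal, dulwich-free sketch of the same pattern (the helper name and the dict-of-files input are illustrative, not from the source):

import tarfile
from contextlib import closing
from io import BytesIO

def tar_stream_from_dict(files, mtime=0):
    # files: mapping of archive name -> bytes payload (stand-in for a Git tree)
    buf = BytesIO()
    with closing(tarfile.open(None, "w:gz", buf)) as tar:
        for name, payload in files.items():
            info = tarfile.TarInfo(name)
            info.size = len(payload)
            info.mtime = mtime
            tar.addfile(info, BytesIO(payload))
            yield buf.getvalue()   # hand out what has been written so far
            buf.truncate(0)        # then reset the buffer for the next member
            buf.seek(0)
    yield buf.getvalue()           # trailer written when the tarfile is closed

archive_bytes = b''.join(tar_stream_from_dict({'a.txt': b'hello', 'b.txt': b'world'}))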
Example #3
def test_empty_string():
    # make sure reading empty string does not raise error
    estring_fname = pjoin(test_data_path, 'single_empty_string.mat')
    fp = open(estring_fname, 'rb')
    rdr = MatFile5Reader(fp)
    d = rdr.get_variables()
    fp.close()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    # empty string round trip.  Matlab cannot distinguish
    # between a string array that is empty, and a string array
    # containing a single empty string, because it stores strings as
    # arrays of char.  There is no way of having an array of char that
    # is not empty, but contains an empty string.
    stream = BytesIO()
    savemat(stream, {'a': np.array([''])})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': np.array([], dtype='U1')})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    stream.close()
Example #4
class CairoContext:
    def __init__(self, width, height):
        self.outbuf  = BytesIO()
        self.surface = cairo.PDFSurface(self.outbuf, width, height)
        self.context = cairo.Context(self.surface)

    def set_size(self, w, h):
        self.surface.set_size(w, h)

    def render(self, svg):
        if not svg.render_cairo(self.context):
            log.critical("SVG failed to render")
        else:
            # flush the page to the drawing surface
            self.surface.show_page()

    def finish(self):
        "Closes the drawing surface and pushes the last data out"
        self.surface.finish()
        return self.flush()

    def flush(self):
        "Returns data from the buffer and resets buffer to 0"
        out = self.outbuf.getvalue()
        self.outbuf.seek(0)
        self.outbuf.truncate(0)
        return out
Example #5
class TruncatedTailPipe(object):
    """
    Truncate the last `tail_size` bytes from the stream.
    """

    def __init__(self, output=None, tail_size=16):
        self.tail_size = tail_size
        self.output = output or BytesIO()
        self.buffer = BytesIO()

    def write(self, data):
        self.buffer.write(data)
        if self.buffer.tell() > self.tail_size:
            self._truncate_tail()

    def _truncate_tail(self):
        overflow_size = self.buffer.tell() - self.tail_size
        self.buffer.seek(0)
        self.output.write(self.buffer.read(overflow_size))
        remaining = self.buffer.read()
        self.buffer.seek(0)
        self.buffer.write(remaining)
        self.buffer.truncate()

    def close(self):
        return self.output
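A usage sketch for the class above (assuming BytesIO has been imported from io): everything except the final tail_size bytes is passed through to the output stream.

pipe = TruncatedTailPipe(tail_size=4)
pipe.write(b'hello world!')
pipe.write(b'tail')
out = pipe.close()
# the trailing 4 bytes (b'tail') stay in the internal buffer and never reach the output
assert out.getvalue() == b'hello world!'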
Example #6
class UnicodeWriter(object):
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """

    def __init__(self, dialect=csv.excel_tab, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = BytesIO() if six.PY2 else StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)

    def _encode(self, item):
        if six.PY2:
            encoded = unicode(item).encode('utf-8')
        else:
            encoded = str(item)

        return encoded

    def writerow(self, row):
        self.writer.writerow([self._encode(s) for s in row])

    def get_values(self):
        # Fetch UTF-8 output from the queue ...
        ret = self.queue.getvalue()
        # empty queue
        self.queue.truncate(0)
        if six.PY2:
            return ret.lstrip(b'\0')
        return ret.encode('utf-8').lstrip(b'\0')

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
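The lstrip(b'\0') calls in get_values() exist because truncate(0) is used without a matching seek(0): truncating does not move the stream position, so the next writerow() pads the gap with NUL characters. A short illustration of that behaviour with a plain StringIO:

from io import StringIO

q = StringIO()
q.write('abc')
q.truncate(0)               # size is now 0, but the position is still 3
q.write('xyz')
print(repr(q.getvalue()))   # '\x00\x00\x00xyz' -- hence the stripping of NUL characters above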
Example #7
class StreamCapturer(Thread):
    started = False
    def __init__(self):
        super(StreamCapturer, self).__init__()
        self.streams = []
        self.buffer = BytesIO()
        self.streams_lock = Lock()
        self.buffer_lock = Lock()
        self.stream_added = Event()
        self.stop = Event()

    def run(self):
        self.started = True
        while not self.stop.is_set():
            with self.streams_lock:
                streams = self.streams

            if not streams:
                self.stream_added.wait(timeout=1)
                self.stream_added.clear()
                continue

            ready = select(streams, [], [], 0.5)[0]
            dead = []
            with self.buffer_lock:
                for fd in ready:
                    try:
                        self.buffer.write(os.read(fd, 1024))
                    except OSError as e:
                        import errno
                        if e.errno == errno.EBADF:
                            dead.append(fd)
                        else:
                            raise

            with self.streams_lock:
                for fd in dead:
                    self.streams.remove(fd)
    
    def add_stream(self, fd):
        with self.streams_lock:
            self.streams.append(fd)
        self.stream_added.set()
    
    def remove_stream(self, fd):
        with self.streams_lock:
            self.streams.remove(fd)
        
    def reset_buffer(self):
        with self.buffer_lock:
            self.buffer.truncate(0)
            self.buffer.seek(0)
    
    def get_buffer(self):
        with self.buffer_lock:
            return self.buffer.getvalue()
    
    def ensure_started(self):
        if not self.started:
            self.start()
Example #8
        def generator():
            # python 2 needs bytes while python 3 needs unicode
            if six.PY2:
                stringio = BytesIO()
            else:
                stringio = StringIO()
            csv_writer = csv.writer(stringio)

            it = iter(resultdb.select(project))
            first_30 = []
            for result in it:
                first_30.append(result)
                if len(first_30) >= 30:
                    break
            common_fields, _ = result_formater(first_30)
            common_fields_l = sorted(common_fields)

            csv_writer.writerow([toString('url')]
                                + [toString(x) for x in common_fields_l]
                                + [toString('...')])
            for result in itertools.chain(first_30, it):
                other = {}
                for k, v in iteritems(result['result']):
                    if k not in common_fields:
                        other[k] = v
                csv_writer.writerow(
                    [toString(result['url'])]
                    + [toString(result['result'].get(k, '')) for k in common_fields_l]
                    + [toString(other)]
                )
                yield stringio.getvalue()
                stringio.truncate(0)
Example #9
class ChunkBuffer:
    BUFFER_SIZE = 1 * 1024 * 1024

    def __init__(self, key, chunker_params=CHUNKER_PARAMS):
        self.buffer = BytesIO()
        self.packer = msgpack.Packer(unicode_errors='surrogateescape')
        self.chunks = []
        self.key = key
        self.chunker = Chunker(self.key.chunk_seed, *chunker_params)

    def add(self, item):
        self.buffer.write(self.packer.pack(StableDict(item)))
        if self.is_full():
            self.flush()

    def write_chunk(self, chunk):
        raise NotImplementedError

    def flush(self, flush=False):
        if self.buffer.tell() == 0:
            return
        self.buffer.seek(0)
        chunks = list(bytes(s) for s in self.chunker.chunkify(self.buffer))
        self.buffer.seek(0)
        self.buffer.truncate(0)
        # Leave the last partial chunk in the buffer unless flush is True
        end = None if flush or len(chunks) == 1 else -1
        for chunk in chunks[:end]:
            self.chunks.append(self.write_chunk(chunk))
        if end == -1:
            self.buffer.write(chunks[-1])

    def is_full(self):
        return self.buffer.tell() > self.BUFFER_SIZE
Example #10
class CsvDictsAdapter(object):
    """Provide a DataChange generator and it provides a file-like object which returns csv data"""
    def __init__(self, source_generator):
        self.source = source_generator
        self.buffer = BytesIO()
        self.csv = None
        self.add_header = False

    def __iter__(self):
        return self

    def write_header(self):
        self.add_header = True

    def next(self):
        row = self.source.next()

        self.buffer.truncate(0)
        self.buffer.seek(0)

        if not self.csv:
            self.csv = csv.DictWriter(self.buffer, row.keys(), quoting=csv.QUOTE_NONNUMERIC, encoding='utf-8')
            self.add_header = True
        if self.add_header:
            if hasattr(self.csv, 'writeheader'):
                self.csv.writeheader()
            else:
                self.csv.writerow(dict((fn, fn) for fn in self.csv.fieldnames))
            self.add_header = False

        self.csv.writerow(row)
        self.buffer.seek(0)
        return self.buffer.read()
Example #11
def test_str_round():
    # from report by Angus McMorland on mailing list 3 May 2010
    stream = BytesIO()
    in_arr = np.array(['Hello', 'Foob'])
    out_arr = np.array(['Hello', 'Foob '])
    savemat(stream, dict(a=in_arr))
    res = loadmat(stream)
    # resulted in ['HloolFoa', 'elWrdobr']
    assert_array_equal(res['a'], out_arr)
    stream.truncate(0)
    stream.seek(0)
    # Make Fortran ordered version of string
    in_str = in_arr.tostring(order='F')
    in_from_str = np.ndarray(shape=in_arr.shape,
                             dtype=in_arr.dtype,
                             order='F',
                             buffer=in_str)
    savemat(stream, dict(a=in_from_str))
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr)
    # unicode save did lead to buffer too small error
    stream.truncate(0)
    stream.seek(0)
    in_arr_u = in_arr.astype('U')
    out_arr_u = out_arr.astype('U')
    savemat(stream, {'a': in_arr_u})
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr_u)
Example #12
class UnicodeWriter:
    """
    CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    We are using this custom UnicodeWriter for python versions 2.x
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        self.queue = BytesIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        self.writer.writerow([s.encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
Example #13
def test_array_file_scales():
    # Test scaling works for max, min when going from larger to smaller type,
    # and from float to integer.
    bio = BytesIO()
    for in_type, out_type, err in ((np.int16, np.int16, None),
                                   (np.int16, np.int8, None),
                                   (np.uint16, np.uint8, None),
                                   (np.int32, np.int8, None),
                                   (np.float32, np.uint8, None),
                                   (np.float32, np.int16, None)):
        out_dtype = np.dtype(out_type)
        arr = np.zeros((3,), dtype=in_type)
        info = type_info(in_type)
        arr[0], arr[1] = info['min'], info['max']
        if not err is None:
            assert_raises(err, calculate_scale, arr, out_dtype, True)
            continue
        slope, inter, mn, mx = calculate_scale(arr, out_dtype, True)
        array_to_file(arr, bio, out_type, 0, inter, slope, mn, mx)
        bio.seek(0)
        arr2 = array_from_file(arr.shape, out_dtype, bio)
        arr3 = apply_read_scaling(arr2, slope, inter)
        # Max rounding error for integer type
        max_miss = slope / 2.
        assert_true(np.all(np.abs(arr - arr3) <= max_miss))
        bio.truncate(0)
        bio.seek(0)
Example #14
def tar_stream(repo, tree, mtime, format=''):
    """
    Returns a generator that lazily assembles a .tar.gz archive, yielding it in
    pieces (bytestrings). To obtain the complete .tar.gz binary file, simply
    concatenate these chunks.

    'repo' and 'tree' are the dulwich Repo and Tree objects the archive shall be
    created from. 'mtime' is a UNIX timestamp that is assigned as the modification
    time of all files in the resulting .tar.gz archive.
    """
    buf = BytesIO()
    with closing(tarfile.open(None, "w:%s" % format, buf)) as tar:
        for entry_abspath, entry in walk_tree(repo, tree):
            try:
                blob = repo[entry.sha]
            except KeyError:
                # Entry probably refers to a submodule, which we don't yet support.
                continue
            data = ListBytesIO(blob.chunked)

            info = tarfile.TarInfo()
            info.name = entry_abspath
            info.size = blob.raw_length()
            info.mode = entry.mode
            info.mtime = mtime

            tar.addfile(info, data)
            yield buf.getvalue()
            buf.truncate(0)
            buf.seek(0)
    yield buf.getvalue()
Example #15
 def _download_part( self, part_num ):
     """
     Download a part from the source URL. Returns a BytesIO buffer. The buffer's tell() method
     will return the size of the downloaded part, which may be less than the requested part
     size if the part is the last one for the URL.
     """
     buf = BytesIO( )
     with closing( pycurl.Curl( ) ) as c:
         c.setopt( c.URL, self.url )
         c.setopt( c.WRITEDATA, buf )
         c.setopt( c.FAILONERROR, 1 )
         start, end = self._get_part_range( part_num )
         c.setopt( c.RANGE, "%i-%i" % (start, end - 1) )
         try:
             c.perform( )
         except pycurl.error as e:
             error_code, message = e
             if error_code == c.E_BAD_DOWNLOAD_RESUME:  # bad range for FTP
                 pass
             elif error_code == c.E_HTTP_RETURNED_ERROR:
                 # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.17
                 if c.getinfo( c.RESPONSE_CODE ) == 416:
                     pass
                 else:
                     raise
             else:
                 raise
             buf.truncate( 0 )
             buf.seek( 0 )
     return buf
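The method above relies on a _get_part_range() helper that is not shown. A hypothetical sketch of the byte-range arithmetic it implies, written in the same style; the self.part_size attribute is an assumption, not taken from the source:

 def _get_part_range( self, part_num ):
     # Hypothetical: map a part number to a half-open [start, end) byte range of
     # self.part_size bytes. The real class presumably also clamps `end` to the
     # total size of the object at self.url, so the last part may be shorter.
     start = part_num * self.part_size
     end = start + self.part_size
     return start, end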
Example #16
    class UnicodeWriter:
        """
        A CSV writer which will write rows to CSV file "f",
        which is encoded in the given encoding.
        """

        def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
            # Redirect output to a queue
            self.queue = BytesIO()
            self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
            self.stream = f
            self.encoder = codecs.getincrementalencoder(encoding)('replace')

        def writerow(self, row):
            row = [smart_text(s) for s in row]
            self.writer.writerow([s.encode("utf-8") for s in row])
            # Fetch UTF-8 output from the queue ...
            data = self.queue.getvalue()
            data = data.decode("utf-8")
            # ... and reencode it into the target encoding
            data = self.encoder.encode(data)
            # write to the target stream
            self.stream.write(data)
            # empty queue
            self.queue.truncate(0)

        def writerows(self, rows):
            for row in rows:
                self.writerow(row)
Example #17
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        try:
            self.writer.writerow([s.decode('utf8').encode("utf-8") if isinstance(s, basestring)
                                  else s for s in row])
        except:
            self.writer.writerow([s.encode("utf-8") if isinstance(s, basestring)
                                  else s for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
Example #18
class StatefulCommandCompiler(CommandCompiler):
    """A command compiler that buffers input until a full command is available."""

    def __init__(self):
        super().__init__()
        self.buf = BytesIO()

    def is_partial_command(self):
        return bool(self.buf.getvalue())

    def __call__(self, source, **kwargs):
        buf = self.buf
        if self.is_partial_command():
            buf.write(b'\n')
        buf.write(source)

        code = self.buf.getvalue().decode('utf8')

        codeobj = super().__call__(code, **kwargs)

        if codeobj:
            self.reset()
        return codeobj

    def reset(self):
        self.buf.seek(0)
        self.buf.truncate(0)
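A usage sketch for the compiler above, assuming CommandCompiler is codeop.CommandCompiler: a complete statement compiles immediately and resets the buffer, while a partial one stays buffered until more input arrives.

compiler = StatefulCommandCompiler()

code = compiler(b'x = 1 + 1')          # complete statement: compiled, buffer reset
assert code is not None
assert not compiler.is_partial_command()

assert compiler(b'if True:') is None   # incomplete: kept in the buffer for the next call
assert compiler.is_partial_command()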
Example #19
def push():
    # set this to Application ID from Instapush
    appID = "<FILL THIS IN>"

    # set this to the Application Secret from Instapush
    appSecret = "<FILL THIS IN>"

    # leave this set to DoorAlert unless you named your event something different in Instapush
    pushEvent = "Doorbell"

    # set this to what you want the push message to say
    pushMessage = "Ding dong!  Check doorbell.jpg on dropbox!"

    # use StringIO to capture the response from our push API call
    buffer = StringIO()

    # use Curl to post to the Instapush API
    c = pycurl.Curl()

    # set Instapush API URL
    c.setopt(c.URL, 'https://api.instapush.im/v1/post')

    # setup custom headers for authentication variables and content type
    c.setopt(c.HTTPHEADER, ['x-instapush-appid: ' + appID,
         'x-instapush-appsecret: ' + appSecret,
         'Content-Type: application/json'])

    # create a dictionary structure for the JSON data to post to Instapush
    json_fields = {}

    # setup JSON values
    json_fields['event'] = pushEvent
    json_fields['trackers'] = {}
    json_fields['trackers']['msg'] = pushMessage

    postfields = json.dumps(json_fields)

    # make sure to send the JSON with post
    c.setopt(c.POSTFIELDS, postfields)

    # set this so we can capture the response in our buffer
    c.setopt(c.WRITEFUNCTION, buffer.write)

    # uncomment to see the post that is sent
    # c.setopt(c.VERBOSE, True)
    c.perform()

    # capture the response from the server
    body = buffer.getvalue()

    # print the response
    print(body)

    # reset the buffer
    buffer.truncate(0)
    buffer.seek(0)

    # cleanup
    c.close()
Example #20
class ChunkedReader(object):

    def __init__(self, socket_file, maxsize):
        self.socket_file, self.maxsize = socket_file, maxsize
        self.rbuf = BytesIO()
        self.bytes_read = 0
        self.finished = False

    def check_size(self):
        if self.bytes_read > self.maxsize:
            raise MaxSizeExceeded('Request entity too large', self.bytes_read, self.maxsize)

    def read_chunk(self):
        if self.finished:
            return
        line = self.socket_file.readline()
        self.bytes_read += len(line)
        self.check_size()
        chunk_size = line.strip().split(b';', 1)[0]
        try:
            chunk_size = int(chunk_size, 16) + 2
        except Exception:
            raise ValueError('%s is not a valid chunk size' % reprlib.repr(chunk_size))
        if chunk_size + self.bytes_read > self.maxsize:
            raise MaxSizeExceeded('Request entity too large', self.bytes_read + chunk_size, self.maxsize)
        chunk = self.socket_file.read(chunk_size)
        if len(chunk) < chunk_size:
            raise ValueError('Bad chunked encoding, chunk truncated: %d < %s' % (len(chunk), chunk_size))
        if not chunk.endswith(b'\r\n'):
            raise ValueError('Bad chunked encoding: %r != CRLF' % chunk[-2:])
        self.rbuf.seek(0, os.SEEK_END)
        self.bytes_read += chunk_size
        if chunk_size == 2:
            self.finished = True
        else:
            self.rbuf.write(chunk[:-2])

    def read(self, size=-1):
        if size < 0:
            # Read all data
            while not self.finished:
                self.read_chunk()
            self.rbuf.seek(0)
            rv = self.rbuf.read()
            if rv:
                self.rbuf.truncate(0)
            return rv
        if size == 0:
            return b''
        while self.rbuf.tell() < size and not self.finished:
            self.read_chunk()
        data = self.rbuf.getvalue()
        self.rbuf.truncate(0)
        if size < len(data):
            self.rbuf.write(data[size:])
            return data[:size]
        return data
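A usage sketch for the reader above, feeding it a pre-built HTTP chunked body through a BytesIO in place of the socket file (MaxSizeExceeded and the os import are assumed to come from the surrounding module):

body = BytesIO(b'5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n')
reader = ChunkedReader(body, maxsize=1024)
assert reader.read(5) == b'hello'    # exactly the first chunk
assert reader.read() == b' world'    # the rest, up to the terminating 0-size chunk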
Example #21
class Serialization(unittest.TestCase):
    def setUp(self):
        self.t1 = m.SporadicTask(10, 100)
        self.t2 = m.SporadicTask(5, 19, 15, id=3)
        self.t3 = m.SporadicTask(25, 50, id=5, deadline=75)
        self.ts = m.TaskSystem([self.t1, self.t2, self.t3])
        self.f  = BytesIO()

    def test_serialize_task(self):
        for t in self.ts:
            s.write_xml(s.task(t), self.f)
            self.f.seek(0)
            x = s.load(self.f)
            self.assertIsInstance(x, m.SporadicTask)
            self.assertEqual(x.cost, t.cost)
            self.assertEqual(x.deadline, t.deadline)
            self.assertEqual(x.period, t.period)
            self.assertEqual(x.id, t.id)
            self.f.seek(0)
            self.f.truncate()

    def test_serialize_taskset(self):
        s.write(self.ts, self.f)
        self.f.seek(0)
        xs = s.load(self.f)
        self.assertIsInstance(xs, m.TaskSystem)
        self.assertEqual(len(xs), len(self.ts))
        for x,t in zip(xs, self.ts):
            self.assertEqual(x.cost, t.cost)
            self.assertEqual(x.deadline, t.deadline)
            self.assertEqual(x.period, t.period)
            self.assertEqual(x.id, t.id)

    def test_serialize_resmodel(self):
        r.initialize_resource_model(self.ts)
        self.t1.resmodel[1].add_request(1)
        self.t2.resmodel[1].add_read_request(2)
        self.t2.resmodel['serial I/O'].add_request(2)
        self.t3.resmodel['serial I/O'].add_request(3)

        for t in self.ts:
            s.write_xml(s.task(t), self.f)
            self.f.seek(0)
            x = s.load(self.f)
            self.assertIsInstance(x.resmodel, r.ResourceRequirements)
            self.assertEqual(len(x.resmodel), len(t.resmodel))
            self.assertEqual(list(x.resmodel.keys()), list(t.resmodel.keys()))
            for res_id in x.resmodel:
                self.assertEqual(x.resmodel[res_id].max_reads, t.resmodel[res_id].max_reads)
                self.assertEqual(x.resmodel[res_id].max_writes, t.resmodel[res_id].max_writes)
                self.assertEqual(x.resmodel[res_id].max_requests, t.resmodel[res_id].max_requests)
                self.assertEqual(x.resmodel[res_id].max_read_length, t.resmodel[res_id].max_read_length)
                self.assertEqual(x.resmodel[res_id].max_write_length, t.resmodel[res_id].max_write_length)
                self.assertEqual(x.resmodel[res_id].max_length, t.resmodel[res_id].max_length)
            self.f.seek(0)
            self.f.truncate()
Example #22
def test_round_types():
    # Check that saving, loading preserves dtype in most cases
    arr = np.arange(10)
    stream = BytesIO()
    for dts in ("f8", "f4", "i8", "i4", "i2", "i1", "u8", "u4", "u2", "u1", "c16", "c8"):
        stream.truncate(0)
        stream.seek(0)  # needed for BytesIO in python 3
        savemat_future(stream, {"arr": arr.astype(dts)})
        vars = loadmat(stream)
        assert_equal(np.dtype(dts), vars["arr"].dtype)
Example #23
def alpino_parse_tokenized_file_socket(tokenized_file_path: Path,
                                       target_dir_path: Path,
                                       host='127.0.0.1',
                                       base_port_number=42424,
                                       n_instances: int=4):
    """
    Warning: produces corrupt output for half of the sentences.
    """
    chosen_port = randrange(base_port_number, base_port_number + n_instances)

    parsed_sentences_files_dir_path, parsed_sentence_file_path_prefix = \
        prepare_parsing(tokenized_file_path=tokenized_file_path,
                        target_dir_path=target_dir_path)

    if parsed_sentences_files_dir_path is not None:
        with tokenized_file_path.open(mode='rb') as tokenized_file:
            parsed_sentence_buffer = BytesIO()
            sentence_index = 0

            for sentence in tokenized_file.readlines():
                sentence_index += 1
                with socket(AF_INET, SOCK_STREAM) as alpino_socket:
                    # alpino_socket.settimeout() # TODO: set timeout equal
                    # to Alpino timeout
                    alpino_socket.connect_ex((host, chosen_port))
                    alpino_socket.sendall(sentence + b'\n\n')

                    while True:
                        parsed_sentence_xml_chunk = alpino_socket.recv(
                            ALPINO_SOCKET_BUFFER_SIZE)
                        if not parsed_sentence_xml_chunk:
                            alpino_socket.sendall(b'\n\n')
                            break
                        else:
                            parsed_sentence_buffer.write(
                                parsed_sentence_xml_chunk)

                parsed_sentence = parsed_sentence_buffer.getvalue()

                parsed_sentence_buffer.truncate()
                parsed_sentence_buffer.flush()
                parsed_sentence_buffer.seek(0)

                parsed_sentence_file_path = \
                    parsed_sentence_file_path_prefix.with_suffix(
                        '.{0:d}.xml'.format(sentence_index))
                with parsed_sentence_file_path.open(
                        mode='wb') as parsed_sentence_file:
                    parsed_sentence_file.write(parsed_sentence)

            info(
                "Parsed tokenized file to '{"
                "parsed_sentence_file_path_prefix}'.*.xml . "
                "".format(
                    parsed_sentence_file_path_prefix=parsed_sentence_file_path_prefix))
Example #24
def test_round_types():
    # Check that saving, loading preserves dtype in most cases
    arr = np.arange(10)
    stream = BytesIO()
    for dts in ('f8','f4','i8','i4','i2','i1',
                'u8','u4','u2','u1','c16','c8'):
        stream.truncate(0)
        stream.seek(0)  # needed for BytesIO in python 3
        savemat(stream, {'arr': arr.astype(dts)})
        vars = loadmat(stream)
        assert_equal(np.dtype(dts), vars['arr'].dtype)
Example #25
 def sio_func():
     fio = BytesIO()
     fio.truncate(0)
     fio.seek(offset)
     fio.write(data)
     # Use a copy of the header to avoid changing
     # global header in test functions.
     new_hdr = hdr.copy()
     return (self.proxy_class(fio, new_hdr),
             fio,
             new_hdr)
Example #26
    def test_digest(self):
        iio = BytesIO()
        for tv in self.__class__.TEST_VECTORS:
            iio.truncate(0)
            iio.seek(0)

            iio.write(tv.text)
            iio.seek(0)
            digest = tv.cls.digest(iio)

            self.assertEqual(tv.digest, digest, "{}{}".format(tv.cls, tv.text))
Example #27
def thumbnail(image_data: io.BytesIO, max_size=(128, 128)) -> None:
	"""Resize an image in place to no more than max_size pixels, preserving aspect ratio."""
	with wand.image.Image(blob=image_data) as image:
		new_resolution = scale_resolution((image.width, image.height), max_size)
		image.resize(*new_resolution)
		image_data.truncate(0)
		image_data.seek(0)
		image.save(file=image_data)

	# allow resizing the original image more than once for memory profiling
	image_data.seek(0)
Example #28
def droptests(fp, **kw):
    """ Drop tests from chrome.manifest """
    kw = kw
    io = BytesIO()
    with Reset(io):
        for l in fp:
            if b"dta-tests" in l:
                continue
            io.write(l)
        io.truncate()
    return io
Example #29
class TestQuotedPrintableDecoder(unittest.TestCase):
    def setUp(self):
        self.f = BytesIO()
        self.d = QuotedPrintableDecoder(self.f)

    def assert_data(self, data, finalize=True):
        if finalize:
            self.d.finalize()

        self.f.seek(0)
        self.assertEqual(self.f.read(), data)
        self.f.seek(0)
        self.f.truncate()

    def test_simple(self):
        self.d.write(b'foobar')
        self.assert_data(b'foobar')

    def test_with_escape(self):
        self.d.write(b'foo=3Dbar')
        self.assert_data(b'foo=bar')

    def test_with_newline_escape(self):
        self.d.write(b'foo=\r\nbar')
        self.assert_data(b'foobar')

    def test_with_only_newline_escape(self):
        self.d.write(b'foo=\nbar')
        self.assert_data(b'foobar')

    def test_with_split_escape(self):
        self.d.write(b'foo=3')
        self.d.write(b'Dbar')
        self.assert_data(b'foo=bar')

    def test_with_split_newline_escape_1(self):
        self.d.write(b'foo=\r')
        self.d.write(b'\nbar')
        self.assert_data(b'foobar')

    def test_with_split_newline_escape_2(self):
        self.d.write(b'foo=')
        self.d.write(b'\r\nbar')
        self.assert_data(b'foobar')

    def test_close_and_finalize(self):
        parser = Mock()
        f = QuotedPrintableDecoder(parser)

        f.finalize()
        parser.finalize.assert_called_once_with()

        f.close()
        parser.close.assert_called_once_with()
Example #30
class BufferedPktLineWriterTests(TestCase):

    def setUp(self):
        TestCase.setUp(self)
        self._output = BytesIO()
        self._writer = BufferedPktLineWriter(self._output.write, bufsize=16)

    def assertOutputEquals(self, expected):
        self.assertEqual(expected, self._output.getvalue())

    def _truncate(self):
        self._output.seek(0)
        self._output.truncate()

    def test_write(self):
        self._writer.write(b'foo')
        self.assertOutputEquals(b'')
        self._writer.flush()
        self.assertOutputEquals(b'0007foo')

    def test_write_none(self):
        self._writer.write(None)
        self.assertOutputEquals(b'')
        self._writer.flush()
        self.assertOutputEquals(b'0000')

    def test_flush_empty(self):
        self._writer.flush()
        self.assertOutputEquals(b'')

    def test_write_multiple(self):
        self._writer.write(b'foo')
        self._writer.write(b'bar')
        self.assertOutputEquals(b'')
        self._writer.flush()
        self.assertOutputEquals(b'0007foo0007bar')

    def test_write_across_boundary(self):
        self._writer.write(b'foo')
        self._writer.write(b'barbaz')
        self.assertOutputEquals(b'0007foo000abarba')
        self._truncate()
        self._writer.flush()
        self.assertOutputEquals(b'z')

    def test_write_to_boundary(self):
        self._writer.write(b'foo')
        self._writer.write(b'barba')
        self.assertOutputEquals(b'0007foo0009barba')
        self._truncate()
        self._writer.write(b'z')
        self._writer.flush()
        self.assertOutputEquals(b'0005z')
Example #31
class DummyResource(AbstractResource):
    _root = None

    def __init__(self,
                 prefix: str,
                 path: str = '/',
                 is_collection=True,
                 parent=None,
                 ctime=None,
                 mtime=None,
                 exists=None):
        super().__init__(prefix, path)
        if path == '/':
            self._exists = True
            if self._root:
                raise ValueError("Second _root")
            self.__class__._root = self
        else:
            self._exists = exists
        self._is_directory = is_collection
        if self._exists:
            if not is_collection:
                self._content = BytesIO()
            else:
                self._resources = {}
        dirname = os.path.dirname(path)
        self._parent = parent or self._root.with_relative(dirname)

        self._ctime = ctime or datetime.now()
        self._mtime = mtime or datetime.now()

    def _touch_file(self):
        # noinspection PyProtectedMember
        if not self.parent or not self.parent._exists:
            raise errors.ResourceDoesNotExist("Parent resource does not exist")
        if not self.parent.is_collection:
            raise errors.InvalidResourceType("Collection expected")
        # noinspection PyProtectedMember
        self._parent._resources[self.name] = self
        self._exists = True
        self._content = BytesIO()
        self._is_directory = False

    async def get_content(self,
                          write: typing.Callable[[bytes], typing.Any],
                          *,
                          offset: int = 0,
                          limit: int = None):
        if self._is_directory:
            raise errors.InvalidResourceType("file resource expected")
        self._content.seek(offset)
        buffer = self._content.read(limit)
        if asyncio.iscoroutinefunction(write):
            await write(buffer)
        else:
            write(buffer)

    def with_relative(self, relative) -> 'AbstractResource':
        if relative == '/':
            return self
        res = self
        parts = relative.strip('/').split('/')
        for part in parts[:-1]:
            try:
                res = res._resources[part]
            except KeyError:
                raise errors.ResourceDoesNotExist(
                    "one of parent resources does not exist")
        part = parts[-1]
        try:
            if not res.is_collection:
                raise errors.InvalidResourceType("collection expected")
            res = res._resources[part]
        except KeyError:
            res = DummyResource(self.prefix,
                                os.path.join(res.path, part),
                                parent=res,
                                exists=False)
        return res

    @property
    def size(self) -> int:
        if self.is_collection:
            return 0
        return len(self._content.getvalue())

    @property
    def mtime(self):
        return self._mtime

    @property
    def ctime(self):
        return self._ctime

    async def put_content(self, read_some: typing.Awaitable[bytes]) -> bool:
        if self._exists:
            created = False
            if self.is_collection:
                raise errors.InvalidResourceType("file resource expected")
        else:
            created = True
            self._touch_file()

        self._content.seek(0)
        self._content.truncate()
        if not read_some:
            return created
        while True:
            buffer = await read_some()
            self._content.write(buffer)
            if not buffer:
                return created

    def propfind(self, *props) -> OrderedDict:
        fmt = '%Y-%m-%dT%H:%M:%SZ'
        ctime = self._ctime.strftime(fmt)
        mtime = self._mtime.strftime(fmt)
        all_props = OrderedDict([
            ('getcontenttype', ''),
            ('getlastmodified', mtime),
            ('getcontentlength', self.size),
            ('getetag', ''),
            ('creationdate', ctime),
            ('displayname', self.name),
        ])
        if not props:
            return all_props
        return OrderedDict(p for p in all_props.items() if p[0] in props)

    async def populate_props(self):
        if not self._exists:
            raise errors.ResourceDoesNotExist()
        return

    async def populate_collection(self):
        if not self._exists:
            raise errors.ResourceDoesNotExist()
        return

    @property
    def parent(self) -> 'AbstractResource':
        if self._path == '/':
            return None
        return self._parent

    @property
    def name(self) -> str:
        return os.path.basename(self.path)

    # noinspection PyProtectedMember
    async def move(self, destination: str) -> bool:
        old_name = self.name
        dest_resource = self._root.with_relative(destination)
        if not dest_resource._exists:
            self._path = dest_resource.path
            dest_resource = dest_resource._parent
        await dest_resource.populate_props()
        if not dest_resource.is_collection:
            raise errors.InvalidResourceType("collection expected")
        del self._parent._resources[old_name]
        self._parent = dest_resource
        self._path = os.path.join(self._parent.path, self.name)
        dest_resource._resources[self.name] = self
        return True

    async def make_collection(self, collection: str) -> 'AbstractResource':
        collection = collection.strip('/')
        parent = os.path.dirname(collection)
        name = os.path.basename(collection)
        if not parent:
            parent = self
        else:
            parent = self._root.with_relative(parent)

        if not parent.is_collection:
            raise errors.InvalidResourceType("collection expected")
        # noinspection PyProtectedMember
        if name in parent._resources:
            raise errors.ResourceAlreadyExists("collection already exists")
        collection = DummyResource(prefix=self.prefix,
                                   path=os.path.join(parent.path, collection),
                                   parent=parent,
                                   is_collection=True,
                                   exists=True)
        # noinspection PyProtectedMember
        parent._resources[name] = collection
        return collection

    @property
    def is_collection(self):
        return self._is_directory

    async def delete(self):
        if not self._exists:
            raise errors.ResourceDoesNotExist()
        # noinspection PyProtectedMember
        del self._parent._resources[self.name]

    # noinspection PyProtectedMember
    async def copy(self, destination: str) -> 'AbstractResource':
        if not self.is_collection:
            return self._copy_file(destination)

        dest = self._root / destination.strip('/')
        if not dest.is_collection:
            raise errors.InvalidResourceType("collection expected")
        if not dest._exists:
            name = dest.name
            dest = dest._parent
        else:
            name = self.name
        new_dir = self._clone()
        new_dir._parent = dest
        dest._resources[name] = new_dir
        new_dir._path = os.path.join(dest.path, name)
        for res in self.collection:
            await res.copy(new_dir.path)
        return new_dir

    # noinspection PyProtectedMember
    def _copy_file(self, destination: str) -> 'AbstractResource':
        dest = self._root / destination.strip('/')
        if dest._exists:
            if not dest.is_collection:
                raise errors.ResourceAlreadyExists(
                    "destination file already exists")
            parent = dest
            name = self.name
        else:
            parent = dest._parent
            name = dest.name

        new_file = self._clone()
        new_file._parent = parent
        new_file._path = os.path.join(parent.path, name)
        parent._resources[name] = new_file
        return new_file

    @property
    def collection(self) -> typing.List['AbstractResource']:
        result = []
        for k in sorted(self._resources.keys()):
            result.append(self._resources[k])
        return result

    def _clone(self):
        new = DummyResource(prefix=self.prefix,
                            path=self.path,
                            is_collection=self.is_collection,
                            parent=self._parent,
                            exists=self._exists)
        if new.is_collection:
            new._resources = {}
        else:
            new._content = BytesIO(self._content.getvalue())
        return new

    def __repr__(self):
        return "Dummy<%s>" % self.path  # pragma: no cover
Example #32
class NetstringReceiver(protocol.Protocol):
    """
    A protocol that sends and receives netstrings.

    See U{http://cr.yp.to/proto/netstrings.txt} for the specification of
    netstrings. Every netstring starts with digits that specify the length
    of the data. This length specification is separated from the data by
    a colon. The data is terminated with a comma.

    Override L{stringReceived} to handle received netstrings. This
    method is called with the netstring payload as a single argument
    whenever a complete netstring is received.

    Security features:
        1. Messages are limited in size, useful if you don't want
           someone sending you a 500MB netstring (change C{self.MAX_LENGTH}
           to the maximum length you wish to accept).
        2. The connection is lost if an illegal message is received.

    @ivar MAX_LENGTH: Defines the maximum length of netstrings that can be
        received.
    @type MAX_LENGTH: C{int}

    @ivar _LENGTH: A pattern describing all strings that contain a netstring
        length specification. Examples for length specifications are C{b'0:'},
        C{b'12:'}, and C{b'179:'}. C{b'007:'} is not a valid length
        specification, since leading zeros are not allowed.
    @type _LENGTH: C{re.Match}

    @ivar _LENGTH_PREFIX: A pattern describing all strings that contain
        the first part of a netstring length specification (without the
        trailing comma). Examples are '0', '12', and '179'. '007' does not
        start a netstring length specification, since leading zeros are
        not allowed.
    @type _LENGTH_PREFIX: C{re.Match}

    @ivar _PARSING_LENGTH: Indicates that the C{NetstringReceiver} is in
        the state of parsing the length portion of a netstring.
    @type _PARSING_LENGTH: C{int}

    @ivar _PARSING_PAYLOAD: Indicates that the C{NetstringReceiver} is in
        the state of parsing the payload portion (data and trailing comma)
        of a netstring.
    @type _PARSING_PAYLOAD: C{int}

    @ivar brokenPeer: Indicates if the connection is still functional
    @type brokenPeer: C{int}

    @ivar _state: Indicates if the protocol is consuming the length portion
        (C{PARSING_LENGTH}) or the payload (C{PARSING_PAYLOAD}) of a netstring
    @type _state: C{int}

    @ivar _remainingData: Holds the chunk of data that has not yet been consumed
    @type _remainingData: C{string}

    @ivar _payload: Holds the payload portion of a netstring including the
        trailing comma
    @type _payload: C{BytesIO}

    @ivar _expectedPayloadSize: Holds the payload size plus one for the trailing
        comma.
    @type _expectedPayloadSize: C{int}
    """
    MAX_LENGTH = 99999
    _LENGTH = re.compile(b'(0|[1-9]\d*)(:)')

    _LENGTH_PREFIX = re.compile(b'(0|[1-9]\d*)$')

    # Some error information for NetstringParseError instances.
    _MISSING_LENGTH = ("The received netstring does not start with a "
                       "length specification.")
    _OVERFLOW = ("The length specification of the received netstring "
                 "cannot be represented in Python - it causes an "
                 "OverflowError!")
    _TOO_LONG = ("The received netstring is longer than the maximum %s "
                 "specified by self.MAX_LENGTH")
    _MISSING_COMMA = "The received netstring is not terminated by a comma."

    # The following constants are used for determining if the NetstringReceiver
    # is parsing the length portion of a netstring, or the payload.
    _PARSING_LENGTH, _PARSING_PAYLOAD = range(2)

    def makeConnection(self, transport):
        """
        Initializes the protocol.
        """
        protocol.Protocol.makeConnection(self, transport)
        self._remainingData = b""
        self._currentPayloadSize = 0
        self._payload = BytesIO()
        self._state = self._PARSING_LENGTH
        self._expectedPayloadSize = 0
        self.brokenPeer = 0

    def sendString(self, string):
        """
        Sends a netstring.

        Wraps up C{string} by adding length information and a
        trailing comma; writes the result to the transport.

        @param string: The string to send.  The necessary framing (length
            prefix, etc) will be added.
        @type string: C{bytes}
        """
        self.transport.write(_formatNetstring(string))

    def dataReceived(self, data):
        """
        Receives some characters of a netstring.

        Whenever a complete netstring is received, this method extracts
        its payload and calls L{stringReceived} to process it.

        @param data: A chunk of data representing a (possibly partial)
            netstring
        @type data: C{bytes}
        """
        self._remainingData += data
        while self._remainingData:
            try:
                self._consumeData()
            except IncompleteNetstring:
                break
            except NetstringParseError:
                self._handleParseError()
                break

    def stringReceived(self, string):
        """
        Override this for notification when each complete string is received.

        @param string: The complete string which was received with all
            framing (length prefix, etc) removed.
        @type string: C{bytes}

        @raise NotImplementedError: because the method has to be implemented
            by the child class.
        """
        raise NotImplementedError()

    def _maxLengthSize(self):
        """
        Calculate and return the string size of C{self.MAX_LENGTH}.

        @return: The size of the string representation for C{self.MAX_LENGTH}
        @rtype: C{float}
        """
        return math.ceil(math.log10(self.MAX_LENGTH)) + 1

    def _consumeData(self):
        """
        Consumes the content of C{self._remainingData}.

        @raise IncompleteNetstring: if C{self._remainingData} does not
            contain enough data to complete the current netstring.
        @raise NetstringParseError: if the received data do not
            form a valid netstring.
        """
        if self._state == self._PARSING_LENGTH:
            self._consumeLength()
            self._prepareForPayloadConsumption()
        if self._state == self._PARSING_PAYLOAD:
            self._consumePayload()

    def _consumeLength(self):
        """
        Consumes the length portion of C{self._remainingData}.

        @raise IncompleteNetstring: if C{self._remainingData} contains
            a partial length specification (digits without trailing
            comma).
        @raise NetstringParseError: if the received data do not form a valid
            netstring.
        """
        lengthMatch = self._LENGTH.match(self._remainingData)
        if not lengthMatch:
            self._checkPartialLengthSpecification()
            raise IncompleteNetstring()
        self._processLength(lengthMatch)

    def _checkPartialLengthSpecification(self):
        """
        Makes sure that the received data represents a valid number.

        Checks if C{self._remainingData} represents a number smaller or
        equal to C{self.MAX_LENGTH}.

        @raise NetstringParseError: if C{self._remainingData} is no
            number or is too big (checked by L{_extractLength}).
        """
        partialLengthMatch = self._LENGTH_PREFIX.match(self._remainingData)
        if not partialLengthMatch:
            raise NetstringParseError(self._MISSING_LENGTH)
        lengthSpecification = (partialLengthMatch.group(1))
        self._extractLength(lengthSpecification)

    def _processLength(self, lengthMatch):
        """
        Processes the length definition of a netstring.

        Extracts and stores in C{self._expectedPayloadSize} the number
        representing the netstring size.  Removes the prefix
        representing the length specification from
        C{self._remainingData}.

        @raise NetstringParseError: if the received netstring does not
            start with a number or the number is bigger than
            C{self.MAX_LENGTH}.
        @param lengthMatch: A regular expression match object matching
            a netstring length specification
        @type lengthMatch: C{re.Match}
        """
        endOfNumber = lengthMatch.end(1)
        startOfData = lengthMatch.end(2)
        lengthString = self._remainingData[:endOfNumber]
        # Expect payload plus trailing comma:
        self._expectedPayloadSize = self._extractLength(lengthString) + 1
        self._remainingData = self._remainingData[startOfData:]

    def _extractLength(self, lengthAsString):
        """
        Attempts to extract the length information of a netstring.

        @raise NetstringParseError: if the number is bigger than
            C{self.MAX_LENGTH}.
        @param lengthAsString: A chunk of data starting with a length
            specification
        @type lengthAsString: C{bytes}
        @return: The length of the netstring
        @rtype: C{int}
        """
        self._checkStringSize(lengthAsString)
        length = int(lengthAsString)
        if length > self.MAX_LENGTH:
            raise NetstringParseError(self._TOO_LONG % (self.MAX_LENGTH, ))
        return length

    def _checkStringSize(self, lengthAsString):
        """
        Checks the sanity of lengthAsString.

        Checks if the size of the length specification exceeds the
        size of the string representing self.MAX_LENGTH. If this is
        not the case, the number represented by lengthAsString is
        certainly bigger than self.MAX_LENGTH, and a
        NetstringParseError can be raised.

        This method should make sure that netstrings with extremely
        long length specifications are refused before even attempting
        to convert them to an integer (which might trigger a
        MemoryError).
        """
        if len(lengthAsString) > self._maxLengthSize():
            raise NetstringParseError(self._TOO_LONG % (self.MAX_LENGTH, ))

    def _prepareForPayloadConsumption(self):
        """
        Sets up variables necessary for consuming the payload of a netstring.
        """
        self._state = self._PARSING_PAYLOAD
        self._currentPayloadSize = 0
        self._payload.seek(0)
        self._payload.truncate()

    def _consumePayload(self):
        """
        Consumes the payload portion of C{self._remainingData}.

        If the payload is complete, checks for the trailing comma and
        processes the payload. If not, raises an L{IncompleteNetstring}
        exception.

        @raise IncompleteNetstring: if the payload received so far
            contains fewer characters than expected.
        @raise NetstringParseError: if the payload does not end with a
        comma.
        """
        self._extractPayload()
        if self._currentPayloadSize < self._expectedPayloadSize:
            raise IncompleteNetstring()
        self._checkForTrailingComma()
        self._state = self._PARSING_LENGTH
        self._processPayload()

    def _extractPayload(self):
        """
        Extracts payload information from C{self._remainingData}.

        Splits C{self._remainingData} at the end of the netstring.  The
        first part becomes C{self._payload}, the second part is stored
        in C{self._remainingData}.

        If the netstring is not yet complete, the whole content of
        C{self._remainingData} is moved to C{self._payload}.
        """
        if self._payloadComplete():
            remainingPayloadSize = (self._expectedPayloadSize -
                                    self._currentPayloadSize)
            self._payload.write(self._remainingData[:remainingPayloadSize])
            self._remainingData = self._remainingData[remainingPayloadSize:]
            self._currentPayloadSize = self._expectedPayloadSize
        else:
            self._payload.write(self._remainingData)
            self._currentPayloadSize += len(self._remainingData)
            self._remainingData = b""

    def _payloadComplete(self):
        """
        Checks if enough data have been received to complete the netstring.

        @return: C{True} iff the received data contain at least as many
            characters as specified in the length section of the
            netstring
        @rtype: C{bool}
        """
        return (len(self._remainingData) + self._currentPayloadSize >=
                self._expectedPayloadSize)

    def _processPayload(self):
        """
        Processes the actual payload with L{stringReceived}.

        Strips C{self._payload} of the trailing comma and calls
        L{stringReceived} with the result.
        """
        self.stringReceived(self._payload.getvalue()[:-1])

    def _checkForTrailingComma(self):
        """
        Checks if the netstring has a trailing comma at the expected position.

        @raise NetstringParseError: if the last payload character is
            anything but a comma.
        """
        if self._payload.getvalue()[-1:] != b",":
            raise NetstringParseError(self._MISSING_COMMA)

    def _handleParseError(self):
        """
        Terminates the connection and sets the flag C{self.brokenPeer}.
        """
        self.transport.loseConnection()
        self.brokenPeer = 1
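The class above matches Twisted's twisted.protocols.basic.NetstringReceiver. A minimal usage sketch, assuming Twisted is installed, that subclasses stringReceived and drives the protocol with an in-memory test transport:

from twisted.protocols.basic import NetstringReceiver
from twisted.test.proto_helpers import StringTransport

class Echo(NetstringReceiver):
    def stringReceived(self, string):
        # Reply with the same payload, re-framed as a netstring.
        self.sendString(string)

proto = Echo()
transport = StringTransport()
proto.makeConnection(transport)
proto.dataReceived(b'12:hello, world,')   # one complete netstring
print(transport.value())                  # b'12:hello, world,'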
Example #33
class Reader(object):
    def __init__(self, socket, socket_read_size):
        self._sock = socket
        self.socket_read_size = socket_read_size
        self._buffer = BytesIO()
        self.bytes_written = 0
        self.bytes_read = 0

    @property
    def length(self):
        return self.bytes_written - self.bytes_read

    def _read_from_socket(self, length=None):
        socket_read_size = self.socket_read_size
        buf = self._buffer
        buf.seek(self.bytes_written)
        marker = 0

        try:
            while True:
                data = self._sock.recv(socket_read_size)
                if isinstance(data, bytes) and len(data) == 0:
                    raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
                buf.write(data)
                data_length = len(data)
                self.bytes_written += data_length
                marker += data_length

                if length is not None and length > marker:
                    continue
                break
        except socket.timeout:
            raise TimeoutError("Timeout reading from socket")
        except socket.error:
            e = sys.exc_info()[1]
            raise ConnectionError("Error while reading from socket: %s" %
                                  (e.args, ))

    def read(self, length):
        length = length + 2
        if length > self.length:
            self._read_from_socket(length - self.length)

        self._buffer.seek(self.bytes_read)
        data = self._buffer.read(length)
        self.bytes_read += len(data)

        if self.bytes_read == self.bytes_written:
            self.purge()

        return data[:-2]

    def readline(self):
        buf = self._buffer
        buf.seek(self.bytes_read)
        data = buf.readline()
        while not data.endswith(SYM_CRLF):
            self._read_from_socket()
            buf.seek(self.bytes_read)
            data = buf.readline()

        self.bytes_read += len(data)

        if self.bytes_read == self.bytes_written:
            self.purge()

        return data[:-2]

    def purge(self):
        self._buffer.seek(0)
        self._buffer.truncate()
        self.bytes_written = 0
        self.bytes_read = 0

    def close(self):
        try:
            self.purge()
            self._buffer.close()
        except Exception:
            pass

        self._buffer = None
        self._sock = None
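
A hedged usage sketch for the buffered Reader above: it assumes the surrounding module defines SYM_CRLF = b"\r\n" (redefined here so the snippet stands alone) and uses a local socketpair in place of a real server connection.

import socket

SYM_CRLF = b"\r\n"

a, b = socket.socketpair()
a.sendall(b"+PONG\r\n$5\r\nhello\r\n")   # RESP-style reply: a status line, then a 5-byte bulk string

reader = Reader(b, socket_read_size=4096)
print(reader.readline())   # b'+PONG'  (readline strips the CRLF via the [:-2] slice)
print(reader.readline())   # b'$5'
print(reader.read(5))      # b'hello'  (read fetches length + 2 bytes and strips the CRLF)

a.close()
b.close()
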
Exemple #34
0
camera.start_preview()
time.sleep(2)
camera.resolution = (640, 480)
camera.framerate = 30
while True:
    try:
        client = server.accept()[0].makefile('wb')
        #(client, address) = server.accept()
        stream = BytesIO()
        camera.start_recording('/home/pi/FTP/files/vid1.h264',
                               resize=(1920, 1080))
        for foo in camera.capture_continuous(stream,
                                             'rgb',
                                             splitter_port=2,
                                             use_video_port=True):
            client.write(struct.pack('<L', stream.tell()))
            #client.sendall(struct.pack('<L', stream.tell()))
            client.flush()

            stream.seek(0)
            client.write(stream.read())
            #client.sendall(stream.read())

            stream.seek(0)
            stream.truncate()

    except socket.error:
        #server.close()
        camera.stop_recording()
        print('Client disconnected')
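
The capture loop above writes each frame as a little-endian length prefix followed by the raw RGB bytes, then rewinds and truncates the stream for the next frame. A hypothetical receiver for that framing could look like this (recv_exact and iter_frames are illustrative names, not part of the example above):

import socket
import struct

def recv_exact(conn, n):
    """Read exactly n bytes from a connected socket."""
    data = b""
    while len(data) < n:
        chunk = conn.recv(n - len(data))
        if not chunk:
            raise ConnectionError("sender closed the connection")
        data += chunk
    return data

def iter_frames(conn):
    """Yield one raw RGB frame at a time from the '<L'-prefixed stream."""
    while True:
        header = recv_exact(conn, struct.calcsize('<L'))
        (frame_len,) = struct.unpack('<L', header)
        yield recv_exact(conn, frame_len)
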
Exemple #35
0
class CurlHttpEventStream(object):
    def __init__(self, url, auth, verify):
        self.url = url
        self.received_buffer = BytesIO()

        headers = ['Cache-Control: no-cache', 'Accept: text/event-stream']

        self.curl = pycurl.Curl()
        self.curl.setopt(pycurl.URL, url)
        self.curl.setopt(pycurl.ENCODING, 'gzip')
        self.curl.setopt(pycurl.CONNECTTIMEOUT, 10)
        self.curl.setopt(pycurl.WRITEDATA, self.received_buffer)
        if auth and type(auth) is DCOSAuth:
            auth.refresh_auth_header()
            headers.append('Authorization: %s' % auth.auth_header)
        elif auth:
            self.curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
            self.curl.setopt(pycurl.USERPWD, '%s:%s' % auth)
        if verify:
            self.curl.setopt(pycurl.CAINFO, verify)

        self.curl.setopt(pycurl.HTTPHEADER, headers)

        self.curlmulti = pycurl.CurlMulti()
        self.curlmulti.add_handle(self.curl)

        self.status_code = 0

    SELECT_TIMEOUT = 10

    def _any_data_received(self):
        return self.received_buffer.tell() != 0

    def _get_received_data(self):
        result = self.received_buffer.getvalue()
        self.received_buffer.truncate(0)
        self.received_buffer.seek(0)
        return result

    def _check_status_code(self):
        if self.status_code == 0:
            self.status_code = self.curl.getinfo(pycurl.HTTP_CODE)
        if self.status_code != 0 and self.status_code != 200:
            raise Exception(str(self.status_code) + ' ' + self.url)

    def _perform_on_curl(self):
        while True:
            ret, num_handles = self.curlmulti.perform()
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        return num_handles

    def _iter_chunks(self):
        while True:
            remaining = self._perform_on_curl()
            if self._any_data_received():
                self._check_status_code()
                yield self._get_received_data()
            if remaining == 0:
                break
            self.curlmulti.select(self.SELECT_TIMEOUT)

        self._check_status_code()
        self._check_curl_errors()

    def _check_curl_errors(self):
        for f in self.curlmulti.info_read()[2]:
            raise pycurl.error(*f[1:])

    def iter_lines(self):
        chunks = self._iter_chunks()
        return self._split_lines_from_chunks(chunks)

    @staticmethod
    def _split_lines_from_chunks(chunks):
        #same behaviour as requests' Response.iter_lines(...)

        pending = None
        for chunk in chunks:

            if pending is not None:
                chunk = pending + chunk
            lines = chunk.splitlines()

            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None

            for line in lines:
                yield line

        if pending is not None:
            yield pending
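
A hedged usage sketch for the event stream above; the URL is a placeholder, and note that _get_received_data() resets the receive buffer with truncate(0)/seek(0) between chunks. The request is only performed once iter_lines() is consumed:

stream = CurlHttpEventStream('http://example.com/v2/events', auth=None, verify=False)
for line in stream.iter_lines():
    if line.startswith(b'data:'):
        print(line)
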
Exemple #36
0
def speak(audioString):
    audioString = audioString.replace('-', ' ').replace('_', ' ')
    consequitivedots = re.compile(r'\.{2,}')
    audioString = consequitivedots.sub('.', audioString)
    sentence = sent(audioString)
    l = len(sentence)
    #	mixer.init()
    if tts_ == 'google1':
        mixer.init(54000, -16, 1, 4096)
        from io import BytesIO
        print('[MAIN] - NÓI')
        print('')
        print('TTS: Google')
        for i in range(0, l):
            text = str(sentence[i])
            # text = text[:]
            print(text)
            count = count_chars(text)
            print(count)
            if count > 0:
                print('Nội dung: ' + text)
                # Check whether the cached audio file already exists
                import os.path
                from os import path
                while True:
                    me = path.exists('tmp/google/' + text[:150] + '.mp3')
                    if me == False:
                        tts = gTTS(text, lang='vi')
                        tts.save('tmp/google/' + text[:150] + '.mp3')
                        time_sleep = 1
                        time_wait = 20
                        tcount = 0
                        while (me == False and tcount < time_wait):
                            time.sleep(time_sleep)
                            tts.save('tmp/google/' + text[:150] + '.mp3')
                            me = path.exists('tmp/google/' + text[:150] + '.mp3')
                            tcount += 1
                    else:
                        if seed == 1:
                            pixels.pixels.speak()
                        audio = MP3('tmp/google/' + text[:150] + '.mp3')
                        t = float(audio.info.length)
                        mixer.music.load('tmp/google/' + text[:150] + '.mp3')
                        mixer.music.play()
                        time.sleep(t)
                        break
#FPT
    elif tts_ == 'fpt':
        import random, requests
        for i in range(0, l):
            text = str(sentence[i])
            if len(text.strip()) < 1 or text.strip() == '.':
                pass
            else:
                print('Nội dung: ' + text)
                import os.path
                from os import path
                while True:
                    me = path.exists('tmp/fpt/' + text[:150] + '.mp3')
                    if me == False:
                        api_fpt_key = random.choice(api_fpt)
                        url = 'https://api.fpt.ai/hmi/tts/v5'
                        headers = {
                            'api_key': api_fpt_key,
                            'speed': '0',
                            'prosody': '1',
                            'voice': voice
                        }
                        payload = text.encode('utf-8')
                        url_return = requests.post(
                            url, data=payload, headers=headers).json()['async']
                        res_response = requests.get(url_return)
                        res_status = res_response.status_code
                        #						print (url_return)
                        time_sleep = 0.5
                        time_wait = 20
                        tcount = 0
                        while (res_status == 404 and tcount < time_wait):
                            time.sleep(time_sleep)
                            res_response = requests.get(url_return)
                            res_status = res_response.status_code
                            tcount += 1
                        if tcount == time_wait:
                            print('error tts')
                        with open('tmp/fpt/' + text[:150] + '.mp3', 'wb') as f:
                            f.write(res_response.content)
                            f.close()
                    else:
                        mixer.music.load('tmp/fpt/' + text[:150] + '.mp3')
                        mixer.music.play()
                        audio = MP3('tmp/fpt/' + text[:150] + '.mp3')
                        t = float(audio.info.length)
                        print('Time delay :' + str(t))
                        time.sleep(t)
                        break
#TTS VIETTEL
    elif tts_ == 'viettel':
        token = gih.get_config('token')
        import json, requests, os.path
        from playsound import playsound
        from pathlib import Path
        from pygame import mixer
        url = "https://vtcc.ai/voice/api/tts/v1/rest/syn"
        for i in range(0, l):
            text = str(sentence[i])
            if len(text.strip()) < 1 or text.strip() == '.':
                pass
            else:
                print('Nội dung: ' + text)
                import os.path
                from os import path
                while True:
                    from pydub import AudioSegment
                    from pydub.playback import play
                    me = path.exists('tmp/vtcc/' + text[:150] + '.wav')
                    if me == False:
                        data = {
                            "text": text,
                            "voice": "hn-quynhanh",
                            "id": "2",
                            "without_filter": False,
                            "speed": 1.0,
                            "tts_return_option": 2
                        }
                        headers = {
                            'Content-type': 'application/json',
                            'token': token
                        }
                        s = requests.Session()
                        dirname = os.path.dirname(os.path.abspath('_file_'))
                        cert_path = (dirname + '/wwwvtccai.crt')
                        response = requests.post(url,
                                                 data=json.dumps(data),
                                                 headers=headers,
                                                 verify=cert_path)
                        print(response.headers)
                        print(response.status_code)
                        res_status = response.status_code
                        time_sleep = 0.5
                        time_wait = 20
                        tcount = 0
                        while (res_status != 200 and tcount < time_wait):
                            time.sleep(time_sleep)
                            response = requests.post(url,
                                                     data=json.dumps(data),
                                                     headers=headers,
                                                     verify=cert_path)
                            res_status = response.status_code
                            tcount += 1
                        if tcount == time_wait:
                            print('error tts')
                        data = response.content
                        with open('tmp/vtcc/' + text[:150] + '.wav',
                                  "wb") as f:
                            f.write(data)
                            f.close()
                    else:
                        if seed == 1:
                            pixels.pixels.speak()
                        sound = AudioSegment.from_wav('tmp/vtcc/' +
                                                      text[:150] + '.wav')
                        play(sound)
                        break
# TTS Google1
    elif tts_ == 'google':
        from pygame import mixer
        mixer.init(54000, -16, 1, 4096)
        from io import BytesIO
        print('[MAIN] - NÓI')
        print('')
        print('TTS: Google')
        for i in range(0, l):
            text = str(sentence[i])
            # text = text[:]
            print(text)
            count = count_chars(text)
            print(count)
            if count > 0:
                print('Nội dung: ' + text)
                tts = gTTS(text, lang='vi')
                mp3_fp = BytesIO()
                tts.write_to_fp(mp3_fp)
                t = mp3_fp.truncate() / 5400
                t = float(t)
                mp3_fp.seek(0)
                mixer.init()
                if seed == 1:
                    pixels.pixels.speak()
                mixer.music.load(mp3_fp)
                mixer.music.play()
                mp3_fp.flush()
                mp3_fp.seek(0)
                time.sleep(t)
            else:
                pass
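
The 'google' branch above synthesizes speech straight into a BytesIO and plays it from memory, estimating the duration from the buffer size (the division by 5400 is only a rough heuristic). A minimal sketch of that in-memory pattern, assuming gTTS and pygame are installed, might look like:

import time
from io import BytesIO
from gtts import gTTS
from pygame import mixer

def speak_from_memory(text, lang='vi'):
    mp3_fp = BytesIO()
    gTTS(text, lang=lang).write_to_fp(mp3_fp)   # synthesize straight into the buffer
    mp3_fp.seek(0)                              # rewind before handing the buffer to the mixer
    mixer.init()
    mixer.music.load(mp3_fp)
    mixer.music.play()
    while mixer.music.get_busy():               # wait for playback instead of guessing a sleep time
        time.sleep(0.1)
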
class IdzipWriter(IOStreamWrapperMixin):
    FILE_EXTENSION = 'dz'
    enforce_extension = True

    def __init__(self, output, sync_size=MAX_MEMBER_SIZE, mtime=None):
        if mtime is None:
            mtime = time.time()
        if isinstance(output, basestring):
            self.output = self._prepare_file_stream(output)
            self._should_close = True
        else:
            # hopefully a file like object
            if not check_file_like_for_writing(output):
                raise TypeError(
                    "`output` must be a file-like object supporting "
                    "write, tell, flush, and close!")
            self.output = output
            self._should_close = False
        self.input_buffer = BytesIO()
        try:
            name = path.abspath(self.output.name)
            basename = path.basename(name)
            self.name = name
            self.basename = basename.encode(fsencoding)
        except AttributeError:
            self.name = ""
            self.basename = self.name.encode(fsencoding)
        self.uncompressed_position = 0
        self.sync_size = sync_size
        self.mtime = int(mtime)
        self.compressobj = None
        self._reset_compressor()
        self.version = 1

    def _prepare_file_stream(self, path):
        if self.enforce_extension and not path.endswith(self.FILE_EXTENSION):
            path = "%s.%s" % (path, self.FILE_EXTENSION)
        return open(path, 'wb')

    @property
    def stream(self):
        return self.output

    def _make_compressor(self):
        return zlib.compressobj(COMPRESSION_LEVEL, zlib.DEFLATED,
                                -zlib.MAX_WBITS)

    def _reset_compressor(self):
        self.compressobj = self._make_compressor()

    def seek(self, offset, whence=SEEK_SET):
        raise UnsupportedOperation("Cannot seek on a write-only stream")

    def reset_buffer(self):
        # For truncate() to actually help us circumvent memory problems,
        # we need to seek to the beginning of the buffer, otherwise the
        # next call to write() will just pad the buffer with null bytes
        # up to the current position returned by tell()
        self.input_buffer.seek(0)
        self.input_buffer.truncate(0)

    def _get_buffer_size(self):
        curpos = self.input_buffer.tell()
        self.input_buffer.seek(0, SEEK_END)
        buffer_len = self.input_buffer.tell()
        self.input_buffer.seek(curpos)
        return buffer_len

    def _write_chunked(self, b, chunk_size=WRITE_BLOCK_SIZE):
        offset = 0
        total = len(b)
        i = 0
        while offset < total:
            s = b[offset:offset + chunk_size]
            self.input_buffer.write(s)
            self.uncompressed_position += len(s)
            i += 1
            offset += chunk_size
            if i % 2 == 0:
                self.sync()
                self.reset_buffer()
        return total

    def write(self, b):
        try:
            self.input_buffer.write(b)
        except MemoryError:
            # Attempting to write to the input buffer may cause
            # it to optimistically request a contiguous allocation
            # larger than the memory allocator can currently supply,
            # raising a MemoryError. This can be recovered from by
            # syncing the current buffer to disk, resetting the
            # input buffer, and trying to write again
            self.sync()
            self.reset_buffer()
            try:
                self.input_buffer.write(b)
            except MemoryError:
                # If the data to be written are so large that
                # they cannot be written to the input buffer in
                # one shot, try writing it in chunks. If *this*
                # runs out of memory, we're out of options.
                return self._write_chunked(b)
        self.uncompressed_position += len(b)
        buffer_len = self._get_buffer_size()
        if buffer_len >= self.sync_size:
            if self._check_member_size_valid(buffer_len):
                self.sync()
                self.reset_buffer()
            else:
                self._sync_chunked()
        return len(b)

    def sync(self):
        self.compress_member()
        self.reset_buffer()
        return self.output.tell()

    def _sync_chunked(self, flush=False):
        # accumulate chunks of the input buffer which are at most
        # `MAX_MEMBER_SIZE` bytes long
        valid_members = []
        self.input_buffer.seek(0)
        chunk = self.input_buffer.read(MAX_MEMBER_SIZE - 1)
        while chunk:
            valid_members.append(BytesIO(chunk))
            chunk = self.input_buffer.read(MAX_MEMBER_SIZE - 1)
        # if the input buffer was empty, then no valid members were
        # made so return immediately
        if not valid_members:
            return
        # if there is only one member, chunked syncing should be the same
        # as normal syncing, leaving the input buffer empty
        elif len(valid_members) == 1:
            self.input_buffer = valid_members[0]
            self.compress_member()
            self.reset_buffer()
        else:
            # otherwise, for all but the last valid member, sync those buffers
            # to the output stream, and then set the last valid member as the current
            # input buffer to avoid having a tiny leftover fragment.
            for chunk in valid_members[:-1]:
                self.input_buffer = chunk
                self.compress_member()
                self.reset_buffer()
            self.input_buffer = valid_members[-1]
            if flush:
                self.compress_member()
                self.reset_buffer()

    def tell(self):
        return self.uncompressed_position

    def flush(self):
        self.sync()
        return self.output.flush()

    def close(self):
        if not self.closed:
            self.sync()
            self.reset_buffer()
            if self._should_close:
                closing = self.output.close()
                return closing
        return None

    def _calculate_number_of_chunks_for_bytes(self, in_size):
        num_chunks = in_size // CHUNK_LENGTH
        if in_size % CHUNK_LENGTH != 0:
            num_chunks += 1
        return num_chunks

    def _calculate_extra_field_length(self, in_size):
        num_chunks = self._calculate_number_of_chunks_for_bytes(in_size)
        field_length = 3 * 2 + 2 * num_chunks
        return field_length

    def _calculate_header_extra_size(self, in_size):
        field_length = self._calculate_extra_field_length(in_size)
        extra_length = 2 * 2 + field_length
        return extra_length

    def _check_member_size_valid(self, in_size):
        extra_length = self._calculate_header_extra_size(in_size)
        return extra_length <= 0xffff

    def compress_member(self):
        """A gzip member contains:
        1) The header.
        2) The compressed data.
        """
        self.input_buffer.seek(0, SEEK_END)
        member_size = self.input_buffer.tell()
        self.input_buffer.seek(0)

        zlengths_pos = self._prepare_header(member_size)
        zlengths = self._compress_data(member_size)

        # Writes the lengths of compressed chunks to the header.
        end_pos = self.output.tell()
        self.output.seek(zlengths_pos)
        for zlen in zlengths:
            _write16(self.output, zlen)

        self.output.seek(end_pos)

    def _prepare_header(self, in_size):
        """Writes a prepared gzip header to the output.
        The gzip header is defined in RFC 1952.

        The gzip header starts with:
        +---+---+---+---+---+---+---+---+---+---+
        |x1f|x8b|x08|FLG|     MTIME     |XFL|OS |
        +---+---+---+---+---+---+---+---+---+---+
        where:
        FLG ... flags. FEXTRA|FNAME is used by idzip.
        MTIME ... the modification time of the original file or 0.
        XFL ... extra flags about the compression.
        OS ... operating system used for the compression.

        The next header sections are:
        1) Extra field, if the FEXTRA flag is set.
           Its format is described in _write_extra_field().
        2) The original file name, if the FNAME flag is set.
           The file name string is zero-terminated.
        """
        self.output.write(GZIP_DEFLATE_ID)
        flags = FEXTRA
        if self.basename:
            flags |= FNAME
        self.output.write(bytearray([flags]))

        # The mtime will be undefined if it does not fit.
        if self.mtime > 0xffffffff:
            mtime = 0
        else:
            mtime = self.mtime
        _write32(self.output, mtime)

        deflate_flags = b"\0"
        if COMPRESSION_LEVEL == zlib.Z_BEST_COMPRESSION:
            deflate_flags = b"\x02"  # slowest compression algorithm
        self.output.write(deflate_flags)
        self.output.write(bytearray([OS_CODE_UNIX]))

        zlengths_pos = self._write_extra_field(in_size)
        if self.basename:
            self.output.write(self.basename + b'\0')  # original basename

        return zlengths_pos

    def _write_extra_field(self, in_size):
        """Writes the dictzip extra field.
        It will be initiated with zeros on the place of
        the lengths of compressed chunks.

        The gzip extra field is present when the FEXTRA flag is set.
        RFC 1952 defines the used bytes:
        +---+---+================================+
        | XLEN  | XLEN bytes of "extra field" ...|
        +---+---+================================+

        Idzip adds only one subfield:
        +---+---+---+---+===============================+
        |'R'|'A'|  LEN  | LEN bytes of subfield data ...|
        +---+---+---+---+===============================+

        The subfield ID "RA" stands for Random Access.
        That subfield ID signalizes the dictzip gzip extension.
        The dictzip stores the length of uncompressed chunks
        and the lengths of compressed chunks to the gzip header:
        +---+---+---+---+---+---+==============================================+
        | VER=1 | CHLEN | CHCNT | CHCNT 2-byte lengths of compressed chunks ...|
        +---+---+---+---+---+---+==============================================+

        Two bytes are used to store a length of a compressed chunk.
        So the length of a compressed chunk has to be at most 0xffff.
        That puts a restriction on the CHLEN -- the length of
        uncompressed chunks. Dictzip uses CHLEN=58315.

        Only a fixed number of chunk lengths will fit to the gzip header.
        That limits the max file size of a dictzip file.
        Idzip does not have that limitation. It starts a new gzip member if needed.
        The new member would be also a valid dictzip file.
        """
        # num_chunks = in_size // CHUNK_LENGTH
        # if in_size % CHUNK_LENGTH != 0:
        #     num_chunks += 1
        num_chunks = self._calculate_number_of_chunks_for_bytes(in_size)
        # field_length = 3 * 2 + 2 * num_chunks
        field_length = self._calculate_extra_field_length(in_size)
        # extra_length = 2 * 2 + field_length
        extra_length = self._calculate_header_extra_size(in_size)
        assert extra_length <= 0xffff
        _write16(self.output, extra_length)  # XLEN

        # Dictzip extra field (Random Access)
        self.output.write(b"RA")
        _write16(self.output, field_length)
        _write16(self.output, self.version)  # version
        _write16(self.output, CHUNK_LENGTH)
        _write16(self.output, num_chunks)
        zlengths_pos = self.output.tell()
        self.output.write(b"\0\0" * num_chunks)
        return zlengths_pos

    def _compress_data(self, in_size):
        """Compresses the given number of input bytes to the output.
        The output consists of:
        1) The compressed data.
        2) 4 bytes of CRC.
        3) 4 bytes of file size.
        """
        assert in_size <= 0xffffffff
        zlengths = []
        crcval = zlib.crc32(b"")

        need = in_size
        while need > 0:
            read_size = min(need, CHUNK_LENGTH)
            chunk = self.input_buffer.read(read_size)
            if len(chunk) != read_size:
                raise IOError("Need %s bytes, got %s" %
                              (read_size, len(chunk)))

            need -= len(chunk)
            crcval = zlib.crc32(chunk, crcval)
            zlen = self._compress_chunk(chunk)
            zlengths.append(zlen)

        # An empty block with BFINAL=1 flag ends the zlib data stream.
        self.output.write(self.compressobj.flush(zlib.Z_FINISH))
        self._reset_compressor()
        _write32(self.output, crcval)
        _write32(self.output, in_size)
        return zlengths

    def _compress_chunk(self, chunk):
        data = self.compressobj.compress(chunk)
        zlen = len(data)
        self.output.write(data)

        data = self.compressobj.flush(zlib.Z_FULL_FLUSH)
        zlen += len(data)
        self.output.write(data)
        return zlen
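
Because every idzip member is also a valid gzip stream, data written through IdzipWriter can be read back with the standard gzip module. A hedged usage sketch ('example' is a placeholder path; MAX_MEMBER_SIZE, CHUNK_LENGTH, IOStreamWrapperMixin and the other module-level names are assumed to be available as in the surrounding module):

import gzip

writer = IdzipWriter("example")          # the .dz extension is appended automatically
writer.write(b"hello dictzip" * 1000)
writer.close()

with gzip.open("example.dz", "rb") as fh:
    assert fh.read() == b"hello dictzip" * 1000
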
def test_genZshFunction(self, cmdName, optionsFQPN):
    """
    Generate completion functions for given twisted command - no errors
    should be raised

    @type cmdName: C{str}
    @param cmdName: The name of the command-line utility e.g. 'twistd'

    @type optionsFQPN: C{str}
    @param optionsFQPN: The Fully Qualified Python Name of the C{Options}
        class to be tested.
    """
    outputFile = BytesIO()
    self.patch(usage.Options, "_shellCompFile", outputFile)

    # some scripts won't import or instantiate because of missing
    # dependencies (pyOpenSSL, etc) so we have to skip them.
    try:
        o = reflect.namedAny(optionsFQPN)()
    except Exception as e:
        raise unittest.SkipTest(
            "Couldn't import or instantiate " "Options class: %s" % (e,)
        )

    try:
        o.parseOptions(["", "--_shell-completion", "zsh:2"])
    except ImportError as e:
        # this can happen for commands which don't have all
        # the necessary dependencies installed. skip test.
        # skip
        raise unittest.SkipTest("ImportError calling parseOptions(): %s", (e,))
    except SystemExit:
        pass  # expected
    else:
        self.fail("SystemExit not raised")
    outputFile.seek(0)
    # test that we got some output
    self.assertEqual(1, len(outputFile.read(1)))
    outputFile.seek(0)
    outputFile.truncate()

    # now, if it has sub commands, we have to test those too
    if hasattr(o, "subCommands"):
        for (cmd, short, parser, doc) in o.subCommands:
            try:
                o.parseOptions([cmd, "", "--_shell-completion", "zsh:3"])
            except ImportError as e:
                # this can happen for commands which don't have all
                # the necessary dependencies installed. skip test.
                raise unittest.SkipTest(
                    "ImportError calling parseOptions() on subcommand: %s" % (e,)
                )
            except SystemExit:
                pass  # expected
            else:
                self.fail("SystemExit not raised")

            outputFile.seek(0)
            # test that we got some output
            self.assertEqual(1, len(outputFile.read(1)))
            outputFile.seek(0)
            outputFile.truncate()

    # flushed because we don't want DeprecationWarnings to be printed when
    # running these test cases.
    self.flushWarnings()
Exemple #39
0
class _ResponseReader(protocol.Protocol):
    def __init__(self, finished, txresponse, request, maxsize, warnsize,
                 fail_on_dataloss, crawler):
        self._finished = finished
        self._txresponse = txresponse
        self._request = request
        self._bodybuf = BytesIO()
        self._maxsize = maxsize
        self._warnsize = warnsize
        self._fail_on_dataloss = fail_on_dataloss
        self._fail_on_dataloss_warned = False
        self._reached_warnsize = False
        self._bytes_received = 0
        self._certificate = None
        self._ip_address = None
        self._crawler = crawler

    def _finish_response(self, flags=None, failure=None):
        self._finished.callback({
            "txresponse": self._txresponse,
            "body": self._bodybuf.getvalue(),
            "flags": flags,
            "certificate": self._certificate,
            "ip_address": self._ip_address,
            "failure": failure,
        })

    def connectionMade(self):
        if self._certificate is None:
            with suppress(AttributeError):
                self._certificate = ssl.Certificate(
                    self.transport._producer.getPeerCertificate())

        if self._ip_address is None:
            self._ip_address = ipaddress.ip_address(
                self.transport._producer.getPeer().host)

    def dataReceived(self, bodyBytes):
        # This may be called several times after cancel was called, with buffered data.
        if self._finished.called:
            return

        self._bodybuf.write(bodyBytes)
        self._bytes_received += len(bodyBytes)

        bytes_received_result = self._crawler.signals.send_catch_log(
            signal=signals.bytes_received,
            data=bodyBytes,
            request=self._request,
            spider=self._crawler.spider,
        )
        for handler, result in bytes_received_result:
            if isinstance(result, Failure) and isinstance(
                    result.value, StopDownload):
                logger.debug(
                    "Download stopped for %(request)s from signal handler %(handler)s",
                    {
                        "request": self._request,
                        "handler": handler.__qualname__
                    })
                self.transport.stopProducing()
                self.transport._producer.loseConnection()
                failure = result if result.value.fail else None
                self._finish_response(flags=["download_stopped"],
                                      failure=failure)

        if self._maxsize and self._bytes_received > self._maxsize:
            logger.warning(
                "Received (%(bytes)s) bytes larger than download "
                "max size (%(maxsize)s) in request %(request)s.", {
                    'bytes': self._bytes_received,
                    'maxsize': self._maxsize,
                    'request': self._request
                })
            # Clear buffer earlier to avoid keeping data in memory for a long time.
            self._bodybuf.truncate(0)
            self._finished.cancel()

        if self._warnsize and self._bytes_received > self._warnsize and not self._reached_warnsize:
            self._reached_warnsize = True
            logger.warning(
                "Received more bytes than download "
                "warn size (%(warnsize)s) in request %(request)s.", {
                    'warnsize': self._warnsize,
                    'request': self._request
                })

    def connectionLost(self, reason):
        if self._finished.called:
            return

        if reason.check(ResponseDone):
            self._finish_response()
            return

        if reason.check(PotentialDataLoss):
            self._finish_response(flags=["partial"])
            return

        if reason.check(ResponseFailed) and any(
                r.check(_DataLoss) for r in reason.value.reasons):
            if not self._fail_on_dataloss:
                self._finish_response(flags=["dataloss"])
                return

            elif not self._fail_on_dataloss_warned:
                logger.warning(
                    "Got data loss in %s. If you want to process broken "
                    "responses set the setting DOWNLOAD_FAIL_ON_DATALOSS = False"
                    " -- This message won't be shown in further requests",
                    self._txresponse.request.absoluteURI.decode())
                self._fail_on_dataloss_warned = True

        self._finished.errback(reason)
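
A small aside on the truncate(0) call in dataReceived() above: BytesIO.truncate() discards data but does not move the stream position, which is fine here only because the download is cancelled immediately afterwards. The snippet below shows why most examples in this collection pair truncate(0) with seek(0):

from io import BytesIO

buf = BytesIO(b"abcdef")
buf.seek(6)
buf.truncate(0)          # size is now 0, but the position is still 6
buf.write(b"XY")
print(buf.getvalue())    # b'\x00\x00\x00\x00\x00\x00XY' -- the gap is null-padded

buf2 = BytesIO(b"abcdef")
buf2.seek(6)
buf2.truncate(0)
buf2.seek(0)             # rewinding first gives the expected result
buf2.write(b"XY")
print(buf2.getvalue())   # b'XY'
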
class THttpClient(TTransportBase):
    """Http implementation of TTransport base."""

    def __init__(self, uri_or_host, port=None, path=None, cafile=None, cert_file=None, key_file=None, ssl_context=None):
        if port is not None:
            warnings.warn(
                "Please use the THttpClient('http{s}://host:port/path') constructor",
                DeprecationWarning,
                stacklevel=2)
            self.host = uri_or_host
            self.port = port
            assert path
            self.path = path
            self.scheme = 'http'
        else:
            parsed = urllib.parse.urlparse(uri_or_host)
            self.scheme = parsed.scheme
            assert self.scheme in ('http', 'https')
            if self.scheme == 'http':
                self.port = parsed.port or http_client.HTTP_PORT
            elif self.scheme == 'https':
                self.port = parsed.port or http_client.HTTPS_PORT
                self.certfile = cert_file
                self.keyfile = key_file
                self.context = ssl.create_default_context(cafile=cafile) if (cafile and not ssl_context) else ssl_context
            self.host = parsed.hostname
            self.path = parsed.path
            if parsed.query:
                self.path += '?%s' % parsed.query
        proxy = None
        self.realhost = self.realport = self.proxy_auth = None
        self.__wbuf = BytesIO()
        self.__rbuf = BytesIO()
        self.__http = None
        self.__http_response = None
        self.__timeout = None
        self.__custom_headers = None
        self.__time = time.time()
        

    def using_proxy(self):
        return self.realhost is not None

    def open(self):
        if self.scheme == 'http':
            self.__http = http_client.HTTPConnection(self.host, self.port,
                                                     timeout=self.__timeout)
        if self.scheme == 'https':
            self.__http = http_client.HTTPSConnection(self.host, self.port,
                                                      key_file=self.keyfile,
                                                      cert_file=self.certfile,
                                                      timeout=self.__timeout,
                                                      context=self.context)
            
    def close(self):
        self.__http.close()
        self.__http = None
        self.__http_response = None

    def isOpen(self):
        return self.__http is not None

    def setTimeout(self, ms):
        if ms is None:
            self.__timeout = None
        else:
            self.__timeout = ms / 1000.0

    def setCustomHeaders(self, headers):
        self.__custom_headers = headers

    def read(self, sz):
        return self.__rbuf.read(sz)

    def write(self, buf):
        self.__wbuf.write(buf)

    def flush(self):
        if not self.isOpen():
            self.open()
            self.__time = time.time()
        elif time.time() - self.__time > 90:
            self.close()
            self.open()
            self.__time = time.time()
        data = self.__wbuf.getvalue()
        self.__wbuf.truncate(0)
        self.__wbuf.seek(0)
        self.__http.putrequest('POST', self.path)
        self.__http.putheader('Content-Type', 'application/x-thrift')
        self.__http.putheader('Content-Length', str(len(data)))
        if self.__custom_headers:
            for key, val in six.iteritems(self.__custom_headers):
                self.__http.putheader(key, val)
        self.__http.endheaders()
        self.__http.send(data)
        self.__rbuf = BytesIO(self.__http.getresponse().read())
Exemple #41
0
class Headers:

    header_size = 112
    chunk_size = 10**16

    max_target = 0x0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
    genesis_hash = b'9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'
    target_timespan = 150
    checkpoints = HASHES
    first_block_timestamp = 1466646588  # block 1, as 0 is off by a lot
    timestamp_average_offset = 160.6855883050695  # calculated at 733447

    validate_difficulty: bool = True

    def __init__(self, path) -> None:
        self.io = None
        self.path = path
        self._size: Optional[int] = None
        self.chunk_getter: Optional[Callable] = None
        self.known_missing_checkpointed_chunks = set()
        self.check_chunk_lock = asyncio.Lock()

    async def open(self):
        self.io = BytesIO()
        if self.path != ':memory:':
            def _readit():
                if os.path.exists(self.path):
                    with open(self.path, 'r+b') as header_file:
                        self.io.seek(0)
                        self.io.write(header_file.read())
            await asyncio.get_event_loop().run_in_executor(None, _readit)
        bytes_size = self.io.seek(0, os.SEEK_END)
        self._size = bytes_size // self.header_size
        max_checkpointed_height = max(self.checkpoints.keys() or [-1]) + 1000
        if bytes_size % self.header_size:
            log.warning("Reader file size doesnt match header size. Repairing, might take a while.")
            await self.repair()
        else:
            # try repairing any incomplete write on tip from previous runs (outside of checkpoints, that are ok)
            await self.repair(start_height=max_checkpointed_height)
        await self.ensure_checkpointed_size()
        await self.get_all_missing_headers()

    async def close(self):
        if self.io is not None:
            def _close():
                flags = 'r+b' if os.path.exists(self.path) else 'w+b'
                with open(self.path, flags) as header_file:
                    header_file.write(self.io.getbuffer())
            await asyncio.get_event_loop().run_in_executor(None, _close)
            self.io.close()
            self.io = None

    @staticmethod
    def serialize(header):
        return b''.join([
            struct.pack('<I', header['version']),
            unhexlify(header['prev_block_hash'])[::-1],
            unhexlify(header['merkle_root'])[::-1],
            unhexlify(header['claim_trie_root'])[::-1],
            struct.pack('<III', header['timestamp'], header['bits'], header['nonce'])
        ])

    @staticmethod
    def deserialize(height, header):
        version, = struct.unpack('<I', header[:4])
        timestamp, bits, nonce = struct.unpack('<III', header[100:112])
        return {
            'version': version,
            'prev_block_hash': hexlify(header[4:36][::-1]),
            'merkle_root': hexlify(header[36:68][::-1]),
            'claim_trie_root': hexlify(header[68:100][::-1]),
            'timestamp': timestamp,
            'bits': bits,
            'nonce': nonce,
            'block_height': height,
        }

    def get_next_chunk_target(self, chunk: int) -> ArithUint256:
        return ArithUint256(self.max_target)

    def get_next_block_target(self, max_target: ArithUint256, previous: Optional[dict],
                              current: Optional[dict]) -> ArithUint256:
        # https://github.com/lbryio/lbrycrd/blob/master/src/lbry.cpp
        if previous is None and current is None:
            return max_target
        if previous is None:
            previous = current
        actual_timespan = current['timestamp'] - previous['timestamp']
        modulated_timespan = self.target_timespan + int((actual_timespan - self.target_timespan) / 8)
        minimum_timespan = self.target_timespan - int(self.target_timespan / 8)  # 150 - 18 = 132
        maximum_timespan = self.target_timespan + int(self.target_timespan / 2)  # 150 + 75 = 225
        clamped_timespan = max(minimum_timespan, min(modulated_timespan, maximum_timespan))
        target = ArithUint256.from_compact(current['bits'])
        new_target = min(max_target, (target * clamped_timespan) / self.target_timespan)
        return new_target

    def __len__(self) -> int:
        return self._size

    def __bool__(self):
        return True

    async def get(self, height) -> dict:
        if isinstance(height, slice):
            raise NotImplementedError("Slicing of header chain has not been implemented yet.")
        try:
            return self.deserialize(height, await self.get_raw_header(height))
        except struct.error:
            raise IndexError(f"failed to get {height}, at {len(self)}")

    def estimated_timestamp(self, height, try_real_headers=True):
        if height <= 0:
            return
        if try_real_headers and self.has_header(height):
            offset = height * self.header_size
            return struct.unpack('<I', self.io.getbuffer()[offset + 100: offset + 104])[0]
        return int(self.first_block_timestamp + (height * self.timestamp_average_offset))

    def estimated_julian_day(self, height):
        return date_to_julian_day(date.fromtimestamp(self.estimated_timestamp(height, False)))

    async def get_raw_header(self, height) -> bytes:
        if self.chunk_getter:
            await self.ensure_chunk_at(height)
        if not 0 <= height <= self.height:
            raise IndexError(f"{height} is out of bounds, current height: {self.height}")
        return self._read(height)

    def _read(self, height, count=1):
        offset = height * self.header_size
        return bytes(self.io.getbuffer()[offset: offset + self.header_size * count])

    def chunk_hash(self, start, count):
        return self.hash_header(self._read(start, count)).decode()

    async def ensure_checkpointed_size(self):
        max_checkpointed_height = max(self.checkpoints.keys() or [-1])
        if self.height < max_checkpointed_height:
            self._write(max_checkpointed_height, bytes([0] * self.header_size * 1000))

    async def ensure_chunk_at(self, height):
        async with self.check_chunk_lock:
            if self.has_header(height):
                log.debug("has header %s", height)
                return
            return await self.fetch_chunk(height)

    async def fetch_chunk(self, height):
        log.info("on-demand fetching height %s", height)
        start = (height // 1000) * 1000
        headers = await self.chunk_getter(start)  # pylint: disable=not-callable
        chunk = (
            zlib.decompress(base64.b64decode(headers['base64']), wbits=-15, bufsize=600_000)
        )
        chunk_hash = self.hash_header(chunk).decode()
        if self.checkpoints.get(start) == chunk_hash:
            self._write(start, chunk)
            if start in self.known_missing_checkpointed_chunks:
                self.known_missing_checkpointed_chunks.remove(start)
            return
        elif start not in self.checkpoints:
            return  # todo: fixme
        raise Exception(
            f"Checkpoint mismatch at height {start}. Expected {self.checkpoints[start]}, but got {chunk_hash} instead."
        )

    def has_header(self, height):
        normalized_height = (height // 1000) * 1000
        if normalized_height in self.checkpoints:
            return normalized_height not in self.known_missing_checkpointed_chunks

        empty = '56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d'
        all_zeroes = '789d737d4f448e554b318c94063bbfa63e9ccda6e208f5648ca76ee68896557b'
        return self.chunk_hash(height, 1) not in (empty, all_zeroes)

    async def get_all_missing_headers(self):
        # Heavy operation done in one optimized shot
        for chunk_height, expected_hash in reversed(list(self.checkpoints.items())):
            if chunk_height in self.known_missing_checkpointed_chunks:
                continue
            if self.chunk_hash(chunk_height, 1000) != expected_hash:
                self.known_missing_checkpointed_chunks.add(chunk_height)
        return self.known_missing_checkpointed_chunks

    @property
    def height(self) -> int:
        return len(self)-1

    @property
    def bytes_size(self):
        return len(self) * self.header_size

    async def hash(self, height=None) -> bytes:
        return self.hash_header(
            await self.get_raw_header(height if height is not None else self.height)
        )

    @staticmethod
    def hash_header(header: bytes) -> bytes:
        if header is None:
            return b'0' * 64
        return hexlify(double_sha256(header)[::-1])

    async def connect(self, start: int, headers: bytes) -> int:
        added = 0
        bail = False
        for height, chunk in self._iterate_chunks(start, headers):
            try:
                # validate_chunk() is CPU bound and reads previous chunks from file system
                await self.validate_chunk(height, chunk)
            except InvalidHeader as e:
                bail = True
                chunk = chunk[:(height-e.height)*self.header_size]
            if chunk:
                added += self._write(height, chunk)
            if bail:
                break
        return added

    def _write(self, height, verified_chunk):
        self.io.seek(height * self.header_size, os.SEEK_SET)
        written = self.io.write(verified_chunk) // self.header_size
        # self.io.truncate()
        # .seek()/.write()/.truncate() might also .flush() when needed
        # the goal here is mainly to ensure we're definitely flush()'ing
        self.io.flush()
        self._size = max(self._size or 0, self.io.tell() // self.header_size)
        return written

    async def validate_chunk(self, height, chunk):
        previous_hash, previous_header, previous_previous_header = None, None, None
        if height > 0:
            raw = await self.get_raw_header(height-1)
            previous_header = self.deserialize(height-1, raw)
            previous_hash = self.hash_header(raw)
        if height > 1:
            previous_previous_header = await self.get(height-2)
        chunk_target = self.get_next_chunk_target(height // 2016 - 1)
        for current_hash, current_header in self._iterate_headers(height, chunk):
            block_target = self.get_next_block_target(chunk_target, previous_previous_header, previous_header)
            self.validate_header(height, current_hash, current_header, previous_hash, block_target)
            previous_previous_header = previous_header
            previous_header = current_header
            previous_hash = current_hash

    def validate_header(self, height: int, current_hash: bytes,
                        header: dict, previous_hash: bytes, target: ArithUint256):

        if previous_hash is None:
            if self.genesis_hash is not None and self.genesis_hash != current_hash:
                raise InvalidHeader(
                    height, f"genesis header doesn't match: {current_hash.decode()} "
                            f"vs expected {self.genesis_hash.decode()}")
            return

        if header['prev_block_hash'] != previous_hash:
            raise InvalidHeader(
                height, "previous hash mismatch: {} vs expected {}".format(
                    header['prev_block_hash'].decode(), previous_hash.decode())
            )

        if self.validate_difficulty:

            if header['bits'] != target.compact:
                raise InvalidHeader(
                    height, "bits mismatch: {} vs expected {}".format(
                        header['bits'], target.compact)
                )

            proof_of_work = self.get_proof_of_work(current_hash)
            if proof_of_work > target:
                raise InvalidHeader(
                    height, f"insufficient proof of work: {proof_of_work.value} vs target {target.value}"
                )

    async def repair(self, start_height=0):
        previous_header_hash = fail = None
        batch_size = 36
        for height in range(start_height, self.height, batch_size):
            headers = self._read(height, batch_size)
            if len(headers) % self.header_size != 0:
                headers = headers[:(len(headers) // self.header_size) * self.header_size]
            for header_hash, header in self._iterate_headers(height, headers):
                height = header['block_height']
                if previous_header_hash:
                    if header['prev_block_hash'] != previous_header_hash:
                        fail = True
                elif height == 0:
                    if header_hash != self.genesis_hash:
                        fail = True
                else:
                    # for sanity and clarity, since it is the only way we can end up here
                    assert start_height > 0 and height == start_height
                if fail:
                    log.warning("Header file corrupted at height %s, truncating it.", height - 1)
                    self.io.seek(max(0, (height - 1)) * self.header_size, os.SEEK_SET)
                    self.io.truncate()
                    self.io.flush()
                    self._size = self.io.seek(0, os.SEEK_END) // self.header_size
                    return
                previous_header_hash = header_hash

    @classmethod
    def get_proof_of_work(cls, header_hash: bytes):
        return ArithUint256(int(b'0x' + cls.header_hash_to_pow_hash(header_hash), 16))

    def _iterate_chunks(self, height: int, headers: bytes) -> Iterator[Tuple[int, bytes]]:
        assert len(headers) % self.header_size == 0, f"{len(headers)} {len(headers)%self.header_size}"
        start = 0
        end = (self.chunk_size - height % self.chunk_size) * self.header_size
        while start < end:
            yield height + (start // self.header_size), headers[start:end]
            start = end
            end = min(len(headers), end + self.chunk_size * self.header_size)

    def _iterate_headers(self, height: int, headers: bytes) -> Iterator[Tuple[bytes, dict]]:
        assert len(headers) % self.header_size == 0, len(headers)
        for idx in range(len(headers) // self.header_size):
            start, end = idx * self.header_size, (idx + 1) * self.header_size
            header = headers[start:end]
            yield self.hash_header(header), self.deserialize(height+idx, header)

    @staticmethod
    def header_hash_to_pow_hash(header_hash: bytes):
        header_hash_bytes = unhexlify(header_hash)[::-1]
        h = sha512(header_hash_bytes)
        pow_hash = double_sha256(
            ripemd160(h[:len(h) // 2]) +
            ripemd160(h[len(h) // 2:])
        )
        return hexlify(pow_hash[::-1])
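
A hedged round-trip sketch for the 112-byte header layout handled by serialize() and deserialize() above, assuming the module-level names the class references (HASHES and friends) are available; the field values are arbitrary test data, not a real block header:

raw = Headers.serialize({
    'version': 1,
    'prev_block_hash': '00' * 32,
    'merkle_root': '11' * 32,
    'claim_trie_root': '22' * 32,
    'timestamp': 1466646588,
    'bits': 0x1f00ffff,
    'nonce': 42,
})
assert len(raw) == Headers.header_size          # 112 bytes
parsed = Headers.deserialize(0, raw)
assert parsed['nonce'] == 42
assert parsed['prev_block_hash'] == b'00' * 32  # hexlify() returns bytes
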
Exemple #42
0
def localize(fp, **kw):
    """ Generate em:localized """
    kw = kw

    def sort(f):
        if "en-US" in f["locale"]:
            return 0, f["locale"]
        return 1, f["locale"]

    def get_locale_strings():
        locales = list()
        for f in sorted(files("chrome/locale/*/description.properties")):
            locale = dict(locale=(f.split("/", 3)[2], ))
            with open(f, "r", encoding="utf-8") as lp:
                for l in lp:
                    l = l.strip()
                    if not l or l.startswith("#"):
                        continue
                    k, v = l.split("=", 1)
                    k = k.split(".")
                    k = k[-2] if len(k[-1]) < 3 else k[-1]
                    if not k or not v:
                        continue
                    if k not in locale:
                        locale[k] = list()
                    locale[k] += v,
            locales += locale,
        return locales

    locales = get_locale_strings()

    with Reset(fp):
        rdf = XML(fp.read())

    # kill old localized
    for e in rdf.getElementsByTagNameNS(NS_EM, "localized"):
        e.parentNode.removeChild(e)

    def mapkey(k):
        v = list()
        for e in rdf.getElementsByTagNameNS(NS_EM, k):
            v += e.firstChild.data,
        return k, v

    keys = ("locale", "name", "description", "creator", "homepageURL",
            "developer", "translator", "contributor")
    baseprops = dict(mapkey(k) for k in keys)

    def add_props():
        parent = rdf.getElementsByTagNameNS(NS_EM, "id")[0].parentNode
        for props in sorted(locales, key=sort):
            node = rdf.createElementNS(NS_EM, "em:localized")
            desc = rdf.createElementNS(NS_RDF, "Description")
            for k in keys:
                vals = props.get(k, baseprops.get(k, list()))
                for v in vals:
                    n = rdf.createElementNS(NS_EM, "em:" + k)
                    n.appendChild(rdf.createTextNode(v))
                    desc.appendChild(n)
            parent.appendChild(rdf.createTextNode("\n\t\t"))
            node.appendChild(desc)
            parent.appendChild(node)
        parent.appendChild(rdf.createTextNode("\n\t"))

    add_props()

    io = BytesIO()
    with Reset(io):
        io.write(rdf.toxml(encoding="utf-8"))
        io.truncate()
    rdf.unlink()
    return io
class TCPROSTransport(Transport):
    """
    Generic implementation of TCPROS exchange routines for both topics and services
    """
    transport_type = 'TCPROS'

    def __init__(self, protocol, name, header=None):
        """
        ctor
        @param name str: identifier
        @param protocol TCPROSTransportProtocol protocol implementation    
        @param header dict: (optional) handshake header if transport handshake header was
        already read off of transport.
        @raise TransportInitError if transport cannot be initialized according to arguments
        """
        super(TCPROSTransport, self).__init__(protocol.direction, name=name)
        if not name:
            raise TransportInitError(
                "Unable to initialize transport: name is not set")

        self.protocol = protocol

        self.socket = None
        self.endpoint_id = 'unknown'
        self.dest_address = None  # for reconnection

        if python3 == 0:  # Python 2.x
            self.read_buff = StringIO()
            self.write_buff = StringIO()
        else:  # Python 3.x
            self.read_buff = BytesIO()
            self.write_buff = BytesIO()

        #self.write_buff = StringIO()
        self.header = header

        # #1852 have to hold onto latched messages on subscriber side
        self.is_latched = False
        self.latch = None

        # save the fileno separately so we can garbage collect the
        # socket but still unregister with poll objects
        self._fileno = None

        # these fields are actually set by the remote
        # publisher/service. they are set for tools that connect
        # without knowing the actual field name
        self.md5sum = None
        self.type = None

    def fileno(self):
        """
        Get descriptor for select
        """
        return self._fileno

    def set_socket(self, sock, endpoint_id):
        """
        Set the socket for this transport
        @param sock: socket
        @type  sock: socket.socket
        @param endpoint_id: identifier for connection endpoint
        @type  endpoint_id: str
        @raise TransportInitError: if socket has already been set
        """
        if self.socket is not None:
            raise TransportInitError("socket already initialized")
        self.socket = sock
        self.endpoint_id = endpoint_id
        self._fileno = sock.fileno()

    def connect(self, dest_addr, dest_port, endpoint_id, timeout=None):
        """
        Establish TCP connection to the specified
        address/port. connect() always calls L{write_header()} and
        L{read_header()} after the connection is made
        @param dest_addr: destination IP address
        @type  dest_addr: str
        @param dest_port: destination port
        @type  dest_port: int                
        @param endpoint_id: string identifier for connection (for statistics)
        @type  endpoint_id: str
        @param timeout: (optional keyword) timeout in seconds
        @type  timeout: float
        @raise TransportInitError: if unable to create connection
        """
        try:
            self.endpoint_id = endpoint_id
            self.dest_address = (dest_addr, dest_port)

            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if _is_use_tcp_keepalive():
                # OSX (among others) does not define these options
                if hasattr(socket, 'TCP_KEEPCNT') and \
                   hasattr(socket, 'TCP_KEEPIDLE') and \
                   hasattr(socket, 'TCP_KEEPINTVL'):
                    # turn on KEEPALIVE
                    s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                    # - number of keepalive failures before actual connection failure
                    s.setsockopt(socket.SOL_TCP, socket.TCP_KEEPCNT, 9)
                    # - timeout before starting KEEPALIVE process
                    s.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE, 60)
                    # - interval to send KEEPALIVE after IDLE timeout
                    s.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL, 10)
            if timeout is not None:
                s.settimeout(timeout)
            self.socket = s
            self.socket.connect((dest_addr, dest_port))
            self.write_header()
            self.read_header()
        except TransportInitError as tie:
            rospyerr(
                "Unable to initiate TCP/IP socket to %s:%s (%s): %s" %
                (dest_addr, dest_port, endpoint_id, traceback.format_exc()))
            raise
        except Exception as e:
            #logerr("Unknown error initiating TCP/IP socket to %s:%s (%s): %s"%(dest_addr, dest_port, endpoint_id, str(e)))
            rospywarn(
                "Unknown error initiating TCP/IP socket to %s:%s (%s): %s" %
                (dest_addr, dest_port, endpoint_id, traceback.format_exc()))

            # FATAL: no reconnection as error is unknown
            self.done = True
            if self.socket:
                try:
                    self.socket.shutdown(socket.SHUT_RDWR)
                except:
                    pass
                finally:
                    self.socket.close()
            self.socket = None

            raise TransportInitError(str(e))  #re-raise i/o error

    def _validate_header(self, header):
        """
        Validate header and initialize fields accordingly
        @param header: header fields from publisher
        @type  header: dict
        @raise TransportInitError: if header fails to validate
        """
        self.header = header
        if 'error' in header:
            raise TransportInitError("remote error reported: %s" %
                                     header['error'])
        for required in ['md5sum', 'type']:
            if required not in header:
                raise TransportInitError("header missing required field [%s]" %
                                         required)
        self.md5sum = header['md5sum']
        self.type = header['type']
        if header.get('latching', '0') == '1':
            self.is_latched = True

    def write_header(self):
        """Writes the TCPROS header to the active connection."""
        # socket may still be getting spun up, so wait for it to be writable
        sock = self.socket
        protocol = self.protocol
        # race condition on close; the better fix is to pass these in,
        # functional style, but right now we're trying to cause minimal
        # perturbation to the codebase.
        if sock is None or protocol is None:
            return
        fileno = sock.fileno()
        ready = None
        while not ready:
            _, ready, _ = select.select([], [fileno], [])
        logger.debug("[%s]: writing header", self.name)
        sock.setblocking(1)
        self.stat_bytes += write_ros_handshake_header(
            sock, protocol.get_header_fields())

    def read_header(self):
        """
        Read TCPROS header from active socket
        @raise TransportInitError if header fails to validate
        """
        sock = self.socket
        if sock is None:
            return
        sock.setblocking(1)
        self._validate_header(
            read_ros_handshake_header(sock, self.read_buff,
                                      self.protocol.buff_size))

    def send_message(self, msg, seq):
        """
        Convenience routine for services to send a message across a
        particular connection. NOTE: write_data is much more efficient
        if same message is being sent to multiple connections. Not
        threadsafe.
        @param msg: message to send
        @type  msg: Msg
        @param seq: sequence number for message
        @type  seq: int
        @raise TransportException: if error occurred sending message
        """
        # this will call write_data(), so no need to keep track of stats
        serialize_message(self.write_buff, seq, msg)
        self.write_data(self.write_buff.getvalue())
        self.write_buff.truncate(0)

    def write_data(self, data):
        """
        Write raw data to transport
        @raise TransportInitError: transport could not be initialized
        @raise TransportTerminated: no longer open for publishing
        """
        if not self.socket:
            raise TransportInitError(
                "TCPROS transport was not successfully initialized")
        if self.done:
            raise TransportTerminated("connection closed")
        try:
            #TODO: get rid of sendalls and replace with async-style publishing
            self.socket.sendall(data)
            self.stat_bytes += len(data)
            self.stat_num_msg += 1
        except IOError as ioe:
            #for now, just document common errno's in code
            (errno, msg) = ioe.args
            if errno == 32:  #broken pipe
                logdebug("ERROR: Broken Pipe")
                self.close()
                raise TransportTerminated(str(errno) + msg)
            raise  #re-raise
        except socket.error as se:
            #for now, just document common errno's in code
            (errno, msg) = se.args
            if errno == 32:  #broken pipe
                logdebug("[%s]: Closing connection [%s] due to broken pipe",
                         self.name, self.endpoint_id)
                self.close()
                raise TransportTerminated(msg)
            elif errno == 104:  #connection reset by peer
                logdebug("[%s]: Peer [%s] has closed connection", self.name,
                         self.endpoint_id)
                self.close()
                raise TransportTerminated(msg)
            else:
                rospydebug("unknown socket error writing data: %s",
                           traceback.format_exc())
                logdebug(
                    "[%s]: closing connection [%s] due to unknown socket error: %s",
                    self.name, self.endpoint_id, msg)
                self.close()
                raise TransportTerminated(str(errno) + ' ' + msg)
        return True

    def receive_once(self):
        """
        block until messages are read off of socket
        @return: list of newly received messages
        @rtype: [Msg]
        @raise TransportException: if unable to receive message due to error
        """
        sock = self.socket
        if sock is None:
            raise TransportException("connection not initialized")
        b = self.read_buff
        msg_queue = []
        p = self.protocol
        try:
            sock.setblocking(1)
            while not msg_queue and not self.done and not is_shutdown():
                if b.tell() >= 4:
                    p.read_messages(b, msg_queue, sock)
                if not msg_queue:
                    recv_buff(sock, b, p.buff_size)
            self.stat_num_msg += len(msg_queue)  #STATS
            # set the _connection_header field
            for m in msg_queue:
                m._connection_header = self.header

            # #1852: keep track of last latched message
            if self.is_latched and msg_queue:
                self.latch = msg_queue[-1]

            return msg_queue

        except DeserializationError as e:
            rospyerr(traceback.format_exc())
            raise TransportException(
                "receive_once[%s]: DeserializationError %s" %
                (self.name, str(e)))
        except TransportTerminated as e:
            raise  #reraise
        except ServiceException as e:
            raise
        except Exception as e:
            rospyerr(traceback.format_exc())
            raise TransportException("receive_once[%s]: unexpected error %s" %
                                     (self.name, str(e)))

    def _reconnect(self):
        # This reconnection logic is very hacky right now.  I need to
        # rewrite the I/O core so that this is handled more centrally.

        if self.dest_address is None:
            raise ROSInitException(
                "internal error with reconnection state: address not stored")

        interval = 0.5  # seconds
        while self.socket is None and not self.done and not rospy.is_shutdown(
        ):
            try:
                # set a timeout so that we can continue polling for
                # exit.  30. is a bit high, but I'm concerned about
                # embedded platforms.  To do this properly, we'd have
                # to move to non-blocking routines.
                self.connect(self.dest_address[0],
                             self.dest_address[1],
                             self.endpoint_id,
                             timeout=30.)
            except TransportInitError:
                self.socket = None

            if self.socket is None:
                # exponential backoff
                interval = interval * 2

            time.sleep(interval)

    def receive_loop(self, msgs_callback):
        """
        Receive messages until shutdown
        @param msgs_callback: callback to invoke for new messages received    
        @type  msgs_callback: fn([msg])
        """
        # - use assert here as this would be an internal error, aka bug
        logger.debug("receive_loop for [%s]", self.name)
        try:
            while not self.done and not is_shutdown():
                try:
                    if self.socket is not None:
                        msgs = self.receive_once()
                        if not self.done and not is_shutdown():
                            msgs_callback(msgs)
                    else:
                        self._reconnect()

                except TransportException as e:
                    # set socket to None so we reconnect
                    try:
                        if self.socket is not None:
                            try:
                                self.socket.shutdown(socket.SHUT_RDWR)
                            except:
                                pass
                            finally:
                                self.socket.close()
                    except:
                        pass
                    self.socket = None

                except DeserializationError as e:
                    #TODO: how should we handle reconnect in this case?

                    logerr(
                        "[%s] error deserializing incoming request: %s" %
                        (self.name, str(e)))
                    rospyerr(
                        "[%s] error deserializing incoming request: %s" %
                        (self.name, traceback.format_exc()))
                except:
                    # in many cases this will be a normal hangup, but log internally
                    try:
                        #1467 sometimes we get exceptions due to
                        #interpreter shutdown, so blanket ignore those if
                        #the reporting fails
                        rospydebug(
                            "exception in receive loop for [%s], may be normal. Exception is %s",
                            self.name, traceback.format_exc())
                    except:
                        pass

            rospydebug("receive_loop[%s]: done condition met, exited loop" %
                       self.name)
        finally:
            if not self.done:
                self.close()

    def close(self):
        """close i/o and release resources"""
        if not self.done:
            try:
                if self.socket is not None:
                    try:
                        self.socket.shutdown(socket.SHUT_RDWR)
                    except:
                        pass
                    finally:
                        self.socket.close()
            finally:
                self.socket = self.read_buff = self.write_buff = self.protocol = None
                super(TCPROSTransport, self).close()
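send_message() above serializes into a single reusable write buffer, sends the accumulated bytes, and then truncates the buffer for the next message. A self-contained sketch of that buffer-reuse pattern, with a hypothetical serialize() standing in for serialize_message() (this is not rospy's API, just the BytesIO handling in isolation); pairing seek(0) with truncate(0) keeps the position at the start of the buffer between messages:

from io import BytesIO

def serialize(buff, seq, payload):
    # Hypothetical placeholder for serialize_message(): append a length-prefixed record.
    buff.write(len(payload).to_bytes(4, "big"))
    buff.write(seq.to_bytes(4, "big"))
    buff.write(payload)

write_buff = BytesIO()
for seq, payload in enumerate([b"hello", b"world"]):
    serialize(write_buff, seq, payload)
    data = write_buff.getvalue()  # bytes that would go to socket.sendall()
    # Reset the buffer for the next message: truncate(0) alone does not move the
    # stream position, so seek(0) is needed before the next write.
    write_buff.seek(0)
    write_buff.truncate(0)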
Exemple #44
0
class THeaderTransport(TTransportBase, CReadableTransport):
    """Transport that sends headers.  Also understands framed/unframed/HTTP
    transports and will do the right thing"""

    __max_frame_size = MAX_FRAME_SIZE

    # Defaults to current user, but there is also a setter below.
    __identity = None
    IDENTITY_HEADER = "identity"
    ID_VERSION_HEADER = "id_version"
    ID_VERSION = "1"

    def __init__(self, trans, client_types=None, client_type=None):
        self.__trans = trans
        self.__rbuf = StringIO()
        self.__rbuf_frame = False
        self.__wbuf = StringIO()
        self.seq_id = 0
        self.__flags = 0
        self.__read_transforms = []
        self.__write_transforms = []
        self.__supported_client_types = set(client_types
                                            or (CLIENT_TYPE.HEADER, ))
        self.__proto_id = T_COMPACT_PROTOCOL  # default to compact like c++
        self.__client_type = client_type or CLIENT_TYPE.HEADER
        self.__read_headers = {}
        self.__read_persistent_headers = {}
        self.__write_headers = {}
        self.__write_persistent_headers = {}

        self.__supported_client_types.add(self.__client_type)

        # If we support unframed binary / framed binary also support compact
        if CLIENT_TYPE.UNFRAMED_DEPRECATED in self.__supported_client_types:
            self.__supported_client_types.add(
                CLIENT_TYPE.UNFRAMED_COMPACT_DEPRECATED)
        if CLIENT_TYPE.FRAMED_DEPRECATED in self.__supported_client_types:
            self.__supported_client_types.add(CLIENT_TYPE.FRAMED_COMPACT)

    def set_header_flag(self, flag):
        self.__flags |= flag

    def clear_header_flag(self, flag):
        self.__flags &= ~flag

    def header_flags(self):
        return self.__flags

    def set_max_frame_size(self, size):
        if size > MAX_BIG_FRAME_SIZE:
            raise TTransportException(
                TTransportException.INVALID_FRAME_SIZE,
                "Cannot set max frame size > %s" % MAX_BIG_FRAME_SIZE)
        if size > MAX_FRAME_SIZE and self.__client_type != CLIENT_TYPE.HEADER:
            raise TTransportException(
                TTransportException.INVALID_FRAME_SIZE,
                "Cannot set max frame size > %s for clients other than HEADER"
                % MAX_FRAME_SIZE)
        self.__max_frame_size = size

    def get_peer_identity(self):
        if self.IDENTITY_HEADER in self.__read_headers:
            if self.__read_headers[self.ID_VERSION_HEADER] == self.ID_VERSION:
                return self.__read_headers[self.IDENTITY_HEADER]
        return None

    def set_identity(self, identity):
        self.__identity = identity

    def get_protocol_id(self):
        return self.__proto_id

    def set_protocol_id(self, proto_id):
        self.__proto_id = proto_id

    def set_header(self, str_key, str_value):
        self.__write_headers[str_key] = str_value

    def get_write_headers(self):
        return self.__write_headers

    def get_headers(self):
        return self.__read_headers

    def clear_headers(self):
        self.__write_headers.clear()

    def set_persistent_header(self, str_key, str_value):
        self.__write_persistent_headers[str_key] = str_value

    def get_write_persistent_headers(self):
        return self.__write_persistent_headers

    def clear_persistent_headers(self):
        self.__write_persistent_headers.clear()

    def add_transform(self, trans_id):
        self.__write_transforms.append(trans_id)

    def _reset_protocol(self):
        # HTTP calls that are one way need to flush here.
        if self.__client_type == CLIENT_TYPE.HTTP_SERVER:
            self.flush()
        # set to anything except unframed
        self.__client_type = CLIENT_TYPE.UNKNOWN
        # Read header bytes to check which protocol to decode
        self.readFrame(0)

    def getTransport(self):
        return self.__trans

    def isOpen(self):
        return self.getTransport().isOpen()

    def open(self):
        return self.getTransport().open()

    def close(self):
        return self.getTransport().close()

    def read(self, sz):
        ret = self.__rbuf.read(sz)
        if len(ret) == sz:
            return ret

        if self.__client_type in (CLIENT_TYPE.UNFRAMED_DEPRECATED,
                                  CLIENT_TYPE.UNFRAMED_COMPACT_DEPRECATED):
            return ret + self.getTransport().readAll(sz - len(ret))

        self.readFrame(sz - len(ret))
        return ret + self.__rbuf.read(sz - len(ret))

    readAll = read  # TTransportBase.readAll does a needless copy here.

    def readFrame(self, req_sz):
        self.__rbuf_frame = True
        word1 = self.getTransport().readAll(4)
        sz = unpack('!I', word1)[0]
        proto_id = word1[0] if PY3 else ord(word1[0])
        if proto_id == TBinaryProtocol.PROTOCOL_ID:
            # unframed
            self.__client_type = CLIENT_TYPE.UNFRAMED_DEPRECATED
            self.__proto_id = T_BINARY_PROTOCOL
            if req_sz <= 4:  # check for reads < 0.
                self.__rbuf = StringIO(word1)
            else:
                self.__rbuf = StringIO(word1 +
                                       self.getTransport().read(req_sz - 4))
        elif proto_id == TCompactProtocol.PROTOCOL_ID:
            self.__client_type = CLIENT_TYPE.UNFRAMED_COMPACT_DEPRECATED
            self.__proto_id = T_COMPACT_PROTOCOL
            if req_sz <= 4:  # check for reads < 0.
                self.__rbuf = StringIO(word1)
            else:
                self.__rbuf = StringIO(word1 +
                                       self.getTransport().read(req_sz - 4))
        elif sz == HTTP_SERVER_MAGIC:
            self.__client_type = CLIENT_TYPE.HTTP_SERVER
            mf = self.getTransport().handle.makefile('rb', -1)

            self.handler = RequestHandler(mf, 'client_address:port', '')
            self.header = self.handler.wfile
            self.__rbuf = StringIO(self.handler.data)
        else:
            if sz == BIG_FRAME_MAGIC:
                sz = unpack('!Q', self.getTransport().readAll(8))[0]
            # could be header format or framed.  Check next two bytes.
            magic = self.getTransport().readAll(2)
            proto_id = magic[0] if PY3 else ord(magic[0])
            if proto_id == TCompactProtocol.PROTOCOL_ID:
                self.__client_type = CLIENT_TYPE.FRAMED_COMPACT
                self.__proto_id = T_COMPACT_PROTOCOL
                _frame_size_check(sz, self.__max_frame_size, header=False)
                self.__rbuf = StringIO(magic +
                                       self.getTransport().readAll(sz - 2))
            elif proto_id == TBinaryProtocol.PROTOCOL_ID:
                self.__client_type = CLIENT_TYPE.FRAMED_DEPRECATED
                self.__proto_id = T_BINARY_PROTOCOL
                _frame_size_check(sz, self.__max_frame_size, header=False)
                self.__rbuf = StringIO(magic +
                                       self.getTransport().readAll(sz - 2))
            elif magic == PACKED_HEADER_MAGIC:
                self.__client_type = CLIENT_TYPE.HEADER
                _frame_size_check(sz, self.__max_frame_size)
                # flags(2), seq_id(4), header_size(2)
                n_header_meta = self.getTransport().readAll(8)
                self.__flags, self.seq_id, header_size = unpack(
                    '!HIH', n_header_meta)
                data = StringIO()
                data.write(magic)
                data.write(n_header_meta)
                data.write(self.getTransport().readAll(sz - 10))
                data.seek(10)
                self.read_header_format(sz - 10, header_size, data)
            else:
                self.__client_type = CLIENT_TYPE.UNKNOWN
                raise TTransportException(
                    TTransportException.INVALID_CLIENT_TYPE,
                    "Could not detect client transport type")

        if self.__client_type not in self.__supported_client_types:
            raise TTransportException(
                TTransportException.INVALID_CLIENT_TYPE,
                "Client type {} not supported on server".format(
                    self.__client_type))

    def read_header_format(self, sz, header_size, data):
        # clear out any previous transforms
        self.__read_transforms = []

        header_size = header_size * 4
        if header_size > sz:
            raise TTransportException(TTransportException.INVALID_FRAME_SIZE,
                                      "Header size is larger than frame")
        end_header = header_size + data.tell()

        self.__proto_id = readVarint(data)
        num_headers = readVarint(data)

        if self.__proto_id == 1 and self.__client_type != \
                CLIENT_TYPE.HTTP_SERVER:
            raise TTransportException(
                TTransportException.INVALID_CLIENT_TYPE,
                "Trying to recv JSON encoding over binary")

        # Read the headers.  Data for each header varies.
        for _ in range(0, num_headers):
            trans_id = readVarint(data)
            if trans_id in (TRANSFORM.ZLIB, TRANSFORM.SNAPPY, TRANSFORM.ZSTD):
                self.__read_transforms.insert(0, trans_id)
            elif trans_id == TRANSFORM.HMAC:
                raise TApplicationException(
                    TApplicationException.INVALID_TRANSFORM,
                    "Hmac transform is no longer supported: %i" % trans_id)
            else:
                # TApplicationException will be sent back to client
                raise TApplicationException(
                    TApplicationException.INVALID_TRANSFORM,
                    "Unknown transform in client request: %i" % trans_id)

        # Clear out previous info headers.
        self.__read_headers.clear()

        # Read the info headers.
        while data.tell() < end_header:
            info_id = readVarint(data)
            if info_id == INFO.NORMAL:
                _read_info_headers(data, end_header, self.__read_headers)
            elif info_id == INFO.PERSISTENT:
                _read_info_headers(data, end_header,
                                   self.__read_persistent_headers)
            else:
                break  # Unknown header.  Stop info processing.

        if self.__read_persistent_headers:
            self.__read_headers.update(self.__read_persistent_headers)

        # Skip the rest of the header
        data.seek(end_header)

        payload = data.read(sz - header_size)

        # Read the data section.
        self.__rbuf = StringIO(self.untransform(payload))

    def write(self, buf):
        self.__wbuf.write(buf)

    def transform(self, buf):
        for trans_id in self.__write_transforms:
            if trans_id == TRANSFORM.ZLIB:
                buf = zlib.compress(buf)
            elif trans_id == TRANSFORM.SNAPPY:
                buf = snappy.compress(buf)
            elif trans_id == TRANSFORM.ZSTD:
                buf = zstd.ZstdCompressor(
                    write_content_size=True).compress(buf)
            else:
                raise TTransportException(
                    TTransportException.INVALID_TRANSFORM,
                    "Unknown transform during send")
        return buf

    def untransform(self, buf):
        for trans_id in self.__read_transforms:
            if trans_id == TRANSFORM.ZLIB:
                buf = zlib.decompress(buf)
            elif trans_id == TRANSFORM.SNAPPY:
                buf = snappy.decompress(buf)
            elif trans_id == TRANSFORM.ZSTD:
                buf = zstd.ZstdDecompressor().decompress(buf)
            if trans_id not in self.__write_transforms:
                self.__write_transforms.append(trans_id)
        return buf

    def flush(self):
        self.flushImpl(False)

    def onewayFlush(self):
        self.flushImpl(True)

    def _flushHeaderMessage(self, buf, wout, wsz):
        """Write a message for CLIENT_TYPE.HEADER

        @param buf(StringIO): Buffer to write message to
        @param wout(str): Payload
        @param wsz(int): Payload length
        """
        transform_data = StringIO()
        # For now, no transforms require extra data.
        num_transforms = len(self.__write_transforms)
        for trans_id in self.__write_transforms:
            transform_data.write(getVarint(trans_id))

        # Add in special flags.
        if self.__identity:
            self.__write_headers[self.ID_VERSION_HEADER] = self.ID_VERSION
            self.__write_headers[self.IDENTITY_HEADER] = self.__identity

        info_data = StringIO()

        # Write persistent kv-headers
        _flush_info_headers(info_data, self.get_write_persistent_headers(),
                            INFO.PERSISTENT)

        # Write non-persistent kv-headers
        _flush_info_headers(info_data, self.__write_headers, INFO.NORMAL)

        header_data = StringIO()
        header_data.write(getVarint(self.__proto_id))
        header_data.write(getVarint(num_transforms))

        header_size = transform_data.tell() + header_data.tell() + \
            info_data.tell()

        padding_size = 4 - (header_size % 4)
        header_size = header_size + padding_size

        # MAGIC(2) | FLAGS(2) + SEQ_ID(4) + HEADER_SIZE(2)
        wsz += header_size + 10
        if wsz > MAX_FRAME_SIZE:
            buf.write(pack("!I", BIG_FRAME_MAGIC))
            buf.write(pack("!Q", wsz))
        else:
            buf.write(pack("!I", wsz))
        buf.write(pack("!HH", HEADER_MAGIC >> 16, self.__flags))
        buf.write(pack("!I", self.seq_id))
        buf.write(pack("!H", header_size // 4))

        buf.write(header_data.getvalue())
        buf.write(transform_data.getvalue())
        buf.write(info_data.getvalue())

        # Pad out the header with 0x00
        for _ in range(0, padding_size, 1):
            buf.write(pack("!c", b'\0'))

        # Send data section
        buf.write(wout)

    def flushImpl(self, oneway):
        wout = self.__wbuf.getvalue()
        wout = self.transform(wout)
        wsz = len(wout)

        # reset wbuf before write/flush to preserve state on underlying failure
        self.__wbuf.seek(0)
        self.__wbuf.truncate()

        if self.__proto_id == 1 and self.__client_type != CLIENT_TYPE.HTTP_SERVER:
            raise TTransportException(
                TTransportException.INVALID_CLIENT_TYPE,
                "Trying to send JSON encoding over binary")

        buf = StringIO()
        if self.__client_type == CLIENT_TYPE.HEADER:
            self._flushHeaderMessage(buf, wout, wsz)
        elif self.__client_type in (CLIENT_TYPE.FRAMED_DEPRECATED,
                                    CLIENT_TYPE.FRAMED_COMPACT):
            buf.write(pack("!i", wsz))
            buf.write(wout)
        elif self.__client_type in (CLIENT_TYPE.UNFRAMED_DEPRECATED,
                                    CLIENT_TYPE.UNFRAMED_COMPACT_DEPRECATED):
            buf.write(wout)
        elif self.__client_type == CLIENT_TYPE.HTTP_SERVER:
            # Reset the client type if we sent something -
            # oneway calls via HTTP expect a status response otherwise
            buf.write(self.header.getvalue())
            buf.write(wout)
            self.__client_type = CLIENT_TYPE.HEADER
        elif self.__client_type == CLIENT_TYPE.UNKNOWN:
            raise TTransportException(TTransportException.INVALID_CLIENT_TYPE,
                                      "Unknown client type")

        # We don't include the framing bytes as part of the frame size check
        frame_size = buf.tell() - (4 if wsz < MAX_FRAME_SIZE else 12)
        _frame_size_check(frame_size,
                          self.__max_frame_size,
                          header=self.__client_type == CLIENT_TYPE.HEADER)
        self.getTransport().write(buf.getvalue())
        if oneway:
            self.getTransport().onewayFlush()
        else:
            self.getTransport().flush()

    # Implement the CReadableTransport interface.
    @property
    def cstringio_buf(self):
        if not self.__rbuf_frame:
            self.readFrame(0)
        return self.__rbuf

    def cstringio_refill(self, prefix, reqlen):
        # self.__rbuf will already be empty here because fastproto doesn't
        # ask for a refill until the previous buffer is empty.  Therefore,
        # we can start reading new frames immediately.

        # On unframed clients, there is a chance there is something left
        # in rbuf, and the read pointer is not advanced by fastproto
        # so seek to the end to be safe
        self.__rbuf.seek(0, 2)
        while len(prefix) < reqlen:
            prefix += self.read(reqlen)
        self.__rbuf = StringIO(prefix)
        return self.__rbuf
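_flushHeaderMessage() above packs a small fixed preamble, a variable-length header section padded to a 4-byte boundary, and the payload behind a single frame-length prefix. A condensed sketch of just that layout arithmetic (a hypothetical helper, not part of fbthrift; magic, flags and the header body are taken as opaque inputs):

from struct import pack

def header_frame(magic, flags, seq_id, header_body, payload):
    # Mirror the framing written for CLIENT_TYPE.HEADER above:
    # MAGIC(2) | FLAGS(2) | SEQ_ID(4) | HEADER_SIZE(2) | header | padding | payload,
    # with the header section NUL-padded to a 4-byte boundary, its size stored
    # in 4-byte words, and the whole body behind a 4-byte frame-length prefix.
    padding = 4 - (len(header_body) % 4)  # same padding rule as the code above
    header_size = len(header_body) + padding
    body = (pack("!HH", magic, flags)
            + pack("!I", seq_id)
            + pack("!H", header_size // 4)
            + header_body + b"\x00" * padding
            + payload)
    return pack("!I", len(body)) + body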
Exemple #45
0
class REL:
    def __init__(self):
        self.data = BytesIO()

        self.id = None
        self.sections = []
        self.name_offset = 0
        self.name_length = 0
        self.rel_format_version = 3

        self.bss_size = 0

        self.relocation_entries_for_module = OrderedDict()

        self.prolog_section = 0
        self.epilog_section = 0
        self.unresolved_section = 0
        self.prolog_offset = 0
        self.epilog_offset = 0
        self.unresolved_offset = 0

        self.alignment = 8
        self.bss_alignment = 1
        self.fix_size = 0

    def read_from_file(self, file_path):
        with open(file_path, "rb") as file:
            data = BytesIO(file.read())

        self.read(data)

    def read(self, data):
        self.data = data

        data = self.data

        self.id = read_u32(data, 0)

        self.sections = []
        self.num_sections = read_u32(data, 0xC)
        self.section_info_table_offset = read_u32(data, 0x10)
        for section_index in range(0, self.num_sections):
            section_info_offset = self.section_info_table_offset + section_index * RELSection.ENTRY_SIZE
            section = RELSection()
            section.read(data, section_info_offset)
            self.sections.append(section)

        self.name_offset = read_u32(data, 0x14)
        self.name_length = read_u32(data, 0x18)
        self.rel_format_version = read_u32(data, 0x1C)

        self.bss_size = read_u32(data, 0x20)

        relocation_data_offset_for_module = OrderedDict()
        self.relocation_table_offset = read_u32(data, 0x24)
        self.imp_table_offset = read_u32(data, 0x28)
        self.imp_table_length = read_u32(data, 0x2C)
        offset = self.imp_table_offset
        while offset < self.imp_table_offset + self.imp_table_length:
            module_num = read_u32(data, offset)
            relocation_data_offset = read_u32(data, offset + 4)
            relocation_data_offset_for_module[
                module_num] = relocation_data_offset
            offset += 8

        self.relocation_entries_for_module = OrderedDict()
        curr_section_num = None
        for module_num, relocation_data_offset in relocation_data_offset_for_module.items(
        ):
            self.relocation_entries_for_module[module_num] = []

            offset = relocation_data_offset
            prev_relocation_offset = 0
            while True:
                relocation_type = RELRelocationType(read_u8(data, offset + 2))
                if relocation_type == RELRelocationType.R_DOLPHIN_END:
                    break

                relocation_data_entry = RELRelocation()
                relocation_data_entry.read(data, offset,
                                           prev_relocation_offset,
                                           curr_section_num)
                prev_relocation_offset = relocation_data_entry.relocation_offset

                if relocation_data_entry.relocation_type == RELRelocationType.R_DOLPHIN_SECTION:
                    curr_section_num = relocation_data_entry.section_num_to_relocate_against
                    prev_relocation_offset = 0
                else:
                    self.relocation_entries_for_module[module_num].append(
                        relocation_data_entry)

                offset += RELRelocation.ENTRY_SIZE

        self.prolog_section = read_u8(data, 0x30)
        self.epilog_section = read_u8(data, 0x31)
        self.unresolved_section = read_u8(data, 0x32)
        self.prolog_offset = read_u32(data, 0x34)
        self.epilog_offset = read_u32(data, 0x38)
        self.unresolved_offset = read_u32(data, 0x3C)

        self.alignment = read_u32(data, 0x40)
        self.bss_alignment = read_u32(data, 0x44)

        # Space after this fix_size offset can be reused for other purposes,
        # such as reusing the space that originally held the relocation list for .bss static variables instead.
        self.fix_size = read_u32(data, 0x48)

        self.bss_section_index = None  # The byte at offset 0x33 in the REL is reserved for this value at runtime.
        for section_index, section in enumerate(self.sections):
            if section.is_bss:
                self.bss_section_index = section_index
                section.offset = self.bss_offset
                break

    @property
    def bss_offset(self):
        # BSS doesn't start until the next 0x20 byte alignment after the end of the initialized data (specified by fix_size).
        return (self.fix_size + 0x1F) & ~(0x1F)

    def convert_rel_offset_to_section_index_and_relative_offset(self, offset):
        section_index = None
        relative_offset = None

        for section in self.sections:
            if section.is_uninitialized:
                continue

            if section.offset <= offset < section.offset + section.length:
                section_index = self.sections.index(section)
                relative_offset = offset - section.offset
                break

        if section_index is None:
            raise Exception(
                "Offset %04X is not in the data for any of the REL sections" %
                offset)

        return (section_index, relative_offset)

    def convert_rel_offset_to_section_data_and_relative_offset(self, offset):
        data = None
        relative_offset = None

        for section in self.sections:
            if section.is_uninitialized:
                continue

            if section.offset <= offset < section.offset + section.length:
                data = section.data
                relative_offset = offset - section.offset
                break

        return (data, relative_offset)

    def read_data(self, read_callback, offset, *args):
        # This function allows reading from a REL using the offset from the start of the REL file, instead of needing to know the section index and offset within that section.

        data, relative_offset = self.convert_rel_offset_to_section_data_and_relative_offset(
            offset)

        if data is None:
            raise Exception(
                "Offset %04X is not in the data for any of the REL sections" %
                offset)

        return read_callback(data, relative_offset, *args)

    def write_data(self,
                   write_callback,
                   offset,
                   *args,
                   delete_relocations=False):
        # This function allows writing to a REL using the offset from the start of the REL file, instead of needing to know the section index and offset within that section.

        data, relative_offset = self.convert_rel_offset_to_section_data_and_relative_offset(
            offset)

        if data is None:
            raise Exception(
                "Offset %04X is not in the data for any of the REL sections" %
                offset)

        write_callback(data, relative_offset, *args)

    def delete_relocation_in_range(self, offset, length):
        for module_num, relocations in self.relocation_entries_for_module.items(
        ):
            relocations_to_delete = []

            for relocation in relocations:
                curr_section = self.sections[relocation.curr_section_num]
                relocation_absolute_offset = relocation.relocation_offset + curr_section.offset

                if relocation_absolute_offset >= offset and relocation_absolute_offset < offset + length:
                    relocations_to_delete.append(relocation)

            for relocation in relocations_to_delete:
                relocations.remove(relocation)

    def save_to_file(self, file_path, preserve_section_data_offsets=False):
        self.save_changes(
            preserve_section_data_offsets=preserve_section_data_offsets)

        with open(file_path, "wb") as f:
            f.write(read_all_bytes(self.data))

    def save_changes(self, preserve_section_data_offsets=False):
        self.data.truncate(0)
        data = self.data

        write_u32(data, 0x00, self.id)
        write_u32(data, 0x04, 0)
        write_u32(data, 0x08, 0)
        self.num_sections = len(self.sections)
        write_u32(data, 0x0C, self.num_sections)
        write_u32(data, 0x14, self.name_offset)
        write_u32(data, 0x18, self.name_length)
        write_u32(data, 0x1C, self.rel_format_version)
        write_u32(
            data, 0x20,
            self.bss_size)  # TODO recalculate this properly when necessary

        self.section_info_table_offset = 0x4C
        write_u32(data, 0x10, self.section_info_table_offset)
        next_section_info_offset = self.section_info_table_offset
        next_section_data_offset = self.section_info_table_offset + self.num_sections * RELSection.ENTRY_SIZE
        next_section_data_offset = pad_offset_to_nearest(
            next_section_data_offset, 4
        )  # TODO why is 4 more accurate here than the 8 from self.alignment?
        for section_index, section in enumerate(self.sections):
            if preserve_section_data_offsets:
                if section.is_uninitialized:
                    # An uninitialized section.
                    # We don't need to preserve the data offsets for these. Do nothing.
                    pass
                else:
                    assert section.offset >= next_section_data_offset
                    next_section_data_offset = section.offset

            section.save(data, next_section_info_offset,
                         next_section_data_offset, self.bss_size)
            next_section_info_offset += RELSection.ENTRY_SIZE
            if not section.is_bss:
                next_section_data_offset += section.length

            next_section_data_offset = pad_offset_to_nearest(
                next_section_data_offset, 4)

        # We need to reorder the relocations list before writing it so that relocations against this current REL and relocations against main come after relocations against other RELs.
        # This is because the game assumes those two are always last, and shrinks the size of the imp table to not include those and anything after them upon first relocation (because those two are guaranteed to be complete on first relocation, unlike relocations against any other REL).
        if self.id in self.relocation_entries_for_module:
            relocations_against_this_rel = self.relocation_entries_for_module.pop(
                self.id)
            self.relocation_entries_for_module[
                self.id] = relocations_against_this_rel
        if 0 in self.relocation_entries_for_module:
            relocations_against_main = self.relocation_entries_for_module.pop(
                0)
            self.relocation_entries_for_module[0] = relocations_against_main

        self.imp_table_offset = data_len(data)
        imp_table_size = len(self.relocation_entries_for_module) * 8
        imp_table_end = self.imp_table_offset + imp_table_size
        self.relocation_table_offset = imp_table_end
        self.fix_size = self.relocation_table_offset
        write_u32(data, 0x24, self.relocation_table_offset)
        write_u32(data, 0x28, self.imp_table_offset)
        write_u32(data, 0x2C, imp_table_size)
        next_imp_offset = self.imp_table_offset
        next_relocation_entry_offset = self.relocation_table_offset
        for module_num, relocation_data_entries in self.relocation_entries_for_module.items(
        ):
            write_u32(data, next_imp_offset + 0x00, module_num)
            write_u32(data, next_imp_offset + 0x04,
                      next_relocation_entry_offset)
            next_imp_offset += 8

            # Sort the relocations first by their section, then by their offset within the section.
            relocation_data_entries.sort(key=lambda reloc: (
                reloc.curr_section_num, reloc.relocation_offset))

            curr_section_num = None
            prev_relocation_offset = 0
            for relocation_data_entry in relocation_data_entries:
                if relocation_data_entry.curr_section_num != curr_section_num:
                    curr_section_num = relocation_data_entry.curr_section_num
                    prev_relocation_offset = 0

                    section_start_entry = RELRelocation()
                    section_start_entry.relocation_type = RELRelocationType.R_DOLPHIN_SECTION
                    section_start_entry.section_num_to_relocate_against = curr_section_num

                    section_start_entry.save(data,
                                             next_relocation_entry_offset)
                    next_relocation_entry_offset += RELRelocation.ENTRY_SIZE

                offset_diff = relocation_data_entry.relocation_offset - prev_relocation_offset
                while offset_diff > 0xFFFF:
                    # The offset change is stored as a halfword, so if the gap is too large, we must insert a NOP command (or several) to bridge the gap.
                    nop_entry = RELRelocation()
                    nop_entry.relocation_type = RELRelocationType.R_DOLPHIN_NOP

                    nop_entry.relocation_offset = prev_relocation_offset + 0xFFFF
                    nop_entry.offset_of_curr_relocation_from_prev = 0xFFFF
                    nop_entry.save(data, next_relocation_entry_offset)
                    prev_relocation_offset = nop_entry.relocation_offset
                    next_relocation_entry_offset += RELRelocation.ENTRY_SIZE

                    offset_diff -= 0xFFFF

                if offset_diff < 0:
                    raise Exception(
                        "Negative offset difference between relocation. Relocations not properly sorted."
                    )

                relocation_data_entry.offset_of_curr_relocation_from_prev = offset_diff
                relocation_data_entry.save(data, next_relocation_entry_offset)
                prev_relocation_offset = relocation_data_entry.relocation_offset
                next_relocation_entry_offset += RELRelocation.ENTRY_SIZE

            table_end_entry = RELRelocation()
            table_end_entry.relocation_type = RELRelocationType.R_DOLPHIN_END
            table_end_entry.section_num_to_relocate_against = curr_section_num

            table_end_entry.save(data, next_relocation_entry_offset)
            next_relocation_entry_offset += RELRelocation.ENTRY_SIZE

            if module_num != 0 and module_num != self.id:
                # Normally fix_size wouldn't need to include any of the relocation table in it, because all relocations happen when the REL is first loaded, and the whole relocation table can be repurposed afterwards.
                # But when the REL has relocations against a module besides main.dol and itself, that is no longer the case.
                # Relocations against a different REL can happen after this REL is initially loaded, so we need to include those relocations in fix_size.
                # Only relocations after the end of the last REL-to-REL relocation can be repurposed.
                self.fix_size = next_relocation_entry_offset

        write_u8(data, 0x30, self.prolog_section)
        write_u8(data, 0x31, self.epilog_section)
        write_u8(data, 0x32, self.unresolved_section)
        write_u32(data, 0x34, self.prolog_offset)
        write_u32(data, 0x38, self.epilog_offset)
        write_u32(data, 0x3C, self.unresolved_offset)

        write_u32(data, 0x40, self.alignment)
        write_u32(data, 0x44, self.bss_alignment)
        # TODO: align bss to the bss_alignment

        write_u32(data, 0x48, self.fix_size)
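save_changes() above encodes each relocation's offset as a 16-bit delta from the previous relocation in the same section, inserting R_DOLPHIN_NOP entries to bridge any gap larger than 0xFFFF. A standalone sketch of that delta encoding, using plain integers instead of RELRelocation objects (the function name is hypothetical):

def encode_offset_deltas(offsets):
    # Given sorted absolute offsets within one section, yield (is_nop, delta) pairs
    # mirroring the halfword encoding above: real entries carry the remaining delta,
    # NOP-style filler entries bridge gaps larger than 0xFFFF.
    prev = 0
    for offset in offsets:
        diff = offset - prev
        while diff > 0xFFFF:
            yield (True, 0xFFFF)  # R_DOLPHIN_NOP-style filler entry
            prev += 0xFFFF
            diff -= 0xFFFF
        yield (False, diff)
        prev = offset

# Offsets 0x10 and 0x20010 produce: real entry, two NOP fillers, real entry.
entries = list(encode_offset_deltas([0x10, 0x20010]))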
def captureMotion():
    global motionMonitorStream, motionDetected, camera, cfgPreMotionCapture, cfgPostMotionCapture
    global cfgDeviceEnabled
    global currentlyRecording

    # Max length of video to capture
    streamMaxSeconds = 60

    # Time between checking for motion
    timeBetweenMotionChecks = 5

    # While keeps loop running
    while True:
        # Check if we should not detect motion
        if not cfgDeviceEnabled:
            logDebug("Device not enabled")
        elif motionDetected:
            logDebug("Starting recording of motion")
            currentlyRecording = True

            # Copy the pre-motion seconds into a new BytesIO buffer, then split the
            # recording to it immediately afterwards to avoid a gap in the video
            recordingStream = BytesIO()
            motionMonitorStream.copy_to(recordingStream,
                                        seconds=cfgPreMotionCapture)
            camera.split_recording(recordingStream)

            # Clear the motion monitor stream
            motionMonitorStream.clear()

            # Setup recording time
            recordingTime = 0 + cfgPreMotionCapture + cfgPostMotionCapture

            # Capture thumbnail
            thumbnailStream = BytesIO()
            camera.capture(thumbnailStream,
                           format='jpeg',
                           use_video_port=True,
                           resize=(320, 240))
            thumbnailStream.seek(0)

            # The idea here is to keep recording while we detect motion or until we run out of room to record
            # The only way to exit this loop is after we send a recording
            while True:
                sendRecording = False

                if motionDetected:
                    logDebug("Recorded total of " + str(recordingTime) +
                             " seconds")

                    # Reset motion, then sleep, if it happens again then we will keep recording
                    motionDetected = False  # VERY IMPORTANT! Must be reset before the sleep, otherwise motion detected during the wait would be cleared right after it is set
                    camera.wait_recording(
                        timeBetweenMotionChecks
                    )  # If set too low, you may get choppier videos and motion may not be detected within 1 second
                    recordingTime = recordingTime + timeBetweenMotionChecks

                    # If we go beyond the time we have available, then we need to send the recording out and start a new one
                    # Uploading the recording in chunks of at most streamMaxSeconds sends a manageable amount to the server
                    if recordingTime > streamMaxSeconds:
                        logDebug("Reached max recording at " +
                                 str(recordingTime) + " seconds, starting new")
                        sendRecording = True
                else:
                    logDebug("Motion no longer detected at " +
                             str(recordingTime) + " seconds, sending capture")
                    sendRecording = True
                    currentlyRecording = False

                if sendRecording:
                    logDebug("Sending Recording of " + str(recordingTime) +
                             " seconds")

                    # Capture the post motion
                    camera.wait_recording(cfgPostMotionCapture)

                    # Copy recording stream to transferToServerStream
                    transferToServerStream = BytesIO()
                    recordingStream.seek(0)
                    transferToServerStream.write(recordingStream.read())
                    transferToServerStream.seek(0)

                    # Jump back to motionMonitorStream briefly to clear recording stream
                    camera.split_recording(motionMonitorStream)

                    # Clear our recording stream by seeking to 0 and then truncate
                    recordingStream.seek(0)
                    recordingStream.truncate()

                    # If we are still recording, we need to jump back to recording stream
                    if currentlyRecording:
                        camera.split_recording(recordingStream)

                    # Send the video to the server
                    t = threading.Thread(target=postVideo,
                                         args=(transferToServerStream,
                                               recordingTime, thumbnailStream))
                    t.start()

                    # If we are still recording, we should continue on, otherwise we should break
                    if currentlyRecording:
                        continue
                    else:
                        break

        # Wait one second until checking again and reset motion
        motionDetected = False  # This is reset in case motion was detected but the camera was not enabled
        sleep(1)
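captureMotion() above hands each finished clip to an upload thread by copying the recording stream into a fresh BytesIO and then clearing the original with seek(0)/truncate() so recording can continue into it. A minimal sketch of that hand-off-and-reset step in isolation (the camera and postVideo pieces from the example are not involved; the helper name is hypothetical):

from io import BytesIO

def snapshot_and_reset(recording_stream):
    # Copy everything recorded so far into a new buffer another thread can own,
    # then clear the original so recording can continue into it from the start.
    recording_stream.seek(0)
    transfer_stream = BytesIO(recording_stream.read())
    recording_stream.seek(0)
    recording_stream.truncate()
    return transfer_stream

rec = BytesIO(b"fake h264 data")
rec.seek(0, 2)  # a live recording would leave the position at the end
out = snapshot_and_reset(rec)
assert out.getvalue() == b"fake h264 data" and rec.getvalue() == b""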
Exemple #47
0
    def run(self):
        #results = {} # {'file' : error_code,...}

        STATE_DONE = 0
        STATE_ABORTED = 10
        STATE_SUCCESS = 20
        STATE_BUSY = 25
        STATE_READ_SENDER_INFO = 30
        STATE_PRERENDER = 40
        STATE_COUNT_PAGES = 50
        STATE_NEXT_RECIPIENT = 60
        STATE_COVER_PAGE = 70
        STATE_SINGLE_FILE = 80
        STATE_MERGE_FILES = 90
        STATE_SINGLE_FILE = 100
        STATE_SEND_FAX = 110
        STATE_CLEANUP = 120
        STATE_ERROR = 130

        next_recipient = self.next_recipient_gen()

        state = STATE_READ_SENDER_INFO
        self.rendered_file_list = []

        while state != STATE_DONE:  # --------------------------------- Fax state machine
            if self.check_for_cancel():
                state = STATE_ABORTED

            log.debug("STATE=(%d, 0, 0)" % state)

            if state == STATE_ABORTED:  # --------------------------------- Aborted (10, 0, 0)
                log.error("Aborted by user.")
                self.write_queue((STATUS_IDLE, 0, ''))
                state = STATE_CLEANUP

            elif state == STATE_SUCCESS:  # --------------------------------- Success (20, 0, 0)
                log.debug("Success.")
                self.write_queue((STATUS_COMPLETED, 0, ''))
                state = STATE_CLEANUP

            elif state == STATE_ERROR:  # --------------------------------- Error (130, 0, 0)
                log.error("Error, aborting.")
                self.write_queue((STATUS_ERROR, 0, ''))
                state = STATE_CLEANUP

            elif state == STATE_BUSY:  # --------------------------------- Busy (25, 0, 0)
                log.error("Device busy, aborting.")
                self.write_queue((STATUS_BUSY, 0, ''))
                state = STATE_CLEANUP

            elif state == STATE_READ_SENDER_INFO:  # --------------------------------- Get sender info (30, 0, 0)
                log.debug("%s State: Get sender info" % ("*" * 20))
                state = STATE_PRERENDER
                try:
                    try:
                        self.dev.open()
                    except Error as e:
                        log.error("Unable to open device (%s)." % e.msg)
                        state = STATE_ERROR
                    else:
                        try:
                            self.sender_name = self.dev.station_name
                            log.debug("Sender name=%s" % self.sender_name)
                            self.sender_fax = self.dev.phone_num
                            log.debug("Sender fax=%s" % self.sender_fax)
                        except Error:
                            log.error("PML get failed!")
                            state = STATE_ERROR

                finally:
                    self.dev.close()

            elif state == STATE_PRERENDER:  # --------------------------------- Pre-render non-G3 files (40, 0, 0)
                log.debug("%s State: Pre-render non-G3 files" % ("*" * 20))
                state = self.pre_render(STATE_COUNT_PAGES)

            elif state == STATE_COUNT_PAGES:  # --------------------------------- Get total page count (50, 0, 0)
                log.debug("%s State: Get total page count" % ("*" * 20))
                state = self.count_pages(STATE_NEXT_RECIPIENT)

            elif state == STATE_NEXT_RECIPIENT:  # --------------------------------- Loop for multiple recipients (60, 0, 0)
                log.debug("%s State: Next recipient" % ("*" * 20))
                state = STATE_COVER_PAGE

                try:
                    recipient = next(next_recipient)
                    #print recipient
                    log.debug("Processing for recipient %s" %
                              recipient['name'])

                    self.write_queue(
                        (STATUS_SENDING_TO_RECIPIENT, 0, recipient['name']))

                except StopIteration:
                    state = STATE_SUCCESS
                    log.debug("Last recipient.")
                    continue

                self.recipient_file_list = self.rendered_file_list[:]

            elif state == STATE_COVER_PAGE:  # --------------------------------- Create cover page (70, 0, 0)
                log.debug("%s State: Render cover page" % ("*" * 20))
                state = self.cover_page(recipient)

            elif state == STATE_SINGLE_FILE:  # --------------------------------- Special case for single file (no merge) (80, 0, 0)
                log.debug("%s State: Handle single file" % ("*" * 20))
                state = self.single_file(STATE_SEND_FAX)

            elif state == STATE_MERGE_FILES:  # --------------------------------- Merge multiple G3 files (90, 0, 0)
                log.debug("%s State: Merge multiple files" % ("*" * 20))
                state = self.merge_files(STATE_SEND_FAX)

            elif state == STATE_SEND_FAX:  # --------------------------------- Send fax state machine (110, 0, 0)
                log.debug("%s State: Send fax" % ("*" * 20))
                state = STATE_NEXT_RECIPIENT

                FAX_SEND_STATE_DONE = 0
                FAX_SEND_STATE_ABORT = 10
                FAX_SEND_STATE_ERROR = 20
                FAX_SEND_STATE_BUSY = 25
                FAX_SEND_STATE_SUCCESS = 30
                FAX_SEND_STATE_DEVICE_OPEN = 40
                FAX_SEND_STATE_SET_TOKEN = 50
                FAX_SEND_STATE_EARLY_OPEN = 60
                FAX_SEND_STATE_SET_PARAMS = 70
                FAX_SEND_STATE_CHECK_IDLE = 80
                FAX_SEND_STATE_START_REQUEST = 90
                FAX_SEND_STATE_LATE_OPEN = 100
                FAX_SEND_STATE_SEND_DIAL_STRINGS = 110
                FAX_SEND_STATE_SEND_FAX_HEADER = 120
                FAX_SEND_STATE_SEND_PAGES = 130
                FAX_SEND_STATE_SEND_END_OF_STREAM = 140
                FAX_SEND_STATE_WAIT_FOR_COMPLETE = 150
                FAX_SEND_STATE_RESET_TOKEN = 160
                FAX_SEND_STATE_CLOSE_SESSION = 170

                monitor_state = False
                error_state = pml.DN_ERROR_NONE
                fax_send_state = FAX_SEND_STATE_DEVICE_OPEN

                while fax_send_state != FAX_SEND_STATE_DONE:

                    if self.check_for_cancel():
                        log.error("Fax send aborted.")
                        fax_send_state = FAX_SEND_STATE_ABORT

                    if monitor_state:
                        fax_state = self.getFaxDownloadState()
                        if not fax_state in (pml.UPDN_STATE_XFERACTIVE,
                                             pml.UPDN_STATE_XFERDONE):
                            log.error("D/L error state=%d" % fax_state)
                            fax_send_state = FAX_SEND_STATE_ERROR
                            state = STATE_ERROR

                    log.debug("STATE=(%d, %d, 0)" %
                              (STATE_SEND_FAX, fax_send_state))

                    if fax_send_state == FAX_SEND_STATE_ABORT:  # -------------- Abort (110, 10, 0)
                        # TODO: Set D/L state to ???
                        monitor_state = False
                        fax_send_state = FAX_SEND_STATE_RESET_TOKEN
                        state = STATE_ABORTED

                    elif fax_send_state == FAX_SEND_STATE_ERROR:  # -------------- Error (110, 20, 0)
                        log.error("Fax send error.")
                        error_state = self.getFaxDownloadError()
                        log.debug(
                            "Error State=%d (%s)" %
                            (error_state,
                             pml.DN_ERROR_STR.get(error_state, "Unknown")))
                        monitor_state = False

                        fax_send_state = FAX_SEND_STATE_RESET_TOKEN
                        state = STATE_ERROR

                    elif fax_send_state == FAX_SEND_STATE_BUSY:  # -------------- Busy (110, 25, 0)
                        log.error("Fax device busy.")
                        monitor_state = False
                        fax_send_state = FAX_SEND_STATE_RESET_TOKEN
                        state = STATE_BUSY

                    elif fax_send_state == FAX_SEND_STATE_SUCCESS:  # -------------- Success (110, 30, 0)
                        log.debug("Fax send success.")
                        monitor_state = False
                        fax_send_state = FAX_SEND_STATE_RESET_TOKEN
                        state = STATE_NEXT_RECIPIENT

                    elif fax_send_state == FAX_SEND_STATE_DEVICE_OPEN:  # -------------- Device open (110, 40, 0)
                        log.debug("%s State: Open device" % ("*" * 20))
                        fax_send_state = FAX_SEND_STATE_SET_TOKEN
                        try:
                            self.dev.open()
                        except Error as e:
                            log.error("Unable to open device (%s)." % e.msg)
                            fax_send_state = FAX_SEND_STATE_ERROR
                        else:
                            if self.dev.device_state == DEVICE_STATE_NOT_FOUND:
                                fax_send_state = FAX_SEND_STATE_ERROR

                    elif fax_send_state == FAX_SEND_STATE_SET_TOKEN:  # -------------- Acquire fax token (110, 50, 0)
                        log.debug("%s State: Acquire fax token" % ("*" * 20))
                        try:
                            result_code, token = self.dev.getPML(
                                pml.OID_FAX_TOKEN)
                        except Error:
                            log.debug("Unable to acquire fax token (1).")
                            fax_send_state = FAX_SEND_STATE_EARLY_OPEN
                        else:
                            if result_code > pml.ERROR_MAX_OK:
                                fax_send_state = FAX_SEND_STATE_EARLY_OPEN
                                log.debug("Skipping token acquisition.")
                            else:
                                token = time.strftime("%d%m%Y%H:%M:%S",
                                                      time.gmtime())
                                log.debug("Setting token: %s" % token)
                                try:
                                    self.dev.setPML(pml.OID_FAX_TOKEN, token)
                                except Error:
                                    log.error(
                                        "Unable to acquire fax token (2).")
                                    fax_send_state = FAX_SEND_STATE_ERROR
                                else:
                                    result_code, check_token = self.dev.getPML(
                                        pml.OID_FAX_TOKEN)

                                    if check_token == token:
                                        fax_send_state = FAX_SEND_STATE_EARLY_OPEN
                                    else:
                                        log.error(
                                            "Unable to acquire fax token (3).")
                                        fax_send_state = FAX_SEND_STATE_ERROR

                    elif fax_send_state == FAX_SEND_STATE_EARLY_OPEN:  # -------------- Early open (newer models) (110, 60, 0)
                        log.debug("%s State: Early open" % ("*" * 20))
                        fax_send_state = FAX_SEND_STATE_CHECK_IDLE

                        if self.dev.fax_type == FAX_TYPE_BLACK_SEND_EARLY_OPEN:  # newer
                            log.debug("Opening fax channel.")
                            try:
                                self.dev.openFax()
                            except Error as e:
                                log.error("Unable to open channel (%s)." %
                                          e.msg)
                                fax_send_state = FAX_SEND_STATE_ERROR
                        else:
                            log.debug("Skipped.")

                    elif fax_send_state == FAX_SEND_STATE_CHECK_IDLE:  # -------------- Check for initial idle (110, 80, 0)
                        log.debug("%s State: Check idle" % ("*" * 20))
                        fax_send_state = FAX_SEND_STATE_START_REQUEST

                        dl_state = self.getFaxDownloadState()
                        tx_status = self.getFaxJobTxStatus()
                        rx_status = self.getFaxJobRxStatus()

                        if ((dl_state == pml.UPDN_STATE_IDLE or \
                            dl_state == pml.UPDN_STATE_ERRORABORT or \
                            dl_state == pml.UPDN_STATE_XFERDONE) and \
                            (tx_status == pml.FAXJOB_TX_STATUS_IDLE or tx_status == pml.FAXJOB_TX_STATUS_DONE) and \
                            (rx_status == pml.FAXJOB_RX_STATUS_IDLE or rx_status == pml.FAXJOB_RX_STATUS_DONE)):

                            # was: if state == pml.UPDN_STATE_IDLE:
                            if dl_state == pml.UPDN_STATE_IDLE:
                                log.debug("Starting in idle state")
                            else:
                                log.debug("Resetting to idle...")
                                self.dev.setPML(pml.OID_FAX_DOWNLOAD,
                                                pml.UPDN_STATE_IDLE)
                                time.sleep(0.5)
                        else:
                            fax_send_state = FAX_SEND_STATE_BUSY

                    elif fax_send_state == FAX_SEND_STATE_START_REQUEST:  # -------------- Request fax start (110, 90, 0)
                        log.debug("%s State: Request start" % ("*" * 20))
                        fax_send_state = FAX_SEND_STATE_SET_PARAMS

                        dl_state = self.getFaxDownloadState()

                        if dl_state == pml.UPDN_STATE_IDLE:
                            log.debug("Try: 0")
                            log.debug(
                                "Setting to up/down state request start...")
                            self.dev.setPML(pml.OID_FAX_DOWNLOAD,
                                            pml.UPDN_STATE_REQSTART)
                            time.sleep(1)

                            log.debug("Waiting for active state...")
                            i = 1

                            while i < 10:
                                log.debug("Try: %d" % i)
                                try:
                                    dl_state = self.getFaxDownloadState()
                                except Error:
                                    log.error("PML/SNMP error")
                                    fax_send_state = FAX_SEND_STATE_ERROR
                                    break

                                if dl_state == pml.UPDN_STATE_XFERACTIVE:
                                    break

                                time.sleep(1)
                                log.debug(
                                    "Setting to up/down state request start..."
                                )
                                self.dev.setPML(pml.OID_FAX_DOWNLOAD,
                                                pml.UPDN_STATE_REQSTART)

                                i += 1

                            else:
                                log.error("Could not get into active state!")
                                fax_send_state = FAX_SEND_STATE_BUSY

                            monitor_state = True

                        else:
                            log.error("Could not get into idle state!")
                            fax_send_state = FAX_SEND_STATE_BUSY

                    elif fax_send_state == FAX_SEND_STATE_SET_PARAMS:  # -------------- Set fax send params (110, 70, 0)
                        log.debug("%s State: Set params" % ("*" * 20))
                        fax_send_state = FAX_SEND_STATE_LATE_OPEN

                        try:
                            self.dev.setPML(pml.OID_DEV_DOWNLOAD_TIMEOUT,
                                            pml.DEFAULT_DOWNLOAD_TIMEOUT)
                            self.dev.setPML(pml.OID_FAXJOB_TX_TYPE,
                                            pml.FAXJOB_TX_TYPE_HOST_ONLY)
                            log.debug("Setting date and time on device.")
                            self.dev.setDateAndTime()
                        except Error as e:
                            log.error("PML/SNMP error (%s)" % e.msg)
                            fax_send_state = FAX_SEND_STATE_ERROR

                    elif fax_send_state == FAX_SEND_STATE_LATE_OPEN:  # -------------- Late open (older models) (110, 100, 0)
                        log.debug("%s State: Late open" % ("*" * 20))
                        fax_send_state = FAX_SEND_STATE_SEND_DIAL_STRINGS

                        if self.dev.fax_type == FAX_TYPE_BLACK_SEND_LATE_OPEN:  # older
                            log.debug("Opening fax channel.")
                            try:
                                self.dev.openFax()
                            except Error:
                                log.error("Unable to open channel.")
                                fax_send_state = FAX_SEND_STATE_ERROR
                        else:
                            log.debug("Skipped.")

                    elif fax_send_state == FAX_SEND_STATE_SEND_DIAL_STRINGS:  # -------------- Dial strings (110, 110, 0)
                        log.debug("%s State: Send dial strings" % ("*" * 20))
                        fax_send_state = FAX_SEND_STATE_SEND_FAX_HEADER

                        log.debug("Dialing: %s" % recipient['fax'])

                        log.debug("Sending dial strings...")
                        self.create_mfpdtf_fixed_header(
                            DT_DIAL_STRINGS, True, PAGE_FLAG_NEW_DOC
                            | PAGE_FLAG_END_DOC | PAGE_FLAG_END_STREAM
                        )  # 0x1c on Windows, we were sending 0x0c
                        #print recipient
                        dial_strings = recipient['fax'].encode('ascii')
                        log.debug(repr(dial_strings))
                        self.create_mfpdtf_dial_strings(dial_strings)

                        try:
                            self.write_stream()
                        except Error:
                            log.error("Channel write error.")
                            fax_send_state = FAX_SEND_STATE_ERROR

                    elif fax_send_state == FAX_SEND_STATE_SEND_FAX_HEADER:  # -------------- Fax header (110, 120, 0)
                        log.debug("%s State: Send fax header" % ("*" * 20))
                        fax_send_state = FAX_SEND_STATE_SEND_PAGES

                        try:
                            ff = open(self.f, 'rb')
                        except IOError:
                            log.error("Unable to read fax file.")
                            fax_send_state = FAX_SEND_STATE_ERROR
                            continue

                        try:
                            header = ff.read(FILE_HEADER_SIZE)
                        except IOError:
                            log.error("Unable to read fax file.")
                            fax_send_state = FAX_SEND_STATE_ERROR
                            continue

                        magic, version, total_pages, hort_dpi, vert_dpi, page_size, \
                            resolution, encoding, reserved1, reserved2 = self.decode_fax_header(header)

                        if magic != b'hplip_g3':
                            log.error("Invalid file header. Bad magic.")
                            fax_send_state = FAX_SEND_STATE_ERROR
                        else:
                            log.debug(
                                "Magic=%s Ver=%d Pages=%d hDPI=%d vDPI=%d Size=%d Res=%d Enc=%d"
                                % (magic, version, total_pages, hort_dpi,
                                   vert_dpi, page_size, resolution, encoding))

                            log.debug("Sending fax header...")
                            self.create_mfpdtf_fixed_header(
                                DT_FAX_IMAGES, True, PAGE_FLAG_NEW_DOC)
                            self.create_mfpdtf_fax_header(total_pages)

                            try:
                                self.write_stream()
                            except Error:
                                log.error("Unable to write to channel.")
                                fax_send_state = FAX_SEND_STATE_ERROR

                    elif fax_send_state == FAX_SEND_STATE_SEND_PAGES:  # --------------------------------- Send fax pages state machine (110, 130, 0)
                        log.debug("%s State: Send pages" % ("*" * 20))
                        fax_send_state = FAX_SEND_STATE_SEND_END_OF_STREAM
                        page = BytesIO()

                        for p in range(total_pages):

                            if self.check_for_cancel():
                                fax_send_state = FAX_SEND_STATE_ABORT

                            if fax_send_state == FAX_SEND_STATE_ABORT:
                                break

                            try:
                                header = ff.read(PAGE_HEADER_SIZE)
                            except IOError:
                                log.error("Unable to read fax file.")
                                fax_send_state = FAX_SEND_STATE_ERROR
                                continue

                            page_num, ppr, rpp, bytes_to_read, thumbnail_bytes, reserved2 = \
                                self.decode_page_header(header)

                            log.debug("Page=%d PPR=%d RPP=%d BPP=%d Thumb=%d" %
                                      (page_num, ppr, rpp, bytes_to_read,
                                       thumbnail_bytes))

                            page.write(ff.read(bytes_to_read))
                            thumbnail = ff.read(
                                thumbnail_bytes
                            )  # thrown away for now (should be 0 read)
                            page.seek(0)

                            self.create_mfpdtf_fixed_header(
                                DT_FAX_IMAGES, page_flags=PAGE_FLAG_NEW_PAGE)
                            self.create_sop_record(page_num, hort_dpi,
                                                   vert_dpi, ppr, rpp,
                                                   encoding)

                            try:
                                data = page.read(RASTER_DATA_SIZE)
                            except IOError:
                                log.error("Unable to read fax file.")
                                fax_send_state = FAX_SEND_STATE_ERROR
                                continue

                            if data == b'':
                                log.error("No data!")
                                fax_send_state = FAX_SEND_STATE_ERROR
                                continue

                            self.create_raster_data_record(data)
                            total_read = RASTER_DATA_SIZE

                            while True:
                                data = page.read(RASTER_DATA_SIZE)
                                total_read += RASTER_DATA_SIZE

                                dl_state = self.getFaxDownloadState()
                                if dl_state == pml.UPDN_STATE_ERRORABORT:
                                    fax_send_state = FAX_SEND_STATE_ERROR
                                    break

                                if self.check_for_cancel():
                                    fax_send_state = FAX_SEND_STATE_ABORT
                                    break

                                if data == b'':
                                    self.create_eop_record(rpp)

                                    try:
                                        self.write_stream()
                                    except Error:
                                        log.error("Channel write error.")
                                        fax_send_state = FAX_SEND_STATE_ERROR
                                    break

                                else:
                                    try:
                                        self.write_stream()
                                    except Error:
                                        log.error("Channel write error.")
                                        fax_send_state = FAX_SEND_STATE_ERROR
                                        break

                                status = self.getFaxJobTxStatus()
                                while status == pml.FAXJOB_TX_STATUS_DIALING:
                                    self.write_queue(
                                        (STATUS_DIALING, 0, recipient['fax']))
                                    time.sleep(1.0)

                                    if self.check_for_cancel():
                                        fax_send_state = FAX_SEND_STATE_ABORT
                                        break

                                    dl_state = self.getFaxDownloadState()
                                    if dl_state == pml.UPDN_STATE_ERRORABORT:
                                        fax_send_state = FAX_SEND_STATE_ERROR
                                        break

                                    status = self.getFaxJobTxStatus()

                                if fax_send_state not in (
                                        FAX_SEND_STATE_ABORT,
                                        FAX_SEND_STATE_ERROR):

                                    while status == pml.FAXJOB_TX_STATUS_CONNECTING:
                                        self.write_queue((STATUS_CONNECTING, 0,
                                                          recipient['fax']))
                                        time.sleep(1.0)

                                        if self.check_for_cancel():
                                            fax_send_state = FAX_SEND_STATE_ABORT
                                            break

                                        dl_state = self.getFaxDownloadState()
                                        if dl_state == pml.UPDN_STATE_ERRORABORT:
                                            fax_send_state = FAX_SEND_STATE_ERROR
                                            break

                                        status = self.getFaxJobTxStatus()

                                if status == pml.FAXJOB_TX_STATUS_TRANSMITTING:
                                    self.write_queue((STATUS_SENDING, page_num,
                                                      recipient['fax']))

                                self.create_mfpdtf_fixed_header(DT_FAX_IMAGES,
                                                                page_flags=0)
                                self.create_raster_data_record(data)

                                if fax_send_state in (FAX_SEND_STATE_ABORT,
                                                      FAX_SEND_STATE_ERROR):
                                    break

                            page.truncate(0)
                            page.seek(0)

                    elif fax_send_state == FAX_SEND_STATE_SEND_END_OF_STREAM:  # -------------- EOS (110, 140, 0)
                        log.debug("%s State: Send EOS" % ("*" * 20))
                        fax_send_state = FAX_SEND_STATE_WAIT_FOR_COMPLETE
                        log.debug("End of stream...")
                        self.create_mfpdtf_fixed_header(
                            DT_FAX_IMAGES, False, PAGE_FLAG_END_STREAM)

                        try:
                            self.write_stream()
                        except Error:
                            log.error("Channel write error.")
                            fax_send_state = FAX_SEND_STATE_ERROR

                        monitor_state = False

                    elif fax_send_state == FAX_SEND_STATE_WAIT_FOR_COMPLETE:  # -------------- Wait for complete (110, 150, 0)
                        log.debug("%s State: Wait for completion" % ("*" * 20))

                        fax_send_state = FAX_SEND_STATE_WAIT_FOR_COMPLETE

                        time.sleep(1.0)
                        status = self.getFaxJobTxStatus()

                        if status == pml.FAXJOB_TX_STATUS_DIALING:
                            self.write_queue(
                                (STATUS_DIALING, 0, recipient['fax']))
                            log.debug("Dialing ...")

                        elif status == pml.FAXJOB_TX_STATUS_TRANSMITTING:
                            self.write_queue(
                                (STATUS_SENDING, page_num, recipient['fax']))
                            log.debug("Transmitting ...")

                        elif status in (pml.FAXJOB_TX_STATUS_DONE,
                                        pml.FAXJOB_RX_STATUS_IDLE):
                            fax_send_state = FAX_SEND_STATE_RESET_TOKEN
                            state = STATE_NEXT_RECIPIENT
                            log.debug("Transmitting done or idle ...")

                        else:
                            self.write_queue(
                                (STATUS_SENDING, page_num, recipient['fax']))
                            log.debug("Pending ...")

                    elif fax_send_state == FAX_SEND_STATE_RESET_TOKEN:  # -------------- Release fax token (110, 160, 0)
                        log.debug("%s State: Release fax token" % ("*" * 20))
                        self.write_queue((STATUS_CLEANUP, 0, ''))

                        try:
                            self.dev.setPML(pml.OID_FAX_TOKEN, '\x00' * 16)
                        except Error:
                            log.error("Unable to release fax token.")

                        fax_send_state = FAX_SEND_STATE_CLOSE_SESSION

                    elif fax_send_state == FAX_SEND_STATE_CLOSE_SESSION:  # -------------- Close session (110, 170, 0)
                        log.debug("%s State: Close session" % ("*" * 20))
                        fax_send_state = FAX_SEND_STATE_DONE
                        log.debug("Closing session...")

                        try:
                            mm.close()
                        except NameError:
                            pass

                        try:
                            ff.close()
                        except NameError:
                            pass

                        if self.dev.fax_type == FAX_TYPE_BLACK_SEND_LATE_OPEN:
                            log.debug("Closing fax channel.")
                            self.dev.closeFax()

                        self.dev.setPML(pml.OID_FAX_DOWNLOAD,
                                        pml.UPDN_STATE_IDLE)

                        time.sleep(1)

                        if self.dev.fax_type == FAX_TYPE_BLACK_SEND_EARLY_OPEN:
                            log.debug("Closing fax channel.")
                            self.dev.closeFax()

                        self.dev.close()

            elif state == STATE_CLEANUP:  # --------------------------------- Cleanup (120, 0, 0)
                log.debug("%s State: Cleanup" % ("*" * 20))

                if self.remove_temp_file:
                    log.debug("Removing merged file: %s" % self.f)
                    try:
                        os.remove(self.f)
                        log.debug("Removed")
                    except OSError:
                        log.debug("Not found")

                state = STATE_DONE
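
The page loop above (FAX_SEND_STATE_SEND_PAGES) reuses a single BytesIO for every fax page: the raster is written in, read back in RASTER_DATA_SIZE chunks, and the buffer is then reset with truncate(0)/seek(0). A minimal, self-contained sketch of just that reuse pattern; CHUNK_SIZE, send_chunk and the page data are invented for illustration.

from io import BytesIO

CHUNK_SIZE = 4096  # stand-in for RASTER_DATA_SIZE

def send_pages(pages, send_chunk):
    """Reuse one in-memory buffer for every page."""
    page = BytesIO()
    for raster in pages:              # each item is the raw raster of one page
        page.write(raster)
        page.seek(0)                  # rewind before reading back
        while True:
            chunk = page.read(CHUNK_SIZE)
            if chunk == b'':          # compare against bytes, not ''
                break
            send_chunk(chunk)
        page.truncate(0)              # reset the buffer for the next page
        page.seek(0)

# usage sketch: two fake pages, chunks are simply discarded
send_pages([b'\x00' * 10000, b'\xff' * 5000], lambda chunk: None)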
Exemple #48
0
class FP_TranslatorRestAPI:

    logLevel = 1
    # superseded endpoint, kept for reference:
    # translatorUrl = 'http://oldTranslator.com:8580/translator/services/FormatBuilder'
    translatorUrl = 'http://newTranslator.com:8380/translator/services/FormatBuilder'

    # Flight plans are usually passed in as XML strings via inString
    def __init__(self, inFile=None, outFile=None, url=None, inString=None):

        if url is None:
            self.url = FP_TranslatorRestAPI.translatorUrl
        else:
            self.url = url

        if inFile is None:
            if inString is None:
                self.log(
                    "No input translator message given, stopping FP_TranslatorRestAPI",
                    1)
                sys.exit()
            else:
                self.EF_req_data = inString
        else:
            self.inFile = inFile
            with open(self.inFile) as inFileH:
                self.EF_req_data = inFileH.read()

        # gets outFile squared away
        if outFile is not None:
            self.outFile = outFile

        self.buffer = BytesIO()
        headers = 'SOAPAction: Header|Accept: application/json'
        self.hdrs = headers.split("|")

        self.soapEnv = '''<?xml version="1.0" encoding="UTF-8"?>
        <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
         <soapenv:Body>%s</soapenv:Body>
        </soapenv:Envelope>'''

        self.body = self.soapEnv % self.EF_req_data
        self.elapsed_time = None

    # returns the translator request message as a string, without the SOAP envelope
    def getEF003_ReqBody(self):
        return self.EF_req_data

    def getElapsedTime(self):
        return self.elapsed_time

    # sets the translator request message and wraps it in the SOAP envelope
    def setBody(self, body):
        self.body = self.soapEnv % body

    def setBodyFromFile(self, inFile):
        with open(inFile) as inFileH:
            body = inFileH.read()
        self.body = self.soapEnv % body

    def writeRespToFile(self, body=None, respFile=None):
        if body is None:
            print(
                "FP_TranslatorRestAPI: message body not included with Write request."
            )
            return
        if respFile is None:
            print(
                "FP_TranslatorRestAPI: new file name not included with Write request."
            )
            return
        with open(respFile, 'w') as outFileH:
            outFileH.write(body)

    def sendFlightPlan(self):
        # sets up curl object
        c = pycurl.Curl()
        c.setopt(pycurl.HTTPHEADER, self.hdrs)
        c.setopt(pycurl.POST, 1)
        c.setopt(pycurl.VERBOSE, 1)
        c.setopt(pycurl.URL, self.url)
        c.setopt(pycurl.POSTFIELDS, self.body)
        c.setopt(c.WRITEDATA, self.buffer)
        # resets the holder for the curl command response
        self.buffer.truncate(0)
        self.buffer.seek(0)
        # runs the curl command and records the elapsed time

        sent_time = time.time()
        c.perform()
        self.elapsed_time = time.time() - sent_time
        status = c.getinfo(pycurl.RESPONSE_CODE)
        if status != 200:
            self.log("\n***** PYCURL HTTP STATUS CODE: %d *****\n" % status, 2)
        del c
        # extracts the response as string, from the soap envelope
        result = self.buffer.getvalue().decode('iso-8859-1')
        body = result[result.find("<xml_Flight_Plan"):]
        body = body[:body.find("</soapenv:Body>")]
        # returns the extracted EF003 response to the caller as a string
        return body

    def log(self, text, level=5):
        if FP_TranslatorRestAPI.logLevel >= level:
            print(text)
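
sendFlightPlan() above points pycurl's WRITEDATA at a BytesIO and empties it with truncate(0)/seek(0) before each request. A minimal sketch of the same capture pattern, assuming only that pycurl is installed; the URL, body and headers below are placeholders.

from io import BytesIO
import pycurl

def post_and_capture(url, body, headers):
    buf = BytesIO()
    c = pycurl.Curl()
    c.setopt(pycurl.URL, url)
    c.setopt(pycurl.POST, 1)
    c.setopt(pycurl.POSTFIELDS, body)
    c.setopt(pycurl.HTTPHEADER, headers)
    c.setopt(c.WRITEDATA, buf)       # response bytes accumulate here
    buf.truncate(0)                  # start from an empty buffer
    buf.seek(0)
    c.perform()
    status = c.getinfo(pycurl.RESPONSE_CODE)
    c.close()
    return status, buf.getvalue().decode('iso-8859-1')

# usage sketch (placeholder endpoint, not called here)
# status, text = post_and_capture('http://example.com/service', '<req/>', ['Accept: text/xml'])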
Exemple #49
0
class CSVLogger(_Logger):
    """
    CSV output, consisting of one line per entry. Entries are
    separated by a separator (a semicolon per default).
    """

    LoggerName = "csv"

    LoggerArgs = {
        "filename": "linkchecker-out.csv",
        'separator': ';',
        "quotechar": '"',
        "dialect": "excel",
    }

    def __init__(self, **kwargs):
        """Store default separator and (os dependent) line terminator."""
        args = self.get_args(kwargs)
        super(CSVLogger, self).__init__(**args)
        self.init_fileoutput(args)
        self.separator = args['separator']
        self.quotechar = args['quotechar']
        self.dialect = args['dialect']
        self.linesep = os.linesep

    def comment(self, s, **args):
        """Write CSV comment."""
        self.writeln(s=u"# %s" % s, **args)

    def start_output(self):
        """Write checking start info as csv comment."""
        super(CSVLogger, self).start_output()
        row = []
        if self.has_part("intro"):
            self.write_intro()
            self.flush()
        else:
            # write empty string to initialize file output
            self.write(u"")
        self.queue = BytesIO()
        self.writer = csv.writer(self.queue,
                                 dialect=self.dialect,
                                 delimiter=self.separator,
                                 lineterminator=self.linesep,
                                 quotechar=self.quotechar)
        for s in Columns:
            if self.has_part(s):
                row.append(s)
        if row:
            self.writerow(row)

    def log_url(self, url_data):
        """Write csv formatted url check info."""
        row = []
        if self.has_part("urlname"):
            row.append(url_data.base_url)
        if self.has_part("parentname"):
            row.append(url_data.parent_url)
        if self.has_part("baseref"):
            row.append(url_data.base_ref)
        if self.has_part("result"):
            row.append(url_data.result)
        if self.has_part("warningstring"):
            row.append(self.linesep.join(x[1] for x in url_data.warnings))
        if self.has_part("infostring"):
            row.append(self.linesep.join(url_data.info))
        if self.has_part("valid"):
            row.append(url_data.valid)
        if self.has_part("url"):
            row.append(url_data.url)
        if self.has_part("line"):
            row.append(url_data.line)
        if self.has_part("column"):
            row.append(url_data.column)
        if self.has_part("name"):
            row.append(url_data.name)
        if self.has_part("dltime"):
            row.append(url_data.dltime)
        if self.has_part("dlsize"):
            row.append(url_data.size)
        if self.has_part("checktime"):
            row.append(url_data.checktime)
        if self.has_part("cached"):
            row.append(0)
        if self.has_part("level"):
            row.append(url_data.level)
        if self.has_part("modified"):
            row.append(self.format_modified(url_data.modified))
        self.writerow(map(strformat.unicode_safe, row))
        self.flush()

    def writerow(self, row):
        """Write one row in CSV format."""
        self.writer.writerow(
            [s.encode("utf-8", self.codec_errors) for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and write to the target stream
        self.write(data)
        # empty queue
        self.queue.truncate(0)

    def end_output(self, **kwargs):
        """Write end of checking info as csv comment."""
        if self.has_part("outro"):
            self.write_outro()
        self.close_fileoutput()
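
CSVLogger buffers each row in an in-memory queue via csv.writer and then copies the decoded text to its real output. The snippet is Python-2 flavoured (BytesIO plus manual UTF-8 encoding); a minimal Python 3 sketch of the same queue pattern would use io.StringIO instead. CsvQueueWriter and its arguments are invented for illustration.

import csv
from io import StringIO

class CsvQueueWriter:
    """Buffer each CSV row in memory before handing it to a target stream."""
    def __init__(self, target, separator=';', quotechar='"'):
        self.target = target
        self.queue = StringIO()
        self.writer = csv.writer(self.queue, delimiter=separator,
                                 quotechar=quotechar)

    def writerow(self, row):
        self.writer.writerow(row)
        self.target.write(self.queue.getvalue())  # copy the formatted row out
        self.queue.seek(0)                        # reset the queue for the next row
        self.queue.truncate(0)

# usage sketch
out = StringIO()
w = CsvQueueWriter(out)
w.writerow(['url', 'result'])
w.writerow(['http://example.com', 'ok'])
print(out.getvalue())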
Exemple #50
0
class CallbackTester(unittest.TestCase):
    """
    General callback tester to use with specific callback tests.
    Child class should implement callback_setup() to fill _callback_class, and _fixture attributes.
    """
    def setUp(self):
        self._out = BytesIO()
        self.callback_setup()

    def callback_setup(self):
        self._callback_class = None
        self._fixture = {'escape_db_file': 'non_ascii_values.rdb'}

    def escape_test_helper(self, escape_name):
        if self._callback_class is None:
            return  # Handle unittest discovery attempt to test with this "abstract" class.

        escape = getattr(encodehelpers, escape_name)
        callback = self._callback_class(out=self._out, string_escape=escape)
        parser = RdbParser(callback)
        parser.parse(
            os.path.join(os.path.dirname(__file__), TEST_DUMPS_DIR,
                         self._fixture['escape_db_file']))
        result = self._out.getvalue()
        # print('\n%s escape method %s' % (self._callback_class.__name__, escape_name))
        # print("\t\tself._fixture['escape_out_%s'] = %s" % (escape, repr(result)))
        # try:
        #     print(result.decode('utf8'))
        # except UnicodeDecodeError:
        #     print(result.decode('latin-1'))
        self.assertEqual(result,
                         self._fixture['escape_out_' + escape],
                         msg='\n%s escape method %s' %
                         (self._callback_class.__name__, escape_name))

    def test_raw_escape(self):
        """Test using STRING_ESCAPE_RAW with varied key encodings against expected output."""
        self.escape_test_helper('STRING_ESCAPE_RAW')

    def test_print_escape(self):
        """Test using STRING_ESCAPE_PRINT with varied key encodings against expected output."""
        self.escape_test_helper('STRING_ESCAPE_PRINT')

    def test_utf8_escape(self):
        """Test using STRING_ESCAPE_UTF8 with varied key encodings against expected output."""
        self.escape_test_helper('STRING_ESCAPE_UTF8')

    def test_base64_escape(self):
        """Test using STRING_ESCAPE_BASE64 with varied key encodings against expected output."""
        self.escape_test_helper('STRING_ESCAPE_BASE64')

    def test_all_dumps(self):
        """Run callback with all test dumps intercepting incidental crashes."""
        if self._callback_class is None:
            return  # Handle unittest discovery attempt to test with this "abstract" class.

        for dump_name in glob.glob(
                os.path.join(os.path.dirname(__file__), TEST_DUMPS_DIR,
                             '*.rdb')):
            callback = self._callback_class(out=self._out)
            parser = RdbParser(callback)
            try:
                parser.parse(dump_name)
            except Exception as err:
                raise self.failureException(
                    "%s on %s\n%s" %
                    (self._callback_class.__name__,
                     os.path.basename(dump_name), traceback.format_exc()))
            self._out.seek(0)
            self._out.truncate()
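
test_all_dumps() resets the shared buffer between dumps with seek(0) followed by truncate(). The order matters: truncate() with no argument cuts at the current position, so seeking to 0 first is what actually empties the buffer. A small self-contained sketch of that reset.

from io import BytesIO

buf = BytesIO()
buf.write(b'output from dump #1')

# Reset between iterations: seek first, then truncate at position 0.
buf.seek(0)
buf.truncate()          # equivalent to buf.truncate(0) here
assert buf.getvalue() == b''

buf.write(b'output from dump #2')
assert buf.getvalue() == b'output from dump #2'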
Exemple #51
0
class CaptureStandardIO(fixtures.Fixture):
    """Capture stdin, stdout, and stderr.

    Reading from `sys.stdin` will yield *unicode* strings, much like the
    default in Python 3. This differs from the usual behaviour in Python 2, so
    beware.

    Writing unicode strings to `sys.stdout` or `sys.stderr` will work; they'll
    be encoded with the `encoding` chosen when creating this fixture.

    `addInput(...)` should be used to prepare more input to be read.

    The `output` and `error` properties can be used to obtain what's been
    written to stdout and stderr.

    The buffers used internally have the same lifetime as the fixture
    *instance* itself, so the `output`, and `error` properties remain useful
    even after the fixture has been cleaned-up, and there's no need to capture
    them before exiting.

    However, `clearInput()`, `clearOutput()`, `clearError()`, and `clearAll()`
    can be used to truncate the buffers during this fixture's lifetime.
    """

    stdin = None
    stdout = None
    stderr = None

    def __init__(self, encoding="utf-8"):
        super(CaptureStandardIO, self).__init__()
        self.codec = codecs.lookup(encoding)
        self.encoding = encoding
        # Create new buffers.
        self._buf_in = BytesIO()
        self._buf_out = BytesIO()
        self._buf_err = BytesIO()

    def setUp(self):
        super(CaptureStandardIO, self).setUp()
        self.patcher = MonkeyPatcher()
        self.addCleanup(self.patcher.restore)
        # Patch sys.std* and self.std*. Use TextIOWrapper to provide an
        # identical API to the "real" stdin, stdout, and stderr objects.
        self._addStream("stdin", self._wrapStream(self._buf_in))
        self._addStream("stdout", self._wrapStream(self._buf_out))
        self._addStream("stderr", self._wrapStream(self._buf_err))
        self.patcher.patch()

    def _wrapStream(self, stream):
        return TextIOWrapper(stream,
                             encoding=self.encoding,
                             write_through=True)

    def _addStream(self, name, stream):
        self.patcher.add_patch(self, name, stream)
        self.patcher.add_patch(sys, name, stream)

    def addInput(self, data):
        """Add input to be read later, as a unicode string."""
        position = self._buf_in.tell()
        stream = self.codec.streamwriter(self._buf_in)
        try:
            self._buf_in.seek(0, 2)
            stream.write(data)
        finally:
            self._buf_in.seek(position)

    def getInput(self):
        """The input remaining to be read, as a unicode string."""
        position = self._buf_in.tell()
        if self.stdin is None:
            stream = self.codec.streamreader(self._buf_in)
        else:
            stream = self.stdin
        try:
            return stream.read()
        finally:
            self._buf_in.seek(position)

    def getOutput(self):
        """The output written thus far, as a unicode string."""
        if self.stdout is not None:
            self.stdout.flush()
        output_bytes = self._buf_out.getvalue()
        output_string, _ = self.codec.decode(output_bytes)
        return output_string

    def getError(self):
        """The error written thus far, as a unicode string."""
        if self.stderr is not None:
            self.stderr.flush()
        error_bytes = self._buf_err.getvalue()
        error_string, _ = self.codec.decode(error_bytes)
        return error_string

    def clearInput(self):
        """Truncate the input buffer."""
        self._buf_in.seek(0, 0)
        self._buf_in.truncate()
        if self.stdin is not None:
            self.stdin.seek(0, 0)

    def clearOutput(self):
        """Truncate the output buffer."""
        self._buf_out.seek(0, 0)
        self._buf_out.truncate()
        if self.stdout is not None:
            self.stdout.seek(0, 0)

    def clearError(self):
        """Truncate the error buffer."""
        self._buf_err.seek(0, 0)
        self._buf_err.truncate()
        if self.stderr is not None:
            self.stderr.seek(0, 0)

    def clearAll(self):
        """Truncate all buffers."""
        self.clearInput()
        self.clearOutput()
        self.clearError()
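
The fixture wraps each BytesIO in a TextIOWrapper so the patched sys.stdout/sys.stderr accept unicode text while the raw bytes remain inspectable. A minimal sketch of that wrapping, independent of the fixtures/MonkeyPatcher machinery used above.

import sys
from io import BytesIO, TextIOWrapper

buf_out = BytesIO()
fake_stdout = TextIOWrapper(buf_out, encoding='utf-8', write_through=True)

real_stdout, sys.stdout = sys.stdout, fake_stdout
try:
    print('héllo')                    # text goes through the wrapper...
finally:
    sys.stdout = real_stdout

fake_stdout.flush()
print(buf_out.getvalue())             # ...and lands in the buffer as UTF-8 bytes

# clearing works the same way as in clearOutput() above
buf_out.seek(0)
buf_out.truncate()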
Exemple #52
0
class Particle:
    def __init__(self, jpc_data, particle_offset):
        self.magic = read_str(jpc_data, particle_offset, 8)
        assert self.magic == "JEFFjpa1"

        self.unknown_1 = read_u32(jpc_data, particle_offset + 8)
        self.num_chunks = read_u32(jpc_data, particle_offset + 0xC)
        self.size = read_u32(jpc_data, particle_offset +
                             0x10)  # Not accurate in some rare cases

        self.num_kfa1_chunks = read_u8(jpc_data, particle_offset + 0x14)
        self.num_fld1_chunks = read_u8(jpc_data, particle_offset + 0x15)
        self.num_textures = read_u8(jpc_data, particle_offset + 0x16)
        self.unknown_5 = read_u8(jpc_data, particle_offset + 0x17)

        self.particle_id = read_u16(jpc_data, particle_offset + 0x18)

        self.unknown_6 = read_bytes(jpc_data, particle_offset + 0x1A, 6)

        self.chunks = []
        self.chunk_by_type = {}
        chunk_offset = particle_offset + 0x20
        for chunk_index in range(0, self.num_chunks):
            chunk_magic = read_str(jpc_data, chunk_offset, 4)
            if chunk_magic in IMPLEMENTED_CHUNK_TYPES:
                chunk_class = globals().get(chunk_magic, None)
            else:
                chunk_class = J3DChunk
            chunk = chunk_class()
            chunk.read(jpc_data, chunk_offset)
            self.chunks.append(chunk)
            self.chunk_by_type[chunk.magic] = chunk

            if chunk.magic in IMPLEMENTED_CHUNK_TYPES:
                setattr(self, chunk.magic.lower(), chunk)

            chunk_offset += chunk.size

        self.tdb1.read_texture_ids(self.num_textures)

        true_size = (chunk_offset - particle_offset)
        jpc_data.seek(particle_offset)
        self.data = BytesIO(jpc_data.read(true_size))

    def save_changes(self):
        # Cut off the chunk data first since we're replacing this data entirely.
        self.data.truncate(0x20)
        self.data.seek(0x20)

        self.num_textures = len(self.tdb1.texture_ids)

        for chunk in self.chunks:
            chunk.save_changes()

            chunk.data.seek(0)
            chunk_data = chunk.data.read()
            self.data.write(chunk_data)

        # We don't recalculate this size field, since this is inaccurate anyway. It's probably not even used.
        #self.size = data_len(self.data)

        write_magic_str(self.data, 0, self.magic, 8)
        write_u32(self.data, 0x10, self.size)
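
save_changes() keeps the first 0x20 bytes of the particle and rebuilds everything after them by truncating the buffer at that offset before re-writing the chunk data. A minimal sketch of the truncate-at-offset idea; HEADER_SIZE and the payloads are illustrative only.

from io import BytesIO

HEADER_SIZE = 0x20  # illustrative fixed-size header

data = BytesIO()
data.write(b'H' * HEADER_SIZE)      # pretend header
data.write(b'old chunk data that will be replaced')

# Drop everything after the header, then append the rebuilt body.
data.truncate(HEADER_SIZE)
data.seek(HEADER_SIZE)
for chunk in (b'chunk-1', b'chunk-2'):
    data.write(chunk)

assert data.getvalue() == b'H' * HEADER_SIZE + b'chunk-1chunk-2'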
Exemple #53
0
def decrypt_aes(file_or_data,
                password=None,
                outfile=None,
                salt=None,
                mode=None,
                base64encode=False,
                chunksize=512 * 1024):
    """ Flexible implementation of AES decryption

  Parameters
  ----------
  file_or_data : {BufferObject, string, bytes}
    input data will be converted to a bytes sequence for decryption
  password : {str, None}
    if None, a prompt will ask for the password
  outfile : {None, path, file}
    if None, return the raw decrypted data
  salt : {None, string, bytes}
    salt for password hashing
  mode : Cipher.AES.MODE_*
  chunksize : int
    decryption chunk size, a multiple of 16.
  """
    try:
        from Crypto.Cipher import AES
    except ImportError as e:
        raise ImportError("Requires 'pycrypto' to run this function")
    if mode is None:
        mode = AES.MODE_CBC

    if password is None:
        password = input("Your password: ")
    assert len(password) > 0, "Password length must be greater than 0"
    password = to_password(password, salt)
    # ====== read header ====== #
    infile, filesize, own_file = _data_to_iobuffer(file_or_data)
    origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]
    iv = infile.read(16)
    decryptor = AES.new(password, mode=AES.MODE_CBC, IV=iv)
    # ====== outfile ====== #
    close_file = False
    if isinstance(outfile, string_types) and os.path.exists(
            os.path.dirname(outfile)):
        outfile = open(str(outfile), 'wb')
        close_file = True
    elif hasattr(outfile, 'write') and hasattr(outfile, 'flush'):
        close_file = True
    else:
        outfile = BytesIO()
    # ====== decryption ====== #
    while True:
        chunk = infile.read(chunksize)
        if len(chunk) == 0:
            break
        chunk = decryptor.decrypt(chunk)
        if bool(base64encode):
            chunk = base64.decodebytes(chunk)
        outfile.write(chunk)
    outfile.truncate(origsize)
    # ====== clean and return ====== #
    if own_file:
        infile.close()
    outfile.flush()
    if close_file:
        outfile.close()
    else:
        outfile.seek(0)
        data = outfile.read()
        outfile.close()
        return data
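
decrypt_aes() writes every decrypted chunk to the output stream and then calls truncate(origsize) so anything beyond the original plaintext length (block-cipher padding) is dropped. A minimal sketch of that final step, using plain bytes in place of real AES output; strip_padding is an invented helper.

from io import BytesIO

def strip_padding(decrypted_chunks, original_size):
    """Concatenate decrypted chunks and cut the result back to its true length."""
    out = BytesIO()
    for chunk in decrypted_chunks:
        out.write(chunk)
    out.truncate(original_size)   # discard the cipher's block padding
    out.seek(0)
    return out.read()

# usage sketch: 11 real bytes padded out to a 16-byte block
plain = strip_padding([b'hello world\x05\x05\x05\x05\x05'], 11)
assert plain == b'hello world'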
Exemple #54
0
class _ResponseReader(protocol.Protocol):
    def __init__(self, finished, txresponse, request, maxsize, warnsize,
                 fail_on_dataloss):
        self._finished = finished
        self._txresponse = txresponse
        self._request = request
        self._bodybuf = BytesIO()
        self._maxsize = maxsize
        self._warnsize = warnsize
        self._fail_on_dataloss = fail_on_dataloss
        self._fail_on_dataloss_warned = False
        self._reached_warnsize = False
        self._bytes_received = 0

    def dataReceived(self, bodyBytes):
        # This may be called several times after cancel was called, with buffered data.
        if self._finished.called:
            return

        self._bodybuf.write(bodyBytes)
        self._bytes_received += len(bodyBytes)

        if self._maxsize and self._bytes_received > self._maxsize:
            logger.error(
                "Received (%(bytes)s) bytes larger than download "
                "max size (%(maxsize)s) in request %(request)s.", {
                    'bytes': self._bytes_received,
                    'maxsize': self._maxsize,
                    'request': self._request
                })
            # Clear buffer earlier to avoid keeping data in memory for a long time.
            self._bodybuf.truncate(0)
            self._finished.cancel()

        if self._warnsize and self._bytes_received > self._warnsize and not self._reached_warnsize:
            self._reached_warnsize = True
            logger.warning(
                "Received more bytes than download "
                "warn size (%(warnsize)s) in request %(request)s.", {
                    'warnsize': self._warnsize,
                    'request': self._request
                })

    def connectionLost(self, reason):
        if self._finished.called:
            return

        body = self._bodybuf.getvalue()
        if reason.check(ResponseDone):
            self._finished.callback((self._txresponse, body, None))
            return

        if reason.check(PotentialDataLoss):
            self._finished.callback((self._txresponse, body, ['partial']))
            return

        if reason.check(ResponseFailed) and any(
                r.check(_DataLoss) for r in reason.value.reasons):
            if not self._fail_on_dataloss:
                self._finished.callback((self._txresponse, body, ['dataloss']))
                return

            elif not self._fail_on_dataloss_warned:
                logger.warning(
                    "Got data loss in %s. If you want to process broken "
                    "responses set the setting DOWNLOAD_FAIL_ON_DATALOSS = False"
                    " -- This message won't be shown in further requests",
                    self._txresponse.request.absoluteURI.decode())
                self._fail_on_dataloss_warned = True

        self._finished.errback(reason)
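
On oversized responses the reader calls truncate(0) before cancelling, so the buffered body is released immediately instead of lingering until the object is garbage-collected. A minimal sketch of capping an in-memory body buffer; BodyBuffer and MAX_SIZE are invented for illustration.

from io import BytesIO

MAX_SIZE = 1024  # illustrative download cap

class BodyBuffer:
    def __init__(self):
        self.buf = BytesIO()
        self.received = 0
        self.aborted = False

    def feed(self, data):
        if self.aborted:
            return
        self.buf.write(data)
        self.received += len(data)
        if self.received > MAX_SIZE:
            self.buf.truncate(0)   # drop the oversized body right away
            self.aborted = True

b = BodyBuffer()
b.feed(b'x' * 2048)
assert b.aborted and b.buf.getvalue() == b''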
Exemple #55
0
    def captureGenerator(self):
        stream = BytesIO()
        for foo in range(self.shutter_auto_wait):
            yield stream
            stream.seek(0)
            stream.truncate(0)
        if self.capture_method == CAPTURE_BASIC:
            pass
        elif self.capture_method == CAPTURE_ON_FRAME:
            motor.direction = MOTOR_FORWARD
        elif self.capture_method == CAPTURE_ON_TRIGGER:
            if self.bracket_steps != 1:  #Reduce motor speed
                frames = 3 * self.shutter_auto_wait + self.shutter_speed_wait
                motor.speed = float(self.framerate) / frames
                msg = "Reducing motor speed to %f" % (motor.speed)
                headermsg = {'type': HEADER_MESSAGE, 'msg': msg}
                queue.put(headermsg)
                print(
                    'Capture on trigger with bracket reducing motor speed to',
                    motor.speed)
            motor.direction = MOTOR_FORWARD
            motor.advance()
        header = {'type': HEADER_IMAGE}

        while captureEvent.isSet():
            if not restartEvent.isSet():
                msgheader = {'type': HEADER_MESSAGE, 'msg': 'Pausing capture'}
                queue.put(msgheader)
                restartEvent.wait()
                msgheader = {'type': HEADER_MESSAGE, 'msg': 'Resuming capture'}
                queue.put(msgheader)
            if self.capture_method == CAPTURE_ON_TRIGGER:
                triggerEvent.wait()
            elif self.capture_method == CAPTURE_ON_FRAME:
                motor.advanceUntilTrigger()
                # bypass old frames
            for foo in range(self.shutter_auto_wait):
                yield stream
                stream.seek(0)
                stream.truncate(0)

            # Wait if queue is full
            if queue.qsize() > 20:
                if self.capture_method == CAPTURE_ON_TRIGGER:
                    motor.stop()
                msgheader = {'type': HEADER_MESSAGE, 'msg': 'Capture paused'}
                queue.put(msgheader)
                print('Warning queue > 20\n', flush=True)
                while queue.qsize() > 1:
                    time.sleep(1)
                msgheader = {'type': HEADER_MESSAGE, 'msg': 'Capture resumed'}
                queue.put(msgheader)
                if self.capture_method == CAPTURE_ON_TRIGGER:
                    motor.advance()
            self.frameCounter = self.frameCounter + 1

            #No bracket
            if self.bracket_steps == 1:
                yield stream
                stream.seek(0)
                self.putHeader(0)
                queue.put(stream.getvalue())
                stream.truncate(0)
            else:
                #Do braket
                #First shot image #3 Normal (auto)
                #Second shot image #2 light auto*light coeff
                #Third shot image #1 dark auto*dark coeff
                ##                coef = (self.bracket_light_coefficient, self.bracket_dark_coefficient,0) #normal clair sombre
                ##                autoExposureSpeed = self.exposure_speed              #First shoot
                ##                for i in range(self.bracket_steps) :
                ##                    exposureSpeed =  int(autoExposureSpeed * coef[i]) #Exposure for next shot last is 0 (auto)
                ##                    self.shutter_speed = exposureSpeed
                ##                    yield stream
                ##                    stream.seek(0)
                ##                    self.putHeader(self.bracket_steps - i) #First is 3 Last  is 1
                ##                    queue.put(stream.getvalue())
                ##                    stream.truncate(0)
                ##                    for foo in range(self.shutter_speed_wait) :
                ##                        yield stream
                ##                        stream.seek(0)
                ##                        stream.truncate(0)

                #Normal
                autoExposureSpeed = self.exposure_speed  #First shoot
                self.shutter_speed = int(autoExposureSpeed *
                                         self.bracket_light_coefficient)
                yield stream
                stream.seek(0)
                self.putHeader(3)  #First is 3 Last  is 1
                queue.put(stream.getvalue())
                stream.truncate(0)
                #Overexposed
                for foo in range(self.shutter_speed_wait):
                    yield stream
                    stream.seek(0)
                    stream.truncate(0)
                self.exposure_mode = 'off'  #lock the gains
                self.shutter_speed = int(autoExposureSpeed *
                                         self.bracket_dark_coefficient)
                yield stream
                stream.seek(0)
                self.putHeader(2)  #First is 3 Last  is 1
                queue.put(stream.getvalue())
                stream.truncate(0)
                #Underexposed
                for foo in range(self.shutter_speed_wait):
                    yield stream
                    stream.seek(0)
                    stream.truncate(0)
                self.shutter_speed = autoExposureSpeed
                self.exposure_mode = 'auto'
                yield stream
                stream.seek(0)
                self.putHeader(1)  #First is 3 Last  is 1
                queue.put(stream.getvalue())
                stream.truncate(0)
                for foo in range(self.shutter_speed_wait):
                    yield stream
                    stream.seek(0)
                    stream.truncate(0)
                self.shutter_speed = 0

        if self.capture_method == CAPTURE_ON_TRIGGER:
            motor.stop()
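
captureGenerator() yields the same BytesIO to the camera for every frame and resets it with seek(0)/truncate(0) once the captured bytes have been queued. A camera-free sketch of that yield-and-reset loop; frame_stream and the fake frame data are invented for illustration.

from io import BytesIO
from queue import Queue

def frame_stream(frames, queue):
    """Yield one reusable buffer per frame; the consumer writes into it."""
    stream = BytesIO()
    for _ in range(frames):
        yield stream                  # consumer fills the buffer here
        queue.put(stream.getvalue())  # hand the captured bytes on
        stream.seek(0)
        stream.truncate(0)

q = Queue()
for i, stream in enumerate(frame_stream(2, q)):   # stand-in for the camera writing JPEG data
    stream.write(b'frame-%d' % i)

assert q.get() == b'frame-0' and q.get() == b'frame-1'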
Exemple #56
0
    def __call__(self):
        self.control_bytes = self.calculate_control_bytes_for_each_entry(
            self.entries)

        index_blocks, idxt_blocks, record_counts, last_indices = [BytesIO()], [
            BytesIO()
        ], [0], [b'']
        buf = BytesIO()

        RECORD_LIMIT = 0x10000 - self.HEADER_LENGTH - 1048  # kindlegen uses 1048 (there has to be some margin because of block alignment)

        for i, (index_num, tags) in enumerate(self.entries):
            control_bytes = self.control_bytes[i]
            buf.seek(0), buf.truncate(0)
            index_num = (index_num.encode('utf-8') if isinstance(
                index_num, unicode_type) else index_num)
            raw = bytearray(index_num)
            raw.insert(0, len(index_num))
            buf.write(bytes(raw))
            buf.write(bytes(bytearray(control_bytes)))
            for tag in self.tag_types:
                values = tags.get(tag.name, None)
                if values is None:
                    continue
                try:
                    len(values)
                except TypeError:
                    values = [values]
                if values:
                    for val in values:
                        try:
                            buf.write(encint(val))
                        except ValueError:
                            raise ValueError('Invalid values for %r: %r' %
                                             (tag, values))
            raw = buf.getvalue()
            offset = index_blocks[-1].tell()
            idxt_pos = idxt_blocks[-1].tell()
            if offset + idxt_pos + len(raw) + 2 > RECORD_LIMIT:
                index_blocks.append(BytesIO())
                idxt_blocks.append(BytesIO())
                record_counts.append(0)
                offset = idxt_pos = 0
                last_indices.append(b'')
            record_counts[-1] += 1
            idxt_blocks[-1].write(pack(b'>H', self.HEADER_LENGTH + offset))
            index_blocks[-1].write(raw)
            last_indices[-1] = index_num

        index_records = []
        for index_block, idxt_block, record_count in zip(
                index_blocks, idxt_blocks, record_counts):
            index_block = align_block(index_block.getvalue())
            idxt_block = align_block(b'IDXT' + idxt_block.getvalue())
            # Create header for this index record
            header = b'INDX'
            buf.seek(0), buf.truncate(0)
            buf.write(pack(b'>I', self.HEADER_LENGTH))
            buf.write(b'\0' * 4)  # Unknown
            buf.write(
                pack(b'>I', 1)
            )  # Header type (0 for Index header record and 1 for Index records)
            buf.write(b'\0' * 4)  # Unknown

            # IDXT block offset
            buf.write(pack(b'>I', self.HEADER_LENGTH + len(index_block)))

            # Number of index entries in this record
            buf.write(pack(b'>I', record_count))

            buf.write(b'\xff' * 8)  # Unknown

            buf.write(b'\0' * 156)  # Unknown

            header += buf.getvalue()
            index_records.append(header + index_block + idxt_block)
            if len(index_records[-1]) > 0x10000:
                raise ValueError(
                    'Failed to rollover index blocks for very large index.')

        # Create the Index Header record
        tagx = self.generate_tagx()

        # Geometry of the index records is written as index entries pointed to
        # by the IDXT records
        buf.seek(0), buf.truncate()
        idxt = [b'IDXT']
        pos = IndexHeader.HEADER_LENGTH + len(tagx)
        for last_idx, num in zip(last_indices, record_counts):
            start = buf.tell()
            idxt.append(pack(b'>H', pos))
            buf.write(bytes(bytearray([len(last_idx)])) + last_idx)
            buf.write(pack(b'>H', num))
            pos += buf.tell() - start

        header = {
            'num_of_entries': sum(r for r in record_counts),
            'num_of_records': len(index_records),
            'num_of_cncx': len(self.cncx),
            'tagx': align_block(tagx),
            'geometry': align_block(buf.getvalue()),
            'idxt': align_block(b''.join(idxt)),
        }
        header = IndexHeader()(**header)
        self.records = [header] + index_records
        self.records.extend(self.cncx.records)
        return self.records
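
A rough sketch (hypothetical names, stdlib only) of the rollover idiom used in __call__ above: entries are packed into BytesIO blocks, and a fresh block is started whenever the current one would exceed a fixed record size.

from io import BytesIO

RECORD_LIMIT = 64          # tiny limit, just for illustration

def pack_entries(entries):
    blocks = [BytesIO()]
    for entry in entries:
        raw = entry.encode('utf-8')
        if blocks[-1].tell() + len(raw) > RECORD_LIMIT:
            blocks.append(BytesIO())   # roll over to a new record
        blocks[-1].write(raw)
    return [b.getvalue() for b in blocks]

records = pack_entries('entry%04d' % i for i in range(20))
print(len(records), [len(r) for r in records])
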
            #img.save(captureTime + ".jpg")

            # Detect cat
            cat_present = detect_objects(interpreter, img, 0.15)
            print("cat present:", cat_present)

            # Send POST request to webserver
            payload = {'status': int(cat_present)}
            myHeaders = {'Content-type': 'application/json'}
            r = req.post(url, json=payload, headers=myHeaders)
            if r.status_code == 200:
                content = r.json()
                print(captureTime, content)
                # Cooldown timer if dispenser is activated
                if content['action'] == 'dispensed':
                    print("Waiting 1 min")
                    time.sleep(60)
                else:
                    print("wait 0.5 sec")
                    time.sleep(0.5)
            else:
                print("server error: ", r)

        except Exception as e:
            print("exception:", e.args)

        finally:
            # Clear stream and reset position
            stream.truncate(0)
            stream.seek(0)
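
A minimal, self-contained sketch (assumed names, no camera or webserver) of the capture loop's housekeeping: whatever happens in the body, the shared BytesIO is emptied in the finally block so the next iteration starts from a clean buffer.

from io import BytesIO

stream = BytesIO()
for frame in (b'frame-1', b'oops', b'frame-3'):
    try:
        stream.write(frame)
        if frame == b'oops':
            raise ValueError('simulated capture error')
        print('processed', stream.getvalue())
    except Exception as e:
        print("exception:", e.args)
    finally:
        stream.truncate(0)   # discard this frame's bytes
        stream.seek(0)       # rewind so the next write starts at offset 0
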
Exemple #58
0
class Curl:
    "High-level interface to pycurl functions."
    def __init__(self, base_url="", fakeheaders=None):
        self.handle = pycurl.Curl()
        # These members might be set.
        self.set_url(base_url)
        self.verbosity = 0
        self.fakeheaders = fakeheaders or []
        # Nothing past here should be modified by the caller.
        self.payload = None
        self.payload_io = BytesIO()
        self.hdr = ""
        # Verify that we've got the right site; harmless on a non-SSL connect.
        self.set_option(pycurl.SSL_VERIFYHOST, 2)
        # Follow redirects in case it wants to take us to a CGI...
        self.set_option(pycurl.FOLLOWLOCATION, 1)
        self.set_option(pycurl.MAXREDIRS, 5)
        self.set_option(pycurl.NOSIGNAL, 1)
        # Setting this option with even a nonexistent file makes libcurl
        # handle cookie capture and playback automatically.
        self.set_option(pycurl.COOKIEFILE, "/dev/null")
        # Set timeouts to avoid hanging too long
        self.set_timeout(30)
        # Use password identification from .netrc automatically
        self.set_option(pycurl.NETRC, 1)
        self.set_option(pycurl.WRITEFUNCTION, self.payload_io.write)
        def header_callback(x):
            self.hdr += x.decode('ascii')
        self.set_option(pycurl.HEADERFUNCTION, header_callback)

    def set_timeout(self, timeout):
        "Set timeout for a retrieving an object"
        self.set_option(pycurl.TIMEOUT, timeout)

    def set_url(self, url):
        "Set the base URL to be retrieved."
        self.base_url = url
        self.set_option(pycurl.URL, self.base_url)

    def set_option(self, *args):
        "Set an option on the retrieval."
        self.handle.setopt(*args)

    def set_verbosity(self, level):
        "Set verbosity to 1 to see transactions."
        self.set_option(pycurl.VERBOSE, level)

    def __request(self, relative_url=None):
        "Perform the pending request."
        if self.fakeheaders:
            self.set_option(pycurl.HTTPHEADER, self.fakeheaders)
        if relative_url:
            self.set_option(pycurl.URL, urljoin(self.base_url, relative_url))
        self.payload = None
        self.payload_io.seek(0)
        self.payload_io.truncate()
        self.hdr = ""
        self.handle.perform()
        self.payload = self.payload_io.getvalue()
        return self.payload

    def get(self, url="", params=None):
        "Ship a GET request for a specified URL, capture the response."
        if params:
            url += "?" + urllib_parse.urlencode(params)
        self.set_option(pycurl.HTTPGET, 1)
        return self.__request(url)

    def head(self, url="", params=None):
        "Ship a HEAD request for a specified URL, capture the response."
        if params:
            url += "?" + urllib_parse.urlencode(params)
        self.set_option(pycurl.NOBODY, 1)
        return self.__request(url)

    def post(self, cgi, params):
        "Ship a POST request to a specified CGI, capture the response."
        self.set_option(pycurl.POST, 1)
        self.set_option(pycurl.POSTFIELDS, urllib_parse.urlencode(params))
        return self.__request(cgi)

    def body(self):
        "Return the body from the last response."
        return self.payload

    def header(self):
        "Return the header from the last response."
        return self.hdr

    def get_info(self, *args):
        "Get information about retrieval."
        return self.handle.getinfo(*args)

    def info(self):
        "Return a dictionary with all info on the last response."
        m = {}
        m['effective-url'] = self.handle.getinfo(pycurl.EFFECTIVE_URL)
        m['http-code'] = self.handle.getinfo(pycurl.HTTP_CODE)
        m['total-time'] = self.handle.getinfo(pycurl.TOTAL_TIME)
        m['namelookup-time'] = self.handle.getinfo(pycurl.NAMELOOKUP_TIME)
        m['connect-time'] = self.handle.getinfo(pycurl.CONNECT_TIME)
        m['pretransfer-time'] = self.handle.getinfo(pycurl.PRETRANSFER_TIME)
        m['redirect-time'] = self.handle.getinfo(pycurl.REDIRECT_TIME)
        m['redirect-count'] = self.handle.getinfo(pycurl.REDIRECT_COUNT)
        m['size-upload'] = self.handle.getinfo(pycurl.SIZE_UPLOAD)
        m['size-download'] = self.handle.getinfo(pycurl.SIZE_DOWNLOAD)
        m['speed-upload'] = self.handle.getinfo(pycurl.SPEED_UPLOAD)
        m['header-size'] = self.handle.getinfo(pycurl.HEADER_SIZE)
        m['request-size'] = self.handle.getinfo(pycurl.REQUEST_SIZE)
        m['content-length-download'] = self.handle.getinfo(pycurl.CONTENT_LENGTH_DOWNLOAD)
        m['content-length-upload'] = self.handle.getinfo(pycurl.CONTENT_LENGTH_UPLOAD)
        m['content-type'] = self.handle.getinfo(pycurl.CONTENT_TYPE)
        m['response-code'] = self.handle.getinfo(pycurl.RESPONSE_CODE)
        m['speed-download'] = self.handle.getinfo(pycurl.SPEED_DOWNLOAD)
        m['ssl-verifyresult'] = self.handle.getinfo(pycurl.SSL_VERIFYRESULT)
        m['filetime'] = self.handle.getinfo(pycurl.INFO_FILETIME)
        m['starttransfer-time'] = self.handle.getinfo(pycurl.STARTTRANSFER_TIME)
        m['http-connectcode'] = self.handle.getinfo(pycurl.HTTP_CONNECTCODE)
        m['httpauth-avail'] = self.handle.getinfo(pycurl.HTTPAUTH_AVAIL)
        m['proxyauth-avail'] = self.handle.getinfo(pycurl.PROXYAUTH_AVAIL)
        m['os-errno'] = self.handle.getinfo(pycurl.OS_ERRNO)
        m['num-connects'] = self.handle.getinfo(pycurl.NUM_CONNECTS)
        m['ssl-engines'] = self.handle.getinfo(pycurl.SSL_ENGINES)
        m['cookielist'] = self.handle.getinfo(pycurl.INFO_COOKIELIST)
        m['lastsocket'] = self.handle.getinfo(pycurl.LASTSOCKET)
        m['ftp-entry-path'] = self.handle.getinfo(pycurl.FTP_ENTRY_PATH)
        return m

    def answered(self, check):
        "Did a given check string occur in the last payload?"
        return self.payload.find(check) >= 0

    def close(self):
        "Close a session, freeing resources."
        if self.handle:
            self.handle.close()
        self.handle = None
        self.hdr = ""
        self.payload = ""

    def __del__(self):
        self.close()
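
A hedged usage sketch for the Curl wrapper above (assumes pycurl is installed and the network is reachable; the URL is only an example):

c = Curl("http://example.org/")
body = c.get()                    # bytes collected by payload_io via WRITEFUNCTION
print(len(body), "bytes")
print(c.info()['http-code'])      # e.g. 200
print(c.header()[:40])            # start of the headers captured by header_callback
c.close()
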
Exemple #59
0
class Renderer(object):
    """Helper class for building DNS wire-format messages.

    Most applications can use the higher-level L{dns.message.Message}
    class and its to_wire() method to generate wire-format messages.
    This class is for those applications which need finer control
    over the generation of messages.

    Typical use::

        r = dns.renderer.Renderer(id=1, flags=0x80, max_size=512)
        r.add_question(qname, qtype, qclass)
        r.add_rrset(dns.renderer.ANSWER, rrset_1)
        r.add_rrset(dns.renderer.ANSWER, rrset_2)
        r.add_rrset(dns.renderer.AUTHORITY, ns_rrset)
        r.add_edns(0, 0, 4096)
        r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_1)
        r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_2)
        r.write_header()
        r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac)
        wire = r.get_wire()

    @ivar output: where rendering is written
    @type output: BytesIO object
    @ivar id: the message id
    @type id: int
    @ivar flags: the message flags
    @type flags: int
    @ivar max_size: the maximum size of the message
    @type max_size: int
    @ivar origin: the origin to use when rendering relative names
    @type origin: dns.name.Name object
    @ivar compress: the compression table
    @type compress: dict
    @ivar section: the section currently being rendered
    @type section: int (dns.renderer.QUESTION, dns.renderer.ANSWER,
    dns.renderer.AUTHORITY, or dns.renderer.ADDITIONAL)
    @ivar counts: list of the number of RRs in each section
    @type counts: int list of length 4
    @ivar mac: the MAC of the rendered message (if TSIG was used)
    @type mac: string
    """
    def __init__(self, id=None, flags=0, max_size=65535, origin=None):
        """Initialize a new renderer.

        @param id: the message id
        @type id: int
        @param flags: the DNS message flags
        @type flags: int
        @param max_size: the maximum message size; the default is 65535.
        If rendering results in a message greater than I{max_size},
        then L{dns.exception.TooBig} will be raised.
        @type max_size: int
        @param origin: the origin to use when rendering relative names
        @type origin: dns.name.Name or None.
        """

        self.output = BytesIO()
        if id is None:
            self.id = random.randint(0, 65535)
        else:
            self.id = id
        self.flags = flags
        self.max_size = max_size
        self.origin = origin
        self.compress = {}
        self.section = QUESTION
        self.counts = [0, 0, 0, 0]
        self.output.write(b'\x00' * 12)
        self.mac = ''

    def _rollback(self, where):
        """Truncate the output buffer at offset I{where}, and remove any
        compression table entries that pointed beyond the truncation
        point.

        @param where: the offset
        @type where: int
        """

        self.output.seek(where)
        self.output.truncate()
        keys_to_delete = []
        for k, v in self.compress.items():
            if v >= where:
                keys_to_delete.append(k)
        for k in keys_to_delete:
            del self.compress[k]

    def _set_section(self, section):
        """Set the renderer's current section.

        Sections must be rendered in order: QUESTION, ANSWER, AUTHORITY,
        ADDITIONAL.  Sections may be empty.

        @param section: the section
        @type section: int
        @raises dns.exception.FormError: an attempt was made to set
        a section value less than the current section.
        """

        if self.section != section:
            if self.section > section:
                raise dns.exception.FormError
            self.section = section

    def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN):
        """Add a question to the message.

        @param qname: the question name
        @type qname: dns.name.Name
        @param rdtype: the question rdata type
        @type rdtype: int
        @param rdclass: the question rdata class
        @type rdclass: int
        """

        self._set_section(QUESTION)
        before = self.output.tell()
        qname.to_wire(self.output, self.compress, self.origin)
        self.output.write(struct.pack("!HH", rdtype, rdclass))
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[QUESTION] += 1

    def add_rrset(self, section, rrset, **kw):
        """Add the rrset to the specified section.

        Any keyword arguments are passed on to the rdataset's to_wire()
        routine.

        @param section: the section
        @type section: int
        @param rrset: the rrset
        @type rrset: dns.rrset.RRset object
        """

        self._set_section(section)
        before = self.output.tell()
        n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[section] += n

    def add_rdataset(self, section, name, rdataset, **kw):
        """Add the rdataset to the specified section, using the specified
        name as the owner name.

        Any keyword arguments are passed on to the rdataset's to_wire()
        routine.

        @param section: the section
        @type section: int
        @param name: the owner name
        @type name: dns.name.Name object
        @param rdataset: the rdataset
        @type rdataset: dns.rdataset.Rdataset object
        """

        self._set_section(section)
        before = self.output.tell()
        n = rdataset.to_wire(name, self.output, self.compress, self.origin,
                             **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[section] += n

    def add_edns(self, edns, ednsflags, payload, options=None):
        """Add an EDNS OPT record to the message.

        @param edns: The EDNS level to use.
        @type edns: int
        @param ednsflags: EDNS flag values.
        @type ednsflags: int
        @param payload: The EDNS sender's payload field, which is the maximum
        size of UDP datagram the sender can handle.
        @type payload: int
        @param options: The EDNS options list
        @type options: list of dns.edns.Option instances
        @see: RFC 2671
        """

        # make sure the EDNS version in ednsflags agrees with edns
        ednsflags &= long(0xFF00FFFF)
        ednsflags |= (edns << 16)
        self._set_section(ADDITIONAL)
        before = self.output.tell()
        self.output.write(
            struct.pack('!BHHIH', 0, dns.rdatatype.OPT, payload, ednsflags, 0))
        if options is not None:
            lstart = self.output.tell()
            for opt in options:
                stuff = struct.pack("!HH", opt.otype, 0)
                self.output.write(stuff)
                start = self.output.tell()
                opt.to_wire(self.output)
                end = self.output.tell()
                assert end - start < 65536
                self.output.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                self.output.write(stuff)
                self.output.seek(0, 2)
            lend = self.output.tell()
            assert lend - lstart < 65536
            self.output.seek(lstart - 2)
            stuff = struct.pack("!H", lend - lstart)
            self.output.write(stuff)
            self.output.seek(0, 2)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[ADDITIONAL] += 1

    def add_tsig(self,
                 keyname,
                 secret,
                 fudge,
                 id,
                 tsig_error,
                 other_data,
                 request_mac,
                 algorithm=dns.tsig.default_algorithm):
        """Add a TSIG signature to the message.

        @param keyname: the TSIG key name
        @type keyname: dns.name.Name object
        @param secret: the secret to use
        @type secret: string
        @param fudge: TSIG time fudge
        @type fudge: int
        @param id: the message id to encode in the tsig signature
        @type id: int
        @param tsig_error: TSIG error code; default is 0.
        @type tsig_error: int
        @param other_data: TSIG other data.
        @type other_data: string
        @param request_mac: This message is a response to the request which
        had the specified MAC.
        @type request_mac: string
        @param algorithm: the TSIG algorithm to use
        @type algorithm: dns.name.Name object
        """

        self._set_section(ADDITIONAL)
        before = self.output.tell()
        s = self.output.getvalue()
        (tsig_rdata, self.mac, ctx) = dns.tsig.sign(s,
                                                    keyname,
                                                    secret,
                                                    int(time.time()),
                                                    fudge,
                                                    id,
                                                    tsig_error,
                                                    other_data,
                                                    request_mac,
                                                    algorithm=algorithm)
        keyname.to_wire(self.output, self.compress, self.origin)
        self.output.write(
            struct.pack('!HHIH', dns.rdatatype.TSIG, dns.rdataclass.ANY, 0, 0))
        rdata_start = self.output.tell()
        self.output.write(tsig_rdata)
        after = self.output.tell()
        assert after - rdata_start < 65536
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.output.seek(rdata_start - 2)
        self.output.write(struct.pack('!H', after - rdata_start))
        self.counts[ADDITIONAL] += 1
        self.output.seek(10)
        self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
        self.output.seek(0, 2)

    def write_header(self):
        """Write the DNS message header.

        Writing the DNS message header is done after all sections
        have been rendered, but before the optional TSIG signature
        is added.
        """

        self.output.seek(0)
        self.output.write(
            struct.pack('!HHHHHH', self.id, self.flags, self.counts[0],
                        self.counts[1], self.counts[2], self.counts[3]))
        self.output.seek(0, 2)

    def get_wire(self):
        """Return the wire format message.

        @rtype: string
        """

        return self.output.getvalue()
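
A small stdlib-only sketch (not dnspython API) of the rollback idiom that _rollback relies on: remember the offset before a speculative write, then seek and truncate back to it if the result turns out to be too big.

from io import BytesIO

MAX_SIZE = 16
out = BytesIO()
out.write(b'header....')            # 10 bytes already rendered

before = out.tell()
out.write(b'oversized record')      # speculative write: 16 more bytes
if out.tell() > MAX_SIZE:
    out.seek(before)                # roll the buffer back ...
    out.truncate()                  # ... discarding the partial record

print(out.getvalue())               # b'header....' -- unchanged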