Example #1
 def process_online_reco(self, data, blob):
     data_io = BytesIO(data)
     preamble = DAQPreamble(file_obj=data_io)    # noqa
     _data = unpack('<iiiQI', data_io.read(4 + 4 + 4 + 8 + 4))
     det_id, run_id, frame_index, trigger_counter, utc_seconds = _data
     shower_reco = unpack('9d', data_io.read(9 * 8))
     shower_meta = unpack('3i', data_io.read(12))
     track_reco = unpack('9d', data_io.read(9 * 8))
     track_meta = unpack('3i', data_io.read(12))
     print(
         "Shower: x/y/z/dx/dy/dz/E/Q/t (type/status/ndf): ", shower_reco,
         shower_meta
     )
     print(
         "Track: x/y/z/dx/dy/dz/E/Q/t (type/status/ndf): ", track_reco,
         track_meta
     )
     blob['ReconstructionInfo'] = Table({
         'det_id': det_id,
         'run_id': run_id,
         'frame_index': frame_index,
         'trigger_counter': trigger_counter,
         'utc_seconds': utc_seconds
     },
                                        h5loc='reco',
                                        split_h5=True,
                                        name='Reconstructions')
     args = track_reco + track_meta
     blob['RecoTrack'] = RecoTrack(*args)
     args = shower_reco + shower_meta
     blob['RecoShower'] = RecoShower(*args)
Example #2
    def __init__(self, raw, sector_size, update_seq):
        """
        :param raw: raw bytes read from real stream.
        :param sector_size: size of sectors.
        :param update_seq: update sequence.
        """

        super(NTFSClusterStream, self).__init__()

        fix_up, updates = update_seq[:2],\
                          (lambda _: (pack('BB', *p)
                                      for p in zip(*[_] * 2)))(
                              iter(update_seq[2:]))

        rd = BytesIO(raw)
        wr = BytesIO()
        while True:
            _ = rd.read(sector_size - 2)
            if not _:
                break

            wr.write(_)
            if rd.read(2) != fix_up:
                raise CorruptedSector
            else:
                wr.write(next(updates))

        self._stream = BytesIO(wr.getvalue())
Example #3
 def iterblocks(self,M,bitlen=None):
     needed = len(M)*8
     # handle NIST MSB alignment to Keccak LSB alignment for last byte
     # (see Keccak SHA-3 submission §6.1):
     if bitlen:
         assert bitlen<=needed
         needed = bitlen
         if not self.duplexing:
             b = Bits(M[-1:],size=needed%8)[::-1]
             M = M[:needed//8]+newbytes([b.ival])
     r = self.r
     br,rr = divmod(r,8)
     P = BytesIO(M)
     # init iterator loop:
     Pi = P.read(br)
     Pb = Bits(0,size=0)
     while len(Pi)>0:
         # input message bitstream buffer:
         Pb =  Pb//Bits(Pi,bitorder=1)
         if len(Pb)>=needed:
             Pb.size=needed
             P.read() # consume all stream to exit loop
         if len(Pb)>=r:
             yield Pb[:r]
             needed -= r
             Pb = Pb[r:]
         Pi = P.read(br)
     # pad10*1 (with little-endian convention) :
     Pb = Pb//Bits(1)//Bits(0,size=r-len(Pb)-2)//Bits(1)
     yield Pb
Example #4
    def test_view_excel_file_sorted(self):
        semester = mommy.make(Semester)
        course_type = mommy.make(CourseType)
        mommy.make(Evaluation, state='published', course=mommy.make(Course, type=course_type, semester=semester, name_de="A", name_en="B"),
                   name_de='Evaluation1', name_en='Evaluation1')

        mommy.make(Evaluation, state='published', course=mommy.make(Course, type=course_type, semester=semester, name_de="B", name_en="A"),
                   name_de='Evaluation2', name_en='Evaluation2')

        content_de = BytesIO()
        with translation.override("de"):
            ExcelExporter(semester).export(content_de, [[course_type.id]], True, True)

        content_en = BytesIO()
        with translation.override("en"):
            ExcelExporter(semester).export(content_en, [[course_type.id]], True, True)

        content_de.seek(0)
        content_en.seek(0)

        # Load responses as Excel files and check for correct sorting
        workbook = xlrd.open_workbook(file_contents=content_de.read())
        self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A – Evaluation1")
        self.assertEqual(workbook.sheets()[0].row_values(0)[2], "B – Evaluation2")

        workbook = xlrd.open_workbook(file_contents=content_en.read())
        self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A – Evaluation2")
        self.assertEqual(workbook.sheets()[0].row_values(0)[2], "B – Evaluation1")
Example #5
    def test_course_type_ordering(self):
        course_type_1 = mommy.make(CourseType, order=1)
        course_type_2 = mommy.make(CourseType, order=2)
        semester = mommy.make(Semester)
        evaluation_1 = mommy.make(Evaluation, course=mommy.make(Course, semester=semester, type=course_type_1), state='published', _participant_count=2, _voter_count=2)
        evaluation_2 = mommy.make(Evaluation, course=mommy.make(Course, semester=semester, type=course_type_2), state='published', _participant_count=2, _voter_count=2)

        questionnaire = mommy.make(Questionnaire)
        question = mommy.make(Question, type=Question.LIKERT, questionnaire=questionnaire)

        evaluation_1.general_contribution.questionnaires.set([questionnaire])
        mommy.make(RatingAnswerCounter, question=question, contribution=evaluation_1.general_contribution, answer=3, count=2)

        evaluation_2.general_contribution.questionnaires.set([questionnaire])
        mommy.make(RatingAnswerCounter, question=question, contribution=evaluation_2.general_contribution, answer=3, count=2)

        binary_content = BytesIO()
        ExcelExporter(semester).export(binary_content, [[course_type_1.id, course_type_2.id]], True, True)
        binary_content.seek(0)
        workbook = xlrd.open_workbook(file_contents=binary_content.read())

        self.assertEqual(workbook.sheets()[0].row_values(0)[1], evaluation_1.full_name)
        self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_2.full_name)

        course_type_2.order = 0
        course_type_2.save()

        binary_content = BytesIO()
        ExcelExporter(semester).export(binary_content, [[course_type_1.id, course_type_2.id]], True, True)
        binary_content.seek(0)
        workbook = xlrd.open_workbook(file_contents=binary_content.read())

        self.assertEqual(workbook.sheets()[0].row_values(0)[1], evaluation_2.full_name)
        self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_1.full_name)
Example #6
class TFramedTransport(TTransportBase, CReadableTransport):
  """Class that wraps another transport and frames its I/O when writing."""

  def __init__(self, trans,):
    self.__trans = trans
    self.__rbuf = BytesIO()
    self.__wbuf = BytesIO()

  def isOpen(self):
    return self.__trans.isOpen()

  def open(self):
    return self.__trans.open()

  def close(self):
    return self.__trans.close()

  def read(self, sz):
    ret = self.__rbuf.read(sz)
    if len(ret) != 0:
      return ret

    self.readFrame()
    return self.__rbuf.read(sz)

  def readFrame(self):
    buff = self.__trans.readAll(4)
    sz, = unpack('!i', buff)
    self.__rbuf = BytesIO(self.__trans.readAll(sz))

  def write(self, buf):
    self.__wbuf.write(buf)

  def flush(self):
    wout = self.__wbuf.getvalue()
    wsz = len(wout)
    # reset wbuf before write/flush to preserve state on underlying failure
    self.__wbuf = BytesIO()
    # N.B.: Doing this string concatenation is WAY cheaper than making
    # two separate calls to the underlying socket object. Socket writes in
    # Python turn out to be REALLY expensive, but it seems to do a pretty
    # good job of managing string buffer operations without excessive copies
    buf = pack("!i", wsz) + wout
    self.__trans.write(buf)
    self.__trans.flush()

  # Implement the CReadableTransport interface.
  @property
  def cstringio_buf(self):
    return self.__rbuf

  def cstringio_refill(self, prefix, reqlen):
    # self.__rbuf will already be empty here because fastbinary doesn't
    # ask for a refill until the previous buffer is empty.  Therefore,
    # we can start reading new frames immediately.
    while len(prefix) < reqlen:
      self.readFrame()
      prefix += self.__rbuf.getvalue()
    self.__rbuf = BytesIO(prefix)
    return self.__rbuf
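A minimal round-trip sketch (not part of the original snippet): FakeTransport below is a hypothetical stand-in that only mimics the readAll/write/flush calls TFramedTransport makes, assuming BytesIO and struct's pack/unpack are in scope as in the example above.

class FakeTransport(object):
    def __init__(self):
        self._buf = BytesIO()
    def isOpen(self):
        return True
    def open(self):
        pass
    def close(self):
        pass
    def write(self, data):
        self._buf.write(data)
    def flush(self):
        pass
    def readAll(self, sz):
        return self._buf.read(sz)

inner = FakeTransport()
framed = TFramedTransport(inner)
framed.write(b'hello')
framed.flush()           # writes the 4-byte length prefix followed by the payload
inner._buf.seek(0)       # rewind so the same in-memory buffer can be read back
print(framed.read(5))    # b'hello'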
Example #7
def unbind(filedata):
    """This is old code I didn't bother refactoring, sorry for the mess"""
    binded = BytesIO(filedata)

    binded.seek(-4, os.SEEK_END)
    indexsize = binded.read(4)
    indexsize = struct.unpack('<I', indexsize)[0]

    binded.seek(0 - (indexsize + 4), os.SEEK_END)
    endofdata = binded.tell()

    indexdata = binded.read(indexsize - 1)

    binded.seek(endofdata, os.SEEK_SET)

    files = []
    for line in reversed(indexdata.splitlines()):
        file = line.split(b'/')
        if (file[0] != b"__END_OF_SERIES_OF_BINDED_FILES__"):
            fstart = int(file[1], 10)
            fend = binded.tell()
            fsize = (fend - fstart)
            binded.seek(-fsize, os.SEEK_CUR)
            buffer = binded.read(fsize + 1)
            name = file[0].decode('utf-8') + getfileext(buffer)
            if (fstart > 0):
                binded.seek(fstart - 1, os.SEEK_SET)
            files.append((name, buffer))
    return files
Example #8
    def parse_header(self):
        self.header = self.stream.read(196)

        f = BytesIO(self.header)

        chunk = f.read(8)
        # array_start = struct.unpack('!i', chunk[:4])
        # print(array_start[0])
        # print(chunk.endswith('CORD'))

        chunk = f.read(80)
        # header = struct.unpack('!' + 'i'*20, chunk)
        # print('Number of frames:', header[0])

        chunk = f.read(4)
        # array_start = struct.unpack('!i', chunk)
        # print(array_start[0])

        chunk = f.read(92)
        # values = struct.unpack('!ii' + 'c'*80 + 'i', chunk)
        # comment = ''.join(values[2:-2])
        # print(comment)

        chunk = f.read(12)
        values = struct.unpack('!iii', chunk)
        self.num_particles = values[1]
        # print(values, self.num_particles)

        f.close()
Example #9
class DummySocket(object):
    def __init__(self):
        self.queue = []
        self._buffer = BytesIO()
        self._read_counter = 0
        self.can_read = False

    @property
    def buffer(self):
        return memoryview(self._buffer.getvalue()[self._read_counter:])

    @buffer.setter
    def buffer(self, value):
        self._buffer = value
        self._read_counter = 0

    def advance_buffer(self, amt):
        self._read_counter += amt
        self._buffer.read(amt)

    def send(self, data):
        self.queue.append(data)

    sendall = send

    def recv(self, l):
        data = self._buffer.read(l)
        self._read_counter += len(data)
        return memoryview(data)

    def close(self):
        pass

    def fill(self):
        pass
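A small usage sketch (not part of the original example), assuming BytesIO is imported as above: the buffer setter primes the dummy socket, recv() consumes from it, and the buffer property exposes what is left.

sock = DummySocket()
sock.buffer = BytesIO(b'hello world')
print(bytes(sock.recv(5)))    # b'hello'
print(bytes(sock.buffer))     # b' world' -- whatever recv() has not consumed yet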
Example #10
    def test_view_excel_file_sorted(self):
        semester = mommy.make(Semester)
        course_type = mommy.make(CourseType)
        course1 = mommy.make(Course, state='published', type=course_type,
                             name_de='A - Course1', name_en='B - Course1', semester=semester)

        course2 = mommy.make(Course, state='published', type=course_type,
                             name_de='B - Course2', name_en='A - Course2', semester=semester)

        mommy.make(Contribution, course=course1, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
        mommy.make(Contribution, course=course2, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)

        content_de = BytesIO()
        with translation.override("de"):
            ExcelExporter(semester).export(content_de, [[course_type.id]], True, True)

        content_en = BytesIO()
        with translation.override("en"):
            ExcelExporter(semester).export(content_en, [[course_type.id]], True, True)

        content_de.seek(0)
        content_en.seek(0)

        # Load responses as Excel files and check for correct sorting
        workbook = xlrd.open_workbook(file_contents=content_de.read())
        self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A - Course1")
        self.assertEqual(workbook.sheets()[0].row_values(0)[2], "B - Course2")

        workbook = xlrd.open_workbook(file_contents=content_en.read())
        self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A - Course2")
        self.assertEqual(workbook.sheets()[0].row_values(0)[2], "B - Course1")
Example #11
    def handle_message(self, endpoint, payload):
        args = struct.unpack("<16s10sfHH", payload[:34])
        uuid_bytes, bsn, master_ts = args[:3]
        l_master_pkey, l_signuture = args[3:]
        self.serial = sn = bsn.decode("ascii")
        self.uuid = uuid = UUID(bytes=uuid_bytes)

        f = BytesIO(payload[34:])
        masterkey_doc = f.read(l_master_pkey)
        signuture = f.read(l_signuture)

        if not validate_identify(uuid, signuture, serial=sn,
                                 masterkey_doc=masterkey_doc):
            print("Validate identify failed (uuid=%s)" % uuid)
            return

        master_pkey = KeyObject.load_keyobj(masterkey_doc)

        stbuf = f.read(64)
        st_ts, st_id, st_prog, st_head, st_err = \
            struct.unpack("dif16s32s", stbuf)

        head_module = st_head.decode("ascii",
                                     "ignore").strip("\x00")
        error_label = st_err.decode("ascii",
                                    "ignore").strip("\x00")

        self.device = Device(uuid, sn, master_pkey, 1)
        self.device.update_status(st_id=st_id, st_ts=st_ts, st_prog=st_prog,
                             head_module=head_module,
                             error_label=error_label)
Example #12
class FIFOBuffer(object):
    def __init__(self, source, buffer_size=4096):
        self._source = source
        self._buffer_size = buffer_size
        self._fifo = BytesIO()
        self._write_pos = 0
        self._read_pos = 0

    @property
    def size(self):
        return self._write_pos - self._read_pos

    def read_from_source(self, n):
        raise NotImplementedError

    def fill_buffer(self):
        self._fifo.seek(self._write_pos)
        data = self.read_from_source(self._buffer_size)
        self._fifo.write(data)
        self._write_pos = self._fifo.tell()
        return len(data) > 0

    def read(self, n=-1):
        while n is None or n < 0 or self.size < n:
            if not self.fill_buffer():
                break
        self._fifo.seek(self._read_pos)
        out = self._fifo.read(n)
        self._read_pos = self._fifo.tell()
        if self._read_pos > self._buffer_size:
            self._fifo = BytesIO(self._fifo.read())
            self._write_pos = self._fifo.tell()
            self._read_pos = 0
        return out
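An illustrative sketch (not part of the original example): BytesFIFOBuffer is a hypothetical subclass whose source is a plain BytesIO object, showing how read() refills the FIFO in buffer_size chunks.

class BytesFIFOBuffer(FIFOBuffer):
    def read_from_source(self, n):
        # pull at most n bytes from the wrapped in-memory stream
        return self._source.read(n)

buf = BytesFIFOBuffer(BytesIO(b'abcdefghij'), buffer_size=4)
print(buf.read(3))    # b'abc'
print(buf.read())     # b'defghij' -- the rest, pulled in 4-byte chunks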
Example #13
def read_dict(fd):
    dct = {}
    buf = read_atom(fd)
    buflen = len(buf)
    buf = BytesIO(buf)
    while buf.tell() < buflen:
        key = read_atom(buf).decode("utf-8")
        tag = buf.read(1)
        if tag == b"D":
            val = read_dict(fd)
        elif tag == b"E":
            val = Entry(**read_dict(fd))
        elif tag == b"I":
            val = struct.unpack("<I", buf.read(4))[0]
        elif tag == b"L":
            val = struct.unpack("<Q", buf.read(8))[0]
        elif tag == b"F":
            val = struct.unpack("<d", buf.read(8))[0]
        elif tag == b"":
            val = read_atom(buf)
        elif tag == b"S":
            val = read_atom(buf).decode("utf-8")
        else:
            raise TypeError(tag)
        dct[key] = val
    return dct
Example #14
 def iterblocks(self,M,bitlen=None):
     # pad M into M' (see spec, p.12-13)
     if bitlen is None:
         bitlen=len(M)*8
     else:
         M = (Bits(M,bitlen)//Bits(1,1)).bytes()
     # get BitPad flag:
     B = 1 if bitlen%8 else 0
     # pad M' into M'':
     l = len(M)
     lb = len(self.G)
     nb,rb = divmod(l,lb)
     lp=0
     if l==0 or rb>0:
         lp = lb-rb
         M += b'\0'*lp
         nb += 1
     # init generator:
     P = BytesIO(M)
     Ts = self.Ts
     Ts.First = 1
     for b in range(nb-1):
         m = P.read(lb)
         Ts.Position += lb
         yield (pack(Ts),m)
         Ts.First=0
     # last M'' block:
     Ts.Final = 1
     Ts.BitPad = B
     m = P.read(lb)
     Ts.Position += lb-lp
     yield (pack(Ts),m)
Example #15
def bdecode(f_or_data):
    """
    bdecodes data by looking up the type byte,
    and using it to look up the respective decoding function,
    which in turn is used to return the decoded object

    The parameter can be a file opened in bytes mode,
    bytes or a string (the last of which will be decoded)
    """
    if isinstance(f_or_data, str):
        f_or_data = f_or_data.encode()
    if isinstance(f_or_data, bytes):
        f_or_data = BytesIO(f_or_data)

    # TODO: the following line is the only one that needs readahead.
    # peek returns an arbitrary number of bytes, so we have to slice.
    if f_or_data.seekable():
        first_byte = f_or_data.read(1)
        f_or_data.seek(-1, SEEK_CUR)
    else:
        #FIXME: muted bug!
        first_byte = f_or_data.peek(1)[:1]  # pylint: disable=no-member
    btype = TYPES.get(first_byte)
    if btype is not None:
        return btype(f_or_data)
    else: #Used in dicts and lists to designate an end
        assert_btype(f_or_data.read(1), _TYPE_END)
        return None
Example #16
def _pull_target(dtuf_main, target, expected_dgsts, expected_sizes, get_info, capfd):
    environ = {'DTUF_BLOB_INFO': '1'}
    environ.update(dtuf_main)
    assert dtuf.main.doit(['pull-target', pytest.repo, target], environ if get_info else dtuf_main) == 0
    # pylint: disable=protected-access
    encoding = capfd._capture.out.tmpfile.encoding
    capfd._capture.out.tmpfile.encoding = None
    out, err = capfd.readouterr()
    if get_info:
        outs = BytesIO(out)
        for i, size in enumerate(expected_sizes):
            assert outs.readline() == expected_dgsts[i].encode('utf-8') + b' ' + str(size).encode('utf-8') + b'\n'
            sha256 = hashlib.sha256()
            sha256.update(outs.read(size))
            assert sha256.hexdigest() == expected_dgsts[i]
        assert len(outs.read()) == 0
    else:
        pos = 0
        for i, size in enumerate(expected_sizes):
            sha256 = hashlib.sha256()
            sha256.update(out[pos:pos + size])
            pos += size
            assert sha256.hexdigest() == expected_dgsts[i]
        assert pos == len(out)
    assert err == ""
    capfd._capture.out.tmpfile.encoding = encoding
Example #17
	def cache_data_index(self, hash):
		assert self.cdn
		assert self.base_path
		url, path = self.get_paths(hash, "index")
		if not os.path.exists(path):
			_prep_dir_for(path)
			r = requests.get(url)
			assert r.status_code == 200, r.status_code

			# calculate the .index md5
			data = BytesIO(r.content)
			data.seek(-12, os.SEEK_END)
			entries, = struct.unpack("i", data.read(4))
			blocks = ceil(entries / 170)
			blocks_len = blocks * 24

			data.seek(-28 - blocks_len, os.SEEK_END)
			index_hash = md5(data.read(blocks_len)).digest()
			hash_chk = data.read(8)
			# We only deal with 8 byte md5
			assert index_hash[:8] == hash_chk, "%r != %r" % (index_hash[:8], hash_chk)

			data.seek(0)
			for i in range(blocks):
				block_hash = md5(data.read(4096)).digest()
				pos = data.tell()
				data.seek(blocks * (4096+16) + i*8)
				hash_chk = data.read(8)
				assert block_hash[:8] == hash_chk, "%r != %r for block %r" % (block_hash[:8], hash_chk, i)
				data.seek(pos)

			# Write the file now
			with open(path, "wb") as f:
				logging.info("Writing to %r", path)
				f.write(r.content)
Example #18
 def __init__(self, dictionary, parent):
     self.parent = parent
     for key in dictionary:
         setattr(self, key.lower(), dictionary[key])
     streamy = BytesIO()
     self.filename = os.path.join(self.parent.parent.parent.proj_dir,
                                  self.filename)
     with open(self.filename, 'br') as fn:
         streamy.write(fn.read())
     streamy.seek(0)
     file_magic = unpack('<I', streamy.read(4))[0]
     print('reading Image', self.filename)
     if file_magic != 52:
         raise IOError(
             'Jeol image file {} does not have the expected magic number {}'.format(
                 self.filename, file_magic))
     self.fileformat = streamy.read(32).rstrip(b'\x00').decode("utf-8")
     header, header_len, data = unpack('<III', streamy.read(12))
     streamy.seek(header+12)
     self.header = aggregate(streamy)
     streamy.seek(data+12)
     self.metadata = aggregate(streamy)
     s = self.metadata['Image']['Size']
     self.metadata['Image']['Bits'].resize((s[1], s[0]))
     self.image_array = self.metadata['Image']['Bits']
     self.gen_icon()
Example #19
    def __parse_tag(self, tag, count):
        fileobj = BytesIO(tag)

        for i in range(count):
            size_data = fileobj.read(4)
            # someone writes wrong item counts
            if not size_data:
                break
            size = cdata.uint_le(size_data)
            flags = cdata.uint_le(fileobj.read(4))

            # Bits 1 and 2 are flags giving the value type (0-3)
            # Bit 0 is the read/write flag, ignored
            kind = (flags & 6) >> 1
            if kind == 3:
                raise APEBadItemError("value type must be 0, 1, or 2")
            key = value = fileobj.read(1)
            while key[-1:] != b'\x00' and value:
                value = fileobj.read(1)
                key += value
            if key[-1:] == b"\x00":
                key = key[:-1]
            value = fileobj.read(size)
            key = key.decode('ascii')

            if kind != BINARY:
                value = value.decode('utf-8')

            self[key] = APEValue(value, kind)
Example #20
class DummySocket(object):
    def __init__(self):
        self.queue = []
        self._buffer = BytesIO()
        self.can_read = False

    @property
    def buffer(self):
        return memoryview(self._buffer.getvalue())

    def advance_buffer(self, amt):
        self._buffer.read(amt)

    def send(self, data):
        if not isinstance(data, bytes):
            raise TypeError()

        self.queue.append(data)

    def recv(self, l):
        return memoryview(self._buffer.read(l))

    def close(self):
        pass

    def readline(self):
        return memoryview(self._buffer.readline())

    def fill(self):
        pass
Example #21
def decompress(raw, outfile=None):
    '''
    Decompress the specified data.

    :param raw: A bytestring or a file-like object open for reading
    :param outfile: A file-like object open for writing.
              The decompressed data is written into it. If not specified then a SpooledTemporaryFile
              is created and returned by this function.
    '''
    if isinstance(raw, bytes):
        raw = BytesIO(raw)
    outfile = outfile or SpooledTemporaryFile(50 * 1024 * 1024, '_xz_decompress')
    while True:
        read_stream(raw, outfile)
        pos = raw.tell()
        trail = raw.read(1024)
        if len(trail) < 20:
            break
        idx = trail.find(HEADER_MAGIC)
        if idx == -1:
            break
        if idx > -1:
            # Found another stream
            raw.seek(pos)
            if idx:
                padding = raw.read(idx)
                if padding.lstrip(b'\0') or len(padding) % 4:
                    raise InvalidXZ('Found trailing garbage between streams')
    return outfile
Example #22
File: rsa.py  Project: olbat/o1b4t
def verify(readable, s, e, n):
    """
    Verify the signature _s_ of the message from _readable_ using the public
    exponent _e_, the modulus _n_ and the SHA-2 algorithm

    (see https://en.wikipedia.org/wiki/Digital_signature#How_they_work)
    """
    import sha2

    # compute the expected hash
    hc = getattr(sha2, SHA2_DIGEST)
    exph = BytesIO()
    exph.write(hc.digest(readable))
    exph.seek(0)
    exph = exph.read()

    # extract (decrypt) the hash from the signature
    sigio = BytesIO()
    sigio.write(s)
    sigio.seek(0)
    sigh = BytesIO()
    try:
        decrypt(sigio, sigh, e, n)
    except BadKeyError:
        return False
    sigh.seek(0)
    sigh = sigh.read()

    return exph == sigh
Example #23
class TruncatedTailPipe(object):
    """
    Truncate the last `tail_size` bytes from the stream.
    """

    def __init__(self, output=None, tail_size=16):
        self.tail_size = tail_size
        self.output = output or BytesIO()
        self.buffer = BytesIO()

    def write(self, data):
        self.buffer.write(data)
        if self.buffer.tell() > self.tail_size:
            self._truncate_tail()

    def _truncate_tail(self):
        overflow_size = self.buffer.tell() - self.tail_size
        self.buffer.seek(0)
        self.output.write(self.buffer.read(overflow_size))
        remaining = self.buffer.read()
        self.buffer.seek(0)
        self.buffer.write(remaining)
        self.buffer.truncate()

    def close(self):
        return self.output
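A short usage sketch (not part of the original example): the last tail_size bytes written are withheld from the output, which here is the default BytesIO.

pipe = TruncatedTailPipe(tail_size=4)
for chunk in (b'hello ', b'world', b'TAIL'):
    pipe.write(chunk)
print(pipe.close().getvalue())    # b'hello world' -- the 4-byte trailer stays in the buffer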
Example #24
def decrypt(buf, passphrase):
    '''Decrypt *buf'''

    fh = BytesIO(buf)

    len_ = struct.unpack(b'<B', fh.read(struct.calcsize(b'<B')))[0]
    nonce = fh.read(len_)

    key = sha256(passphrase + nonce)
    cipher = aes_cipher(key) 
    hmac_ = hmac.new(key, digestmod=hashlib.sha256)

    # Read (encrypted) hmac
    hash_ = fh.read(HMAC_SIZE)

    buf = fh.read()
    buf = cipher.decrypt(buf)
    hmac_.update(buf)

    hash_ = cipher.decrypt(hash_)

    if not hmac.compare_digest(hash_, hmac_.digest()):
        raise ChecksumError('HMAC mismatch')

    return buf
Example #25
def resize_image(image, full_size, thumb_size, filename, region=None):

	image = Image.open(BytesIO(image.read()))

	if region:
		cropped = image.crop(region)
	else:
		cropped = image

	PIL_TYPE = 'jpeg'
	CONTENT = 'image/jpeg'

	image_storage = BytesIO()
	cropped = cropped.resize(full_size)
	cropped.save(image_storage, PIL_TYPE, quality=100)
	image_storage.seek(0)

	thumb_storage = BytesIO()
	cropped.thumbnail(thumb_size, Image.ANTIALIAS)
	cropped.save(thumb_storage, PIL_TYPE, quality=100)
	thumb_storage.seek(0)

	image_file = SimpleUploadedFile(filename, image_storage.read(), content_type=CONTENT)
	thumb_file = SimpleUploadedFile(filename, thumb_storage.read(), content_type=CONTENT)

	return image_file, thumb_file
Example #26
    def info(self):
        '''
        Return a string describing the loaded database version.

        @returns    English text string, or None if database is ancient.
        '''

        fp = BytesIO(self.cache)
        fp.seek(-3, os.SEEK_END)

        hasStructureInfo = False

        # first get past the database structure information
        for i in range(STRUCTURE_INFO_MAX_SIZE):
            if fp.read(3) == b'\xFF\xFF\xFF':
                hasStructureInfo = True
                break

            fp.seek(-4, os.SEEK_CUR)

        if hasStructureInfo:
            fp.seek(-6, os.SEEK_CUR)
        else:
            # no structure info, must be pre Sep 2002 database, go back to end.
            fp.seek(-3, os.SEEK_END)

        for i in range(DATABASE_INFO_MAX_SIZE):
            if fp.read(3) == b'\0\0\0':
                return fp.read(i)

            fp.seek(-4, os.SEEK_CUR)
Example #27
    def write_data(self, item, last=False, stream_id=None):
        """
        Send a DATA frame that is tracked by the local state machine.

        Write a DATA frame using the H2 Connection object; this will only work if the stream is in a state to send
        DATA frames. Uses flow control to split data into multiple data frames if it exceeds the size that can
        be in a single frame.

        :param item: The content of the DATA frame
        :param last: Flag to signal if this is the last frame in stream.
        :param stream_id: Id of stream to send frame on. Will use the request stream ID if None
        """
        if isinstance(item, (text_type, binary_type)):
            data = BytesIO(self.encode(item))
        else:
            data = item

        # Find the length of the data
        data.seek(0, 2)
        data_len = data.tell()
        data.seek(0)

        # If the data is longer than max payload size, need to write it in chunks
        payload_size = self.get_max_payload_size()
        while data_len > payload_size:
            self.write_data_frame(data.read(payload_size), False, stream_id)
            data_len -= payload_size
            payload_size = self.get_max_payload_size()

        self.write_data_frame(data.read(), last, stream_id)
Example #28
class Buffer(object):

    chunk_size = 2 ** 14

    def __init__(self):
        self.buffer = BytesIO()
        self.target = None
        self.buffered = True

    def attachTarget(self, target):
        self.target = target
        task.cooperate(self._gen_data())

    def _gen_data(self):
        current = self.buffer.tell()
        self.buffer.seek(0, 0)
        data = self.buffer.read(self.chunk_size)
        self.last = len(data)
        self.buffer.seek(current)
        while data:
            yield
            self.target.write(data)
            current = self.buffer.tell()
            self.buffer.seek(self.last)
            data = self.buffer.read(self.chunk_size)
            self.last += len(data)
            self.buffer.seek(current)
        self.buffered = False

    def write(self, data):
        if self.buffered:
            self.buffer.write(data)
        else:
            self.target.write(data)
Example #29
 def test_first_byte_timestamp_updated_on_read(self):
     s = BytesIO(b"foobar\nfoobar")
     s = tcp.Reader(s)
     s.read(1)
     assert s.first_byte_timestamp
     expected = s.first_byte_timestamp
     s.read(5)
     assert s.first_byte_timestamp == expected
Example #30
 def test_reader_read_error(self):
     s = BytesIO(b"foobar\nfoobar")
     s = tcp.Reader(s)
     o = mock.MagicMock()
     o.read = mock.MagicMock(side_effect=socket.error)
     s.o = o
     with pytest.raises(exceptions.TcpDisconnect):
         s.read(10)
Example #31
    def get_model(self):

        # Receive ModelDef NatPacket
        try:
            packet = self.comm_sock.get_data(NAT_REQUEST_MODELDEF)
        except NatUnrecognizedRequest:
            print(
                "Warning: Server Doesn't recognize request for Modeldef.  Will try again after next frame..."
            )
            return None

        # Parse the Packet
        data = BytesIO(packet._packet[4:])  # Skip Message ID, nBytes data

        d_name_list = []
        for el in range(unpack('i', data.read(4))[0]):  # nDatasets
            d_type = unpack('i', data.read(4))[0]
            d_name = strcpy(data)
            d_name_list.append(d_name)

            # MarkerSet
            if d_type == 0:
                if not d_name in self.marker_sets:
                    self.marker_sets[d_name] = MarkerSet(name=d_name)
                marker_set = self.marker_sets[d_name]
                nMarkers = unpack('i', data.read(4))[0]
                if len(marker_set.markers) != nMarkers:
                    marker_set.markers = []
                    for el2 in range(nMarkers):
                        name = strcpy(data)
                        marker_set.markers.append(Marker(name=name))
                else:
                    for el2 in range(nMarkers):  # nMarkers
                        name = strcpy(data)
                        marker_set.markers[el2].name = name

            # Rigid Body
            elif d_type == 1:
                id, parent_id, x_offset, y_offset, z_offset = unpack(
                    '2i3f', data.read(20))
                if not d_name in self.rigid_bodies:
                    body = RigidBody(name=d_name,
                                     id=id,
                                     parent_id=parent_id,
                                     offset=(x_offset, y_offset, z_offset))
                    self.rigid_bodies[d_name], self.rigid_bodies_by_id[
                        id] = body, body

            # Skeleton
            elif d_type == 2:
                raise NotImplementedError(
                    "Skeleton Processing not yet implemented! Remove them from Motive Tracking!"
                )  # TODO: Get skeletons working.
                """
                id = unpack('2i', data.read(4))[0]
                skeleton = Skeleton(id=id, name=d_name)

                for el2 in range(unpack('2i', data.read(4))[0]):  # nRigidBodies
                    name = strcpy()(data)
                    id, parent_id, x_offset, y_offset, z_offset = unpack('2i3f', data.read(20))
                    body = RigidBody(id=id, name=name, offset=(x_offset, y_offset, z_offset))
                    skeleton.rigid_bodies[name] = body
                    skeleton.rigid_bodies[id] = body
                skeletons[d_name] = skeleton
                skeletons[id] = skeleton
                """

        # Now, delete any items from the dictionaries that aren't in the server's model.
        for dictionary in [self.marker_sets, self.rigid_bodies]:
            for name in list(dictionary.keys()):  # copy the keys so entries can be deleted while iterating
                if name not in d_name_list:
                    del dictionary[name]

        return packet
Example #32
 def read(b: BytesIO, *args) -> float:
     return unpack("d", b.read(8))[0]
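A quick usage sketch (not part of the original example), assuming struct's pack/unpack and BytesIO are in scope as above: the helper reads one 8-byte double from the stream.

from struct import pack
print(read(BytesIO(pack('d', 3.14))))    # 3.14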
Example #33
 def test_limitless(self):
     s = BytesIO(b"f" * (50 * 1024))
     s = tcp.Reader(s)
     ret = s.read(-1)
     assert len(ret) == 50 * 1024
Example #34
class FirmwareImage(object):
    def __init__(self, path_or_file, mode="r"):
        if getattr(path_or_file, "read", None):
            self._file = path_or_file
            self._do_close = False
            self._padding = 0
        else:
            if "b" not in mode:
                self._file = open(path_or_file, mode + "b")
            else:
                self._file = open(path_or_file, mode)
            self._do_close = True
            self._padding = 4

        if "r" in mode:
            self._contents = BytesIO(self._file.read())
        else:
            self._contents = BytesIO()
        self._do_write = False

        self._length = None
        self._descriptor_offset = None
        self._descriptor_bytes = None
        self._descriptor = None

    def __enter__(self):
        return self

    def __getattr__(self, attr):
        if attr == "write":
            self._do_write = True
        return getattr(self._contents, attr)

    def __iter__(self):
        return iter(self._contents)

    def __exit__(self, *args):
        if self._do_write:
            if getattr(self._file, "seek", None):
                self._file.seek(0)
            self._file.write(self._contents.getvalue())
            if self._padding:
                self._file.write(b'\xff' * self._padding)

        if self._do_close:
            self._file.close()

    def _write_descriptor_raw(self):
        # Seek to the appropriate location, write the serialized
        # descriptor, and seek back.
        prev_offset = self._contents.tell()
        self._contents.seek(self._descriptor_offset)
        self._contents.write(self._descriptor.pack())
        self._contents.seek(prev_offset)

    def write_descriptor(self):
        # Set the descriptor's length and CRC to the values required for
        # CRC computation
        self.app_descriptor.image_size = self.length
        self.app_descriptor.crc32_block1 = 0
        self.app_descriptor.crc32_block2 = 0

        self._write_descriptor_raw()

        content = bytearray(self._contents.getvalue())
        if self._padding:
            content += bytearray.fromhex("ff" * self._padding)

        # Update the descriptor's CRC based on the computed value and write
        # it out again

        self.app_descriptor.crc32_block1 = self.crc32(
            content[:self.app_descriptor_offset +
                    len(AppDescriptor.SIGNATURE)])
        b2 = self.app_descriptor_offset + len(
            AppDescriptor.SIGNATURE) + AppDescriptor.DESLENGTH
        self.app_descriptor.crc32_block2 = self.crc32(content[b2:])

        self._write_descriptor_raw()

    def crc32(self, bytes, crc=0):

        for byte in bytes:
            index = (crc ^ byte) & 0xff
            crc = crctab[index] ^ (crc >> 8)
        return crc

    @property
    def padding(self):
        return self._padding

    @property
    def length(self):
        if not self._length:
            # Find the length of the file by seeking to the end and getting
            # the offset
            prev_offset = self._contents.tell()
            self._contents.seek(0, os.SEEK_END)
            self._length = self._contents.tell()
            if self._padding:
                fill = self._padding - (self._length % self._padding)
                if fill:
                    self._length += fill
                self._padding = fill
            self._contents.seek(prev_offset)

        return self._length

    @property
    def app_descriptor_offset(self):
        if not self._descriptor_offset:
            # Save the current position
            prev_offset = self._contents.tell()
            # Check each byte in the file to see if a valid descriptor starts
            # at that location. Slow, but not slow enough to matter.
            offset = 0
            while offset < self.length - AppDescriptor.LENGTH:
                self._contents.seek(offset)
                try:
                    # If this throws an exception, there isn't a valid
                    # descriptor at this offset
                    AppDescriptor(self._contents.read(AppDescriptor.LENGTH))
                except Exception:
                    offset += 1
                else:
                    self._descriptor_offset = offset
                    break
            # Go back to the previous position
            self._contents.seek(prev_offset)
            if not self._descriptor_offset:
                raise Exception('AppDescriptor not found')

        return self._descriptor_offset

    @property
    def app_descriptor(self):
        if not self._descriptor:
            # Save the current position
            prev_offset = self._contents.tell()
            # Jump to the descriptor and parse it
            self._contents.seek(self.app_descriptor_offset)
            self._descriptor_bytes = self._contents.read(AppDescriptor.LENGTH)
            self._descriptor = AppDescriptor(self._descriptor_bytes)
            # Go back to the previous offset
            self._contents.seek(prev_offset)

        return self._descriptor

    @app_descriptor.setter
    def app_descriptor(self, value):
        self._descriptor = value
Example #35
 def getPDULength(self, data: bytes) -> int:
     stream = BytesIO(data)
     stream.read(1)
     return self.parseLength(stream)
Example #36
def create_thumbnail(image_bytes: BytesIO,
                     django_type: str,
                     file_field_name: str,
                     width: int = 0,
                     height: int = 0):
    """
    :param image_bytes:
    :param django_type:
    :param file_field_name:
    :param width:
    :param height:
    :return:
    """
    # original code for this method came from
    # http://snipt.net/danfreak/generate-thumbnails-in-django-with-pil/

    # Open original photo which we want to thumbnail using PIL's Image
    image = Image.open(image_bytes)

    # Set our max thumbnail size in a tuple (max width, max height)
    if not width and not height:
        thumbnail_size = (
            image.width,
            image.height,
        )

    elif not width and height:
        thumbnail_size = (
            image.width * (height / image.height),
            height,
        )

    elif width and not height:
        thumbnail_size = (
            width,
            image.height * (width / image.width),
        )

    else:
        thumbnail_size = (
            width,
            height,
        )

    width = thumbnail_size[0]
    height = thumbnail_size[1]

    logger.debug('Set image size %d/%d' % (width, height))

    if file_type(django_type) == ImageType.image:
        pil_type = get_image_extension(django_type)['pil']

    else:
        raise Exception(file_type(django_type))

    # Convert to RGB if necessary
    # Thanks to Limodou on DjangoSnippets.org
    # http://www.djangosnippets.org/snippets/20/
    #
    # I commented this part since it messes up my png files
    #
    # if image.mode not in ('L', 'RGB'):
    #    image = image.convert('RGB')

    # We use our PIL Image object to create the thumbnail, which already
    # has a thumbnail() convenience method that constrains proportions.
    # Additionally, we use Image.ANTIALIAS to make the image look better.
    # Without antialiasing the image pattern artifacts may result.
    image.thumbnail(thumbnail_size, Image.ANTIALIAS)

    # Save the thumbnail
    temp_handle = BytesIO()
    image.save(temp_handle, pil_type)
    temp_handle.seek(0)

    # Save image to a SimpleUploadedFile which can be saved into ImageField
    suf = SimpleUploadedFile(os.path.split(file_field_name)[-1],
                             temp_handle.read(),
                             content_type=django_type)

    return suf
Example #37
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# @Time    : 2019/9/13 13:45
# @Author  : su
# @File    : BytesIO.py
"""
StringIO can only operate on str; to work with binary data you need BytesIO.

StringIO and BytesIO provide in-memory handling of str and bytes with the same interface as reading and writing files.
"""
from io import BytesIO

# write to BytesIO
f = BytesIO()
f.write(b'hello')
f.write(b'  ')
f.write(b'world!')
print(f.getvalue())

# read from BytesIO
data = '人闲桂花落,夜静春山空。月出惊山鸟,时鸣春涧中。'.encode('utf-8')
f = BytesIO(data)
print(f.read())
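An illustrative addition (not part of the original snippet): once read() has exhausted the stream, the position sits at the end, and seek(0) rewinds it so the data can be read again.

f2 = BytesIO(b'hello world')
print(f2.read())     # b'hello world'
print(f2.read())     # b'' -- position is now at the end of the stream
f2.seek(0)
print(f2.read(5))    # b'hello'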
Example #38
def createGeoJson(geoms, output=None, srs=4326, topo=False, fill=''):
    """Convert a set of geometries to a geoJSON object"""
    if (srs):
        srs = SRS.loadSRS(srs)

    # arrange geom, index, and data
    if isinstance(geoms, ogr.Geometry):  # geoms is a single geometry
        finalGeoms = [
            geoms,
        ]
        data = None
        index = [
            0,
        ]

    elif isinstance(geoms, pd.Series):
        index = geoms.index
        finalGeoms = geoms.values
        data = None

    elif isinstance(geoms, pd.DataFrame):
        index = geoms.index
        finalGeoms = geoms.geom.values
        data = geoms.loc[:, geoms.columns != 'geom']
        data["_index"] = index
    else:
        finalGeoms = list(geoms)
        data = None
        index = list(range(len(finalGeoms)))

    if len(finalGeoms) == 0:
        raise GeoKitVectorError("Empty geometry list given")

    # Transform?
    if not srs is None:
        finalGeoms = GEOM.transform(finalGeoms, toSRS=srs)

    # Make JSON object
    from io import BytesIO
    if not output is None and not isinstance(output, str):
        if not output.writable():
            raise GeoKitVectorError("Output object is not writable")

        if topo:
            fo = BytesIO()
        else:
            fo = output
    elif isinstance(output, str) and not topo:
        fo = open(output, "wb")
    else:
        fo = BytesIO()

    fo.write(
        bytes('{"type":"FeatureCollection","features":[', encoding='utf-8'))

    for j, i, g in zip(range(len(index)), index, finalGeoms):

        fo.write(
            bytes('%s{"type":"Feature",' % ("" if j == 0 else ","),
                  encoding='utf-8'))
        if data is None:
            fo.write(
                bytes('"properties":{"_index":%s},' % str(i),
                      encoding='utf-8'))
        else:
            fo.write(
                bytes('"properties":%s,' % data.loc[i].fillna(fill).to_json(),
                      encoding='utf-8'))

        fo.write(bytes('"geometry":%s}' % g.ExportToJson(), encoding='utf-8'))
        #fo.write(bytes('"geometry": {"type": "Point","coordinates": [125.6, 10.1] }}', encoding='utf-8'))
    fo.write(bytes("]}", encoding='utf-8'))
    fo.flush()

    # Put in the right format
    if topo:
        from topojson import conversion
        from io import TextIOWrapper

        fo.seek(0)
        topo = conversion.convert(
            TextIOWrapper(fo),
            object_name="primary")  # automatically closes fo
        topo = str(topo).replace("'", '"')

    # Done!
    if output is None:
        if topo:
            return topo
        else:
            fo.seek(0)
            geojson = fo.read()
            fo.close()
            return geojson.decode('utf-8')

    elif isinstance(output, str):
        if topo:
            with open(output, "w") as fo:
                fo.write(topo)
        else:
            pass  # we already wrote to the file!
        return output

    else:
        if topo:
            output.write(bytes(topo, encoding='utf-8'))
        else:
            pass  # We already wrote to the file!
        return None
Example #39
def _read_int(x: BytesIO, n_bytes: int, signed: bool = True) -> int:
    return int.from_bytes(x.read(n_bytes), byteorder="little", signed=signed)
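A quick usage sketch (not part of the original example): the helper reads little-endian integers, signed by default.

from io import BytesIO
print(_read_int(BytesIO(b'\xff\xff'), 2))                 # -1
print(_read_int(BytesIO(b'\xff\xff'), 2, signed=False))   # 65535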
Example #40
# f.close()

# with open('/Users/michael/test.txt', 'w') as f:
#     f.write('Hello, world!')

print('*****************************************')
# StringIO: read and write strings in memory
from io import StringIO

f = StringIO()
f.write("hello")
print('write:' + f.getvalue())  # getvalue() fetches the string held in memory

f2 = StringIO('hello\nHi\nGoodBye')
while True:
    s = f2.readline()
    if s == '':
        break
    print(s)

# BytesIO: operates on binary data; StringIO can only handle str
from io import BytesIO

b = BytesIO()
b.write('中文'.encode('utf-8'))
print(b.getvalue())

b2 = BytesIO(b'\xe4\xb8\xad\xe6\x96\x87')
print(b2.getvalue())
print(b2.read())
Example #41
def _read_byte(stream: io.BytesIO) -> int:
    return struct.unpack_from('B', stream.read(1))[0]
Example #42
class Telegram(object):

    TYPE = 0    # type byte offset
    CODE = 1    # code byte offset
    DATA = 2    # data byte offset

    TYPE_NOT_DIRECT = 0x01          # system vs. direct type
    TYPE_REPLY = 0x02               # reply type (from NXT brick)
    TYPE_REPLY_NOT_REQUIRED = 0x80  # reply not required flag

    def __init__(self, direct=False, opcode=0, reply_req=True, pkt=None):
        self.reply = True
        if pkt:
            self.pkt = BytesIO(pkt)
            self.typ = self.parse_u8()
            self.opcode = self.parse_u8()
            if not self.is_reply():
                raise InvalidReplyError
            if self.opcode != opcode:
                raise InvalidOpcodeError(self.opcode)
        else:
            self.pkt = BytesIO()
            typ = 0
            if not direct:
                typ |= Telegram.TYPE_NOT_DIRECT
            if not reply_req:
                typ |= Telegram.TYPE_REPLY_NOT_REQUIRED
                self.reply = False
            self.add_u8(typ)
            self.add_u8(opcode)

    def __str__(self):
        return self.pkt.getvalue()

    def is_reply(self):
        return self.typ == Telegram.TYPE_REPLY

    def add_string(self, n_bytes, v):
        self.pkt.write(pack('%ds' % n_bytes, v))

    def add_filename(self, fname):
        self.pkt.write(pack('20s', fname))

    def add_s8(self, v):
        self.pkt.write(pack('<b', v))

    def add_u8(self, v):
        self.pkt.write(pack('<B', v))

    def add_s16(self, v):
        self.pkt.write(pack('<h', v))

    def add_u16(self, v):
        self.pkt.write(pack('<H', v))

    def add_s32(self, v):
        self.pkt.write(pack('<i', v))

    def add_u32(self, v):
        self.pkt.write(pack('<I', v))

    def parse_string(self, n_bytes=0):
        if n_bytes:
            return unpack('%ss' % n_bytes,
                self.pkt.read(n_bytes))[0]
        else:
            return self.pkt.read()

    def parse_s8(self):
        return unpack('<b', self.pkt.read(1))[0]

    def parse_u8(self):
        return unpack('<B', self.pkt.read(1))[0]

    def parse_s16(self):
        return unpack('<h', self.pkt.read(2))[0]

    def parse_u16(self):
        return unpack('<H', self.pkt.read(2))[0]

    def parse_s32(self):
        return unpack('<i', self.pkt.read(4))[0]

    def parse_u32(self):
        return unpack('<I', self.pkt.read(4))[0]

    def check_status(self):
        nxt.error.check_status(self.parse_u8())
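An illustrative sketch (not part of the original example): building an outgoing direct-command telegram, assuming struct's pack is in scope as in the class above; opcode 0x04 and the payload bytes are placeholder values.

t = Telegram(direct=True, opcode=0x04)
t.add_u8(0)      # e.g. an output port number
t.add_s8(75)     # e.g. a power level
print(t.pkt.getvalue())    # b'\x00\x04\x00K'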
Example #43
 def export_normal_sales(self):
     if self.start_period_id.date_start >= self.end_period_id.date_stop:
         raise Warning("End Date is Must be Greater then Start Date! ")
     from_date = self.start_period_id.date_start
     to_date = self.end_period_id.date_stop
     warehouse_ids = []
     for warehouse in self.warehouse_ids:
         warehouse_ids.append(warehouse.id)
     warehouse_ids = '(' + str(warehouse_ids or [0]).strip('[]') + ')'        
             
     file_header1 = {'company': 0, 'product_name' : 1, 'warehouse': 2, 'sku':3}
     column_no = 4
     column_list = ""
     select_column_list = ""
     file_header2 = {} 
     file_header = {}
     periods = self.start_period_id.search([('date_start', '>=', from_date), ('date_stop', '<=', to_date)])
     if periods:
         for period in periods:
             column_list = column_list + ", %s float" % (period.code)  # #  ,Jan2017 float, Jan2017 float
             select_column_list = select_column_list + ",coalesce(%s,0) as %s" % (period.code, period.code.lower())  # ,coalesce(Jan2017,0) as jan2017
             file_header2.update({period.code.lower():column_no})
             column_no += 1
     sales_pivot = {}
     file_header.update(file_header1) 
     file_header.update(file_header2)
     try:
             query = """
                 select
         product,
         warehouse,
         sku,
         product_name,
         company
         %s    
         from crosstab(
          'Select 
         ware.name || '' - '' || prod.default_code as product,
         ware.name as warehouse,
         prod.default_code as sku,
         tmpl.name as product_name,
         cmp.name as company,
         sale_period,
         sum(sale_qty) as sale_qty    
          from     
          (
         Select sale_date, sale_period, product_id, warehouse_id, sum(sale_qty) as sale_qty
         From
         (    
             Select 
                 move.date::date as sale_date,
                 (select code from requisition_period_ept where date_start <= move.date::date and date_stop >= move.date::date) sale_period,
                 move.product_id,
                 type.warehouse_id,
                 move.product_qty as sale_qty
             from     
                 stock_move as move 
                     Inner Join stock_picking pick on pick.id= move.picking_id
                     Inner Join stock_picking_type type on type.id = pick.picking_type_id
                     inner Join product_product prod on prod.id = move.product_id
                     Inner Join product_template tmpl on tmpl.id = prod.product_tmpl_id
             where move.state = ''done'' AND tmpl.active=true AND prod.active=true AND
                 type.warehouse_id in %s AND
                 move.date between ''%s 00:00:00'' and ''%s 23:59:59'' AND
                 move.location_dest_id in (select id from stock_location where usage = ''customer'')
             
             Union All
         
             Select 
                 move.date::date as sale_date,
                 (select code from requisition_period_ept where date_start <= move.date::date and date_stop >= move.date::date) sale_period,
                 move.product_id,
                 type.warehouse_id,
                 move.product_qty * -1 as sale_qty
             from     
                 stock_move as move 
                     Inner Join stock_picking pick on pick.id= move.picking_id
                     Inner Join stock_picking_type type on type.id = pick.picking_type_id
                     inner Join product_product prod on prod.id = move.product_id
                     Inner Join product_template tmpl on tmpl.id = prod.product_tmpl_id
             where move.state = ''done'' AND tmpl.active=true AND prod.active=true AND
                 type.warehouse_id in %s AND
                 move.date between ''%s 00:00:00'' and ''%s 23:59:59'' AND
                 move.location_id in (select id from stock_location where usage = ''customer'') 
              
             Union All
          
             Select 
                 period.date_start,
                 period.code,
                 prod.id,
                 wh.id,
                 0
             from product_product prod, stock_warehouse wh, requisition_period_ept period, product_template tmpl
             where 
                 tmpl.id = prod.product_tmpl_id and 
                 wh.id in %s AND 
                 period.date_start >= ''%s'' and period.date_stop <= ''%s'' AND 
                 tmpl.type=''product'' and prod.active=true and tmpl.active=true
         ) Sales
         Group by sale_date, sale_period, product_id, warehouse_id    
          
          )T
         Inner Join product_product prod on prod.id = T.product_id
         Inner Join product_template tmpl on tmpl.id = prod.product_tmpl_id
         Inner Join stock_warehouse ware on ware.id = T.warehouse_id
         Inner Join res_company cmp on cmp.id = ware.company_id
          group by prod.id, ware.name, sale_period, prod.default_code,cmp.name, tmpl.name
          order by 1,3;',
          'Select code from requisition_period_ept where date_start >= ''%s'' and date_stop <= ''%s'' order by date_start'
          )
          as newtable (
         product varchar, warehouse varchar, sku varchar, product_name varchar,
         company varchar %s
         );""" % (select_column_list, warehouse_ids, from_date, to_date, warehouse_ids, from_date, to_date, warehouse_ids, from_date, to_date, from_date, to_date, column_list)
             self._cr.execute(query)
             sales_pivot = self._cr.dictfetchall()
     except psycopg2.DatabaseError as e:
         if e.pgcode == '58P01':
             raise Warning("To enable Export Forecast Sale Rule, Please install Postgresql - Contrib in Postgresql")     
     
     if sales_pivot:
         workbook = xlwt.Workbook()
         worksheet = workbook.add_sheet("Normal Sales Data", cell_overwrite_ok=True)
         # ## it will return sorted data in list of tuple (sorting based on value)
         sorted_file_header = sorted(file_header.items(), key=operator.itemgetter(1))
         header_bold = xlwt.easyxf("font: bold on, height 250; pattern: pattern solid, fore_colour gray25;alignment: horizontal center")
         column = 0
         for header in sorted_file_header:
             worksheet.write(0, header[1], header[0], header_bold)
             column += 1
         row = 1
         for sale in sales_pivot:
             for header in sorted_file_header:
                 col_no = header[1]
                 value = sale[header[0]]
                 worksheet.write(row, col_no, value)
             row += 1   
         
         fp = BytesIO()
         workbook.save(fp)
         fp.seek(0)
         report_data_file = base64.encodestring(fp.read())
         fp.close()
         self.write({'datas':report_data_file})
        
         return {
         'type' : 'ir.actions.act_url',
         'url':   'web/content/?model=import.export.forecast.sale.ept&field=datas&download=true&id=%s&filename=normal_sale_data.xls' % (self.id),
         'target': 'new',
         }
Example #44
    def run(self):
        #results = {} # {'file' : error_code,...}

        STATE_DONE = 0
        STATE_ABORTED = 10
        STATE_SUCCESS = 20
        STATE_BUSY = 25
        STATE_READ_SENDER_INFO = 30
        STATE_PRERENDER = 40
        STATE_COUNT_PAGES = 50
        STATE_NEXT_RECIPIENT = 60
        STATE_COVER_PAGE = 70
        STATE_SINGLE_FILE = 80
        STATE_MERGE_FILES = 90
        STATE_SINGLE_FILE = 100
        STATE_SEND_FAX = 110
        STATE_CLEANUP = 120
        STATE_ERROR = 130

        next_recipient = self.next_recipient_gen()

        state = STATE_READ_SENDER_INFO
        self.rendered_file_list = []

        while state != STATE_DONE: # --------------------------------- Fax state machine
            if self.check_for_cancel():
                state = STATE_ABORTED

            log.debug("STATE=(%d, 0, 0)" % state)

            if state == STATE_ABORTED: # --------------------------------- Aborted (10, 0, 0)
                log.error("Aborted by user.")
                self.write_queue((STATUS_IDLE, 0, ''))
                state = STATE_CLEANUP


            elif state == STATE_SUCCESS: # --------------------------------- Success (20, 0, 0)
                log.debug("Success.")
                self.write_queue((STATUS_COMPLETED, 0, ''))
                state = STATE_CLEANUP


            elif state == STATE_ERROR: # --------------------------------- Error (130, 0, 0)
                log.error("Error, aborting.")
                self.write_queue((STATUS_ERROR, 0, ''))
                state = STATE_CLEANUP


            elif state == STATE_BUSY: # --------------------------------- Busy (25, 0, 0)
                log.error("Device busy, aborting.")
                self.write_queue((STATUS_BUSY, 0, ''))
                state = STATE_CLEANUP


            elif state == STATE_READ_SENDER_INFO: # --------------------------------- Get sender info (30, 0, 0)
                log.debug("%s State: Get sender info" % ("*"*20))
                state = STATE_PRERENDER
                try:
                    try:
                        self.dev.open()
                    except Error as e:
                        log.error("Unable to open device (%s)." % e.msg)
                        state = STATE_ERROR
                    else:
                        try:
                            self.sender_name = self.dev.station_name
                            log.debug("Sender name=%s" % self.sender_name)
                            self.sender_fax = self.dev.phone_num
                            log.debug("Sender fax=%s" % self.sender_fax)
                        except Error:
                            log.error("HTTP GET failed!")
                            state = STATE_ERROR

                finally:
                    self.dev.close()


            elif state == STATE_PRERENDER: # --------------------------------- Pre-render non-G4 files (40, 0, 0)
                log.debug("%s State: Pre-render non-G4 files" % ("*"*20))
                state = self.pre_render(STATE_COUNT_PAGES)

            elif state == STATE_COUNT_PAGES: # --------------------------------- Get total page count (50, 0, 0)
                log.debug("%s State: Get total page count" % ("*"*20))
                state = self.count_pages(STATE_NEXT_RECIPIENT)

            elif state == STATE_NEXT_RECIPIENT: # --------------------------------- Loop for multiple recipients (60, 0, 0)
                log.debug("%s State: Next recipient" % ("*"*20))
                state = STATE_COVER_PAGE

                try:
                    recipient = next(next_recipient)
                    log.debug("Processing for recipient %s" % recipient['name'])
                    self.write_queue((STATUS_SENDING_TO_RECIPIENT, 0, recipient['name']))
                except StopIteration:
                    state = STATE_SUCCESS
                    log.debug("Last recipient.")
                    continue

                recipient_file_list = self.rendered_file_list[:]


            elif state == STATE_COVER_PAGE: # --------------------------------- Create cover page (70, 0, 0)
                log.debug("%s State: Render cover page" % ("*"*20))
                state = self.cover_page(recipient)


            elif state == STATE_SINGLE_FILE: # --------------------------------- Special case for single file (no merge) (80, 0, 0)
                log.debug("%s State: Handle single file" % ("*"*20))
                state = self.single_file(STATE_SEND_FAX)

            elif state == STATE_MERGE_FILES: # --------------------------------- Merge multiple G4 files (90, 0, 0)
                log.debug("%s State: Merge multiple files" % ("*"*20))
                state = self.merge_files(STATE_SEND_FAX)

            elif state == STATE_SEND_FAX: # --------------------------------- Send fax state machine (110, 0, 0)
                log.debug("%s State: Send fax" % ("*"*20))
                state = STATE_NEXT_RECIPIENT

                FAX_SEND_STATE_DONE = 0
                FAX_SEND_STATE_ABORT = 10
                FAX_SEND_STATE_ERROR = 20
                FAX_SEND_STATE_BUSY = 25
                FAX_SEND_STATE_SUCCESS = 30
                FAX_SEND_STATE_DEVICE_OPEN = 40
                FAX_SEND_STATE_BEGINJOB = 50
                FAX_SEND_STATE_DOWNLOADPAGES = 60
                FAX_SEND_STATE_ENDJOB = 70
                FAX_SEND_STATE_CANCELJOB = 80
                FAX_SEND_STATE_CLOSE_SESSION = 170

                monitor_state = False
                fax_send_state = FAX_SEND_STATE_DEVICE_OPEN

                while fax_send_state != FAX_SEND_STATE_DONE:

                    if self.check_for_cancel():
                        log.error("Fax send aborted.")
                        fax_send_state = FAX_SEND_STATE_ABORT

                    if monitor_state:
                        fax_state = self.getFaxDownloadState()
                        if not fax_state in (pml.UPDN_STATE_XFERACTIVE, pml.UPDN_STATE_XFERDONE):
                            log.error("D/L error state=%d" % fax_state)
                            fax_send_state = FAX_SEND_STATE_ERROR
                            state = STATE_ERROR

                    log.debug("STATE=(%d, %d, 0)" % (STATE_SEND_FAX, fax_send_state))

                    if fax_send_state == FAX_SEND_STATE_ABORT: # -------------- Abort (110, 10, 0)
                        monitor_state = False
                        fax_send_state = FAX_SEND_STATE_CANCELJOB
                        state = STATE_ABORTED

                    elif fax_send_state == FAX_SEND_STATE_ERROR: # -------------- Error (110, 20, 0)
                        log.error("Fax send error.")
                        monitor_state = False
                        fax_send_state = FAX_SEND_STATE_CLOSE_SESSION
                        state = STATE_ERROR

                    elif fax_send_state == FAX_SEND_STATE_BUSY: # -------------- Busy (110, 25, 0)
                        log.error("Fax device busy.")
                        monitor_state = False
                        fax_send_state = FAX_SEND_STATE_CLOSE_SESSION
                        state = STATE_BUSY

                    elif fax_send_state == FAX_SEND_STATE_SUCCESS: # -------------- Success (110, 30, 0)
                        log.debug("Fax send success.")
                        monitor_state = False
                        fax_send_state = FAX_SEND_STATE_CLOSE_SESSION
                        state = STATE_NEXT_RECIPIENT

                    elif fax_send_state == FAX_SEND_STATE_DEVICE_OPEN: # -------------- Device open (110, 40, 0)
                        log.debug("%s State: Open device" % ("*"*20))
                        fax_send_state = FAX_SEND_STATE_BEGINJOB
                        try:
                            self.dev.open()
                        except Error as e:
                            log.error("Unable to open device (%s)." % e.msg)
                            fax_send_state = FAX_SEND_STATE_ERROR
                        else:
                            if self.dev.device_state == DEVICE_STATE_NOT_FOUND:
                                fax_send_state = FAX_SEND_STATE_ERROR

                    elif fax_send_state == FAX_SEND_STATE_BEGINJOB: # -------------- BeginJob (110, 50, 0)
                        log.debug("%s State: BeginJob" % ("*"*20))

                        try:
                            ff = open(self.f, 'rb')
                        except IOError:
                            log.error("Unable to read fax file.")
                            fax_send_state = FAX_SEND_STATE_ERROR
                            continue

                        try:
                            header = ff.read(FILE_HEADER_SIZE)
                        except IOError:
                            log.error("Unable to read fax file.")
                            fax_send_state = FAX_SEND_STATE_ERROR
                            continue

                        magic, version, total_pages, hort_dpi, vert_dpi, page_size, \
                            resolution, encoding, reserved1, reserved2 = self.decode_fax_header(header)

                        if magic != b'hplip_g3':
                            log.error("Invalid file header. Bad magic.")
                            fax_send_state = FAX_SEND_STATE_ERROR
                        else:
                            log.debug("Magic=%s Ver=%d Pages=%d hDPI=%d vDPI=%d Size=%d Res=%d Enc=%d" %
                                      (magic, version, total_pages, hort_dpi, vert_dpi, page_size,
                                       resolution, encoding))

                        job_id = self.job_id
                        delay = 0
                        faxnum = recipient['fax']
                        speeddial = 0

                        if resolution == RESOLUTION_STD:
                            res = "STANDARD"
                        elif resolution == RESOLUTION_FINE:
                            res = "FINE"
                        elif resolution == RESOLUTION_300DPI:
                            res = "SUPERFINE"

                        soap = utils.cat(
"""<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"><SOAP-ENV:Body><Fax:BeginJob xmlns:Fax="urn:Fax"><ticket xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Fax:Ticket"><jobId xmlns:xsd="http://www.w3.org/2001/XMLSchema" xsi:type="xsd:string">$job_id</jobId><resolution xsi:type="Fax:Resolution">$res</resolution><delay xmlns:xsd="http://www.w3.org/2001/XMLSchema" xsi:type="xsd:positiveInteger">$delay</delay><phoneNumber xmlns:xsd="http://www.w3.org/2001/XMLSchema" xsi:type="xsd:string">$faxnum</phoneNumber><speedDial xmlns:xsd="http://www.w3.org/2001/XMLSchema" xsi:type="xsd:positiveInteger">$speeddial</speedDial></ticket></Fax:BeginJob></SOAP-ENV:Body></SOAP-ENV:Envelope>""")

                        data = self.format_http(soap.encode('utf-8'))

                        log.log_data(data)

                        if log.is_debug():
                            open('beginjob.log', 'wb').write(data)
                        self.dev.openSoapFax()
                        self.dev.writeSoapFax(data)
                        ret = BytesIO()

                        while self.dev.readSoapFax(8192, ret, timeout=5):
                            pass

                        ret = ret.getvalue()

                        if log.is_debug():
                            open('beginjob_ret.log', 'wb').write(ret)
                        log.log_data(ret)
                        self.dev.closeSoapFax()

                        if self.get_error_code(ret.decode('utf-8')) == HTTP_OK:
                            fax_send_state = FAX_SEND_STATE_DOWNLOADPAGES
                        else:
                            fax_send_state = FAX_SEND_STATE_ERROR


                    elif fax_send_state == FAX_SEND_STATE_DOWNLOADPAGES: # -------------- DownloadPages (110, 60, 0)
                        log.debug("%s State: DownloadPages" % ("*"*20))
                        page = BytesIO()
                        for p in range(total_pages):

                            if self.check_for_cancel():
                                fax_send_state = FAX_SEND_STATE_ABORT

                            if fax_send_state == FAX_SEND_STATE_ABORT:
                                break

                            try:
                                header = ff.read(PAGE_HEADER_SIZE)
                            except IOError:
                                log.error("Unable to read fax file.")
                                fax_send_state = FAX_SEND_STATE_ERROR
                                continue

                            page_num, ppr, rpp, bytes_to_read, thumbnail_bytes, reserved2 = \
                                self.decode_page_header(header)

                            log.debug("Page=%d PPR=%d RPP=%d BPP=%d Thumb=%d" %
                                      (page_num, ppr, rpp, bytes_to_read, thumbnail_bytes))

                            if ppr != PIXELS_PER_LINE:
                                log.error("Pixels per line (width) must be %d!" % PIXELS_PER_LINE)

                            page.write(ff.read(bytes_to_read))
                            thumbnail = ff.read(thumbnail_bytes) # thrown away for now (should be 0 read)
                            page.seek(0)

                            try:
                                data = page.read(bytes_to_read)
                            except IOError:
                                log.error("Unable to read fax file.")
                                fax_send_state = FAX_SEND_STATE_ERROR
                                break

                            if data == b'':
                                log.error("No data!")
                                fax_send_state = FAX_SEND_STATE_ERROR
                                break

                            height = rpp
                            job_id = self.job_id

                            soap = utils.cat(
"""<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"><SOAP-ENV:Header><jobId xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="xsd:string" SOAP-ENV:mustUnderstand="1">$job_id</jobId></SOAP-ENV:Header><SOAP-ENV:Body><Fax:DownloadPage xmlns:Fax="urn:Fax"><height xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="xsd:positiveInteger">$height</height></Fax:DownloadPage></SOAP-ENV:Body></SOAP-ENV:Envelope>""")

                            m = dime.Message()
                            m.add_record(dime.Record(b"cid:id0", b"http://schemas.xmlsoap.org/soap/envelope/",
                                dime.TYPE_T_URI, to_bytes_utf8(soap)))

                            m.add_record(dime.Record(b"", b"image/g4fax", dime.TYPE_T_MIME, data))

                            output = BytesIO()
                            m.generate(output)
                            data = self.format_http(output.getvalue(), content_type="application/dime")
                            log.log_data(data)
                            if log.is_debug():
                                open('downloadpages%d.log' % p, 'wb').write(data)
                            try:
                                self.dev.writeSoapFax(data)
                            except Error:
                                fax_send_state = FAX_SEND_STATE_ERROR

                            ret = BytesIO()

                            try:
                                while self.dev.readSoapFax(8192, ret, timeout=5):
                                    pass
                            except Error:
                                fax_send_state = FAX_SEND_STATE_ERROR

                            ret = ret.getvalue()

                            if log.is_debug():
                                open('downloadpages%d_ret.log' % p, 'wb').write(ret)

                            log.log_data(ret)
                            self.dev.closeSoapFax()

                            if self.get_error_code(ret.decode('utf-8')) != HTTP_OK:
                                fax_send_state = FAX_SEND_STATE_ERROR
                                break

                            page.truncate(0)
                            page.seek(0)

                        else:
                            fax_send_state = FAX_SEND_STATE_ENDJOB


                    elif fax_send_state == FAX_SEND_STATE_ENDJOB: # -------------- EndJob (110, 70, 0)
                        log.debug("%s State: EndJob" % ("*"*20))

                        job_id = self.job_id

                        soap = utils.cat(
"""<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"><SOAP-ENV:Header><jobId xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="xsd:string" SOAP-ENV:mustUnderstand="1">$job_id</jobId></SOAP-ENV:Header><SOAP-ENV:Body><Fax:EndJob xmlns:Fax="urn:Fax"><jobId xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="xsd:string">$job_id</jobId></Fax:EndJob></SOAP-ENV:Body></SOAP-ENV:Envelope>""")

                        data = self.format_http(soap.encode('utf-8'))

                        log.log_data(data)

                        if log.is_debug():
                            open('endjob.log', 'wb').write(data)

                        self.dev.writeSoapFax(data)
                        ret = BytesIO()

                        while self.dev.readSoapFax(8192, ret, timeout=5):
                            pass

                        ret = ret.getvalue()

                        if log.is_debug():
                            open('endjob_ret.log', 'wb').write(ret)

                        log.log_data(ret)
                        self.dev.closeSoapFax()

                        if self.get_error_code(ret.decode('utf-8')) == HTTP_OK:
                            fax_send_state = FAX_SEND_STATE_SUCCESS
                        else:
                            fax_send_state = FAX_SEND_STATE_ERROR

                    elif fax_send_state == FAX_SEND_STATE_CANCELJOB: # -------------- CancelJob (110, 80, 0)
                        log.debug("%s State: CancelJob" % ("*"*20))

                        job_id = self.job_id

                        soap = utils.cat(
"""<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"><SOAP-ENV:Header><jobId xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="xsd:string" SOAP-ENV:mustUnderstand="1">$job_id</jobId></SOAP-ENV:Header><SOAP-ENV:Body><Fax:CancelJob xmlns:Fax="urn:Fax"><jobId xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="xsd:string">$job_id</jobId></Fax:CancelJob></SOAP-ENV:Body></SOAP-ENV:Envelope>""")

                        data = self.format_http(soap.encode('utf-8'))

                        log.log_data(data)

                        if log.is_debug():
                            open('canceljob.log', 'wb').write(data)

                        self.dev.writeSoapFax(data)
                        ret = BytesIO()

                        while self.dev.readSoapFax(8192, ret, timeout=5):
                            pass

                        ret = ret.getvalue()

                        if log.is_debug():
                            open('canceljob_ret.log', 'wb').write(ret)

                        log.log_data(ret)
                        self.dev.closeSoapFax()

                        if self.get_error_code(ret.decode('utf-8')) == HTTP_OK:
                            fax_send_state = FAX_SEND_STATE_CLOSE_SESSION
                        else:
                            fax_send_state = FAX_SEND_STATE_ERROR


                    elif fax_send_state == FAX_SEND_STATE_CLOSE_SESSION: # -------------- Close session (110, 170, 0)
                        log.debug("%s State: Close session" % ("*"*20))
                        log.debug("Closing session...")

                        try:
                            mm.close()
                        except NameError:
                            pass

                        try:
                            ff.close()
                        except NameError:
                            pass

                        time.sleep(1)

                        self.dev.closeSoapFax()
                        self.dev.close()

                        fax_send_state = FAX_SEND_STATE_DONE # Exit inner state machine


            elif state == STATE_CLEANUP: # --------------------------------- Cleanup (120, 0, 0)
                log.debug("%s State: Cleanup" % ("*"*20))

                if self.remove_temp_file:
                    log.debug("Removing merged file: %s" % self.f)
                    try:
                        os.remove(self.f)
                        log.debug("Removed")
                    except OSError:
                        log.debug("Not found")

                state = STATE_DONE # Exit outer state machine
Пример #45
0
    def export_forecast_sales(self):
        if self.start_period_id.date_start >= self.end_period_id.date_stop:
            raise Warning("End Date must be greater than Start Date!")
        sales_pivot = {}

        try:
                self._cr.execute("CREATE EXTENSION IF NOT EXISTS tablefunc;")
                from_date = self.start_period_id.date_start
                to_date = self.end_period_id.date_stop
                warehouse_ids = []
                for warehouse in self.warehouse_ids:
                    warehouse_ids.append(warehouse.id)
                warehouse_ids = '(' + str(warehouse_ids or [0]).strip('[]') + ')'        
                file_header1 = {'company': 0, 'product_name' : 1, 'warehouse': 2, 'sku':3}
                column_no = 4
                column_list = ""
                select_column_list = ""
                file_header2 = {} 
                file_header = {}
                periods = self.start_period_id.search([('date_start', '>=', from_date), ('date_stop', '<=', to_date)])
                if periods:
                    for period in periods:
                        column_list = column_list + ", %s float" % (period.code)  # #  ,Jan2017 float, Jan2017 float
                        select_column_list = select_column_list + ",coalesce(%s,0) as %s" % (period.code, period.code.lower())  # ,coalesce(Jan2017,0) as jan2017
                        file_header2.update({period.code.lower():column_no})
                        column_no += 1
                
                file_header.update(file_header1) 
                file_header.update(file_header2)
                
                query = """
                
            select    
            product,
            sku,
            product_name,
            company,
            warehouse
            %s    
            from crosstab(
            'Select 
                (w.name || '' - '' || f.sku)::varchar as product,
                f.sku,
                pt.name as product_name,
                co.name as company,
                w.name as warehouse,
                per.code as period,
                sum(forecast_sales) as sale_qty
            from     
                forecast_sale_ept f
                        Inner Join product_product p on p.id = f.product_id
                        Inner Join product_template pt on pt.id = p.product_tmpl_id
                        Inner Join stock_warehouse w on w.id = f.warehouse_id
                        Inner Join res_company co on co.id = w.company_id
                        Inner Join requisition_period_ept per on per.id = f.period_id
            where pt.active=true AND p.active=true AND date_start >= ''%s'' and date_stop <= ''%s'' and f.warehouse_id in %s
            group by f.sku, w.name,co.name,per.code, f.product_id, pt.name
            order by 1,3',
        'Select code from requisition_period_ept where date_start >= ''%s'' and date_stop <= ''%s'' order by date_start'
        )
        as newtable (
            product character varying, sku character varying, product_name character varying,company character varying , warehouse character varying %s
         );
         """ % (select_column_list, from_date, to_date, warehouse_ids, from_date, to_date, column_list)
                self._cr.execute(query)
                sales_pivot = self._cr.dictfetchall() 

        except psycopg2.DatabaseError as e:
            if e.pgcode == '58P01':
                raise Warning("To enable Export Forecast Sale, please install the PostgreSQL contrib package (it provides the tablefunc extension).")
        
        if sales_pivot:
            workbook = xlwt.Workbook()
            worksheet = workbook.add_sheet("Forecast Sale Report", cell_overwrite_ok=True)
            # ## returns the header mapping as a list of (name, column_index) tuples, sorted by column index (the dict value)
            sorted_file_header = sorted(file_header.items(), key=operator.itemgetter(1))
            header_bold = xlwt.easyxf("font: bold on, height 250; pattern: pattern solid, fore_colour gray25;alignment: horizontal center")
            column = 0
            for header in sorted_file_header:
                worksheet.write(0, header[1], header[0], header_bold)
                column += 1
            row = 1
            for sale in sales_pivot:
                for header in sorted_file_header:
                    col_no = header[1]
                    value = sale[header[0]]
                    if value:
                        worksheet.write(row, col_no, value)
                row += 1   
            
            fp = BytesIO()
            workbook.save(fp)
            fp.seek(0)
            report_data_file = base64.encodebytes(fp.read())  # base64.encodestring was removed in Python 3.9
            fp.close()
            self.write({'datas':report_data_file})
           
            return {
            'type' : 'ir.actions.act_url',
            'url':   'web/content/?model=import.export.forecast.sale.ept&field=datas&download=true&id=%s&filename=Forecast_sale_report.xls' % (self.id),
            'target': 'new',
            }
Пример #46
0
    def _decode_format(self, input: io.BytesIO, tag: TagType, length: int) -> INode:
        end = input.tell() + length

        arg1 = self._decode_expression(input)
        arg2 = nodes.StaticByteArray(input.read(end - input.tell()))
        return nodes.GenericElement(tag, None, arg1, arg2)
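
# A hypothetical sketch of the bounded-read pattern used above: note the end offset
# up front (tell() + length), let a nested decode consume part of the payload, then
# read whatever is left up to that boundary. Names and bytes below are illustrative.
#
#     from io import BytesIO
#     buf = BytesIO(b"\x01\x02rest-of-field")
#     end = buf.tell() + 15                     # total length of this length-prefixed field
#     first = buf.read(2)                       # stands in for the nested decode step
#     remainder = buf.read(end - buf.tell())    # exactly the bytes left in the field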
Пример #47
0
def run(arg):
    # Some info from the plugin dispatcher.
    environ = arg['environ']
    plugin_config = arg['config']

    config = RawConfigParser(defaults=plugin_config)
    config.add_section('iptables')
    config._sections['iptables'] = plugin_config

    # Setup plugin logging
    l = getLogger('plugin_iptables')
    l.addHandler(logHandler)
    if config.getboolean('iptables', 'debug'):
        l.setLevel(DEBUG)
        l.debug('debug logging enabled')

    # Get client IP from webapp, try HTTP_X_FORWARDED_FOR and fallback on
    # REMOTE_ADDR.
    client_ip = environ.get('HTTP_X_FORWARDED_FOR', environ.get('REMOTE_ADDR'))
    client_mac = None
    error_msg = None
    iptables_failed = False

    # Verify client IP
    try:
        socket.inet_aton(client_ip)
    except socket.error as e:
        l.error('Client ip:{ip} is invalid'.format(ip=repr(client_ip)))
        return {'error': str(e), 'failed': True}

    # Attempt to get client HW address with arping
    if use_arping:
        try:
            client_mac = mac_from_ip(l, config.get('iptables', 'arping'),
                                     client_ip)
        except Exception as e:
            l.info('Failed to get client HW address: {error}'.format(
                error=str(e)))
            error_msg = str(e)
            pass

    if client_ip:
        iptables_cmd = config.get('iptables', 'iptables_cmd').format(
            ip_address=client_ip, mac_address=client_mac)

        output = BytesIO()
        error = BytesIO()
        try:
            # The two arguments must not contain spaces of course.
            rc = sudo(tuple(iptables_cmd.split(' ')), _out=output, _err=error)
        except ErrorReturnCode:
            error.seek(0)
            error_msg = error.read()
            l.warn('{cmd}: exited badly: {error}'.format(cmd=('iptables',
                                                              iptables_cmd),
                                                         error=error_msg))
            iptables_failed = True
            raise

        except Exception as e:
            l.warn('{cmd}: failed: {error}'.format(cmd=('iptables',
                                                        iptables_cmd),
                                                   error=str(e)))
            error_msg = str(e)
            iptables_failed = True
            raise

        if rc.exit_code == 0:
            l.debug(
                'Created iptables rule for client:{ip}'.format(ip=client_ip))

    # If all else fails, error! This will be shown to end users.
    return {'error': error_msg, 'failed': iptables_failed}
Пример #48
0
 def _decode_tag_default(self, input: io.BytesIO, tag: TagType, length: int) -> INode:
     return nodes.DefaultElement(tag, input.read(length))
Пример #49
0
 def parseBitmapEventRaw(self, stream: BytesIO, header: int, compressionFlags: int, size: int) \
         -> FastPathBitmapEvent:
     return FastPathBitmapEvent(header, compressionFlags, [],
                                stream.read(size))
Пример #50
0
    def ImportWan(self, data, ptrWAN=0):
        in_file = BytesIO()
        in_file.write(data)
        in_file.seek(0)

        self.customPalette = []
        # Read WAN header: ptr to AnimInfo, ptr to ImageDataInfo
        in_file.seek(ptrWAN)
        ptrAnimInfo = int.from_bytes(in_file.read(4), 'little')
        ptrImageDataInfo = int.from_bytes(in_file.read(4), 'little')
        if ptrAnimInfo == 0 or ptrImageDataInfo == 0:
            raise ValueError('Null pointer in Wan Header!')
        imgType = int.from_bytes(in_file.read(2), 'little')
        if imgType != 1:
            raise NotImplementedError('Non-character sprite import currently not supported.')

        updateUnusedStats([], 'Unk#12', int.from_bytes(in_file.read(2), 'little'))

        # Read ImageDataInfo: ptr to ImageDataTable block, ptr to PaletteInfo, NbImgs, print Unk#13 and Is256ColorSpr
        in_file.seek(ptrImageDataInfo)
        ptrImageDataTable = int.from_bytes(in_file.read(4), 'little')
        ptrPaletteInfo = int.from_bytes(in_file.read(4), 'little')
        if ptrImageDataTable == 0 or ptrPaletteInfo == 0:
            raise ValueError('Null pointer in Image Data Info!')
        # Unk#13 - ALWAYS 0
        int.from_bytes(in_file.read(2), 'little')
        # Is256ColorSpr - ALWAYS 0
        int.from_bytes(in_file.read(2), 'little')
        # Unk#11 - ALWAYS 1 unless completely empty?
        updateUnusedStats([], 'Unk#11', int.from_bytes(in_file.read(2), 'little'))
        nbImgs = int.from_bytes(in_file.read(2), 'little')
        # print('  NbImgs:' + str(nbImgs))

        # Read PaletteInfo: ptr to PaletteDataBlock, print NbColorsPerRow and All unknowns
        in_file.seek(ptrPaletteInfo)
        ptrPaletteDataBlock = int.from_bytes(in_file.read(4), 'little')
        # Unk#3 - ALWAYS 0
        int.from_bytes(in_file.read(2), 'little')
        nbColorsPerRow = max(1, int.from_bytes(in_file.read(2), 'little'))
        # Unk#4 - ALWAYS 0
        int.from_bytes(in_file.read(2), 'little')
        # Unk#5 - ALWAYS 255
        int.from_bytes(in_file.read(2), 'little')

        # Read PaletteDataBlock: Save contents
        in_file.seek(ptrPaletteDataBlock)
        self.customPalette = []
        totalColors = (ptrPaletteInfo - ptrPaletteDataBlock) // 4
        totalPalettes = totalColors // nbColorsPerRow
        for ii in range(totalPalettes):
            palette = []
            for jj in range(nbColorsPerRow):
                red = int.from_bytes(in_file.read(1), 'little')
                blue = int.from_bytes(in_file.read(1), 'little')
                green = int.from_bytes(in_file.read(1), 'little')
                in_file.read(1)
                palette.append((red, blue, green, 255))
            self.customPalette.append(palette)

        ##Read ImageDataTable: list of all ptr to CompressedImages (use the Nb of images variable to know when to stop)
        in_file.seek(ptrImageDataTable)
        ptrImgs = []
        for img in range(nbImgs):
            ptrImgs.append(int.from_bytes(in_file.read(4), 'little'))

        ##Read CompTable: Read all Image data and assemble; add byte arrays into a list.
        self.imgData = []  ##one continuous list of bytes
        for ptrImg in ptrImgs:
            in_file.seek(ptrImg)
            imgPiece = ImgPiece()
            imgPiece.imgPx = []
            ##Read pixels or zero padding.
            while True:
                ptrPixSrc = int.from_bytes(in_file.read(4), 'little')
                amt = int.from_bytes(in_file.read(2), 'little')
                # amt is ALWAYS a multiple of 32.  values 1-31 x 32 have been observed
                if ptrPixSrc == 0 and amt == 0:
                    break
                # Unk#14 - ALWAYS 0
                int.from_bytes(in_file.read(2), 'little')
                # z-sort is always consistent for a full image strip
                imgPiece.zSort = int.from_bytes(in_file.read(4), 'little')

                pxStrip = []
                if ptrPixSrc == 0:
                    for zero in range(amt):
                        pxStrip.append(0)
                else:
                    ptrCurrent = in_file.tell()
                    in_file.seek(ptrPixSrc)
                    for pix in range(amt):
                        pxStrip.append(int.from_bytes(in_file.read(1), 'little'))
                    in_file.seek(ptrCurrent)
                imgPiece.imgPx.append(pxStrip)
            self.imgData.append(imgPiece)

        ##Read AnimInfo: ptr to MetaFramesRefTable, ptr to AnimGroupTable
        in_file.seek(ptrAnimInfo)
        ptrMetaFramesRefTable = int.from_bytes(in_file.read(4), 'little')
        ptrOffsetsTable = int.from_bytes(in_file.read(4), 'little')
        ptrAnimGroupTable = int.from_bytes(in_file.read(4), 'little')
        nbAnimGroups = int.from_bytes(in_file.read(2), 'little')
        # Unk#6 - Max number of blocks that a frame takes
        int.from_bytes(in_file.read(2), 'little')
        # Unk#7 - ALWAYS 0
        int.from_bytes(in_file.read(2), 'little')
        # Unk#8 - ALWAYS 0
        int.from_bytes(in_file.read(2), 'little')
        # Unk#9 - ALWAYS 0
        int.from_bytes(in_file.read(2), 'little')
        # Unk#10 - ALWAYS 0
        int.from_bytes(in_file.read(2), 'little')
        # get ptr to AnimSequenceTable
        in_file.seek(ptrAnimGroupTable)
        ptrAnimGroups = []
        for ptrAnimSeq in range(nbAnimGroups):
            ##read the location
            animLoc = int.from_bytes(in_file.read(4), 'little')
            ##read the length
            animLength = int.from_bytes(in_file.read(2), 'little')
            # Unk#16 - ALWAYS 0
            int.from_bytes(in_file.read(2), 'little')
            ##save curlocation
            curLocation = in_file.tell()
            ##go to seq location
            in_file.seek(animLoc)
            ptrAnims = []
            for ii in range(animLength):
                ##read all anims
                animPtr = int.from_bytes(in_file.read(4), 'little')
                ptrAnims.append(animPtr)
            ptrAnimGroups.append(ptrAnims)
            in_file.seek(curLocation)

        if ptrOffsetsTable == 0:
            raise ValueError("Read a zero for offset table pointer.")

        ptrFramesRefTableEnd = ptrOffsetsTable

        # Read MetaFramesRefTable: list of all ptr to Meta Frames
        # stop when reached particleOffsetsTable
        # or on AnimSequenceTable, if the above is zero
        in_file.seek(ptrMetaFramesRefTable)
        ptrMetaFrames = []
        while in_file.tell() < ptrFramesRefTableEnd:
            ptrMetaFrames.append(int.from_bytes(in_file.read(4), 'little'))
        # print('  NbMetaframes:' + str(len(ptrMetaFrames)))

        ##Read MetaFrames: for each meta frame group, read until "end of meta frame group" bit is reached.
        self.frameData = []
        for idx, ptrMetaFrame in enumerate(ptrMetaFrames):
            in_file.seek(ptrMetaFrame)
            metaFrameData = []
            while True:
                imgIndex = int.from_bytes(in_file.read(2), 'little', signed=True)
                # Unk#0 - ALWAYS 0 EXCEPT for just ONE official sprite:
                # m_ground,0546,Unk#0,171,0,2560
                int.from_bytes(in_file.read(2), 'little')
                attr0 = int.from_bytes(in_file.read(2), 'little')
                attr1 = int.from_bytes(in_file.read(2), 'little')
                attr2 = int.from_bytes(in_file.read(2), 'little')
                newFramePiece = MetaFramePiece(imgIndex, attr0, attr1, attr2)

                # values of interest:
                # resolution y
                # color palette mode - ALWAYS 0
                # mosaic mode - ALWAYS 0
                # obj mode - ALWAYS Normal
                # obj disable -
                updateUnusedStats([str(len(self.frameData)), str(len(metaFrameData))],
                                  'MFDisabled', int(newFramePiece.isDisabled()))
                # rotation and scaling - ALWAYS 1 when DISABLE is 0
                # Y offset

                # resolution x
                # flip vertical
                # flip horizontal
                # last frame
                # unused - leave 0
                # X offset

                # palette - this will be autocalculated
                # priority - ALWAYS 3
                # tileindex - ALWAYS follows the rules of memory placement: 4 blocks = +1 index.  1 block min

                ##document the used config
                metaFrameData.append(newFramePiece)
                if newFramePiece.isLast():
                    break

            self.frameData.append(metaFrameData)

        in_file.seek(ptrOffsetsTable)
        self.offsetData = []
        for offset_idx in range(len(ptrMetaFrames)):
            headX = int.from_bytes(in_file.read(2), 'little', signed=True)
            headY = int.from_bytes(in_file.read(2), 'little', signed=True)
            lhandX = int.from_bytes(in_file.read(2), 'little', signed=True)
            lhandY = int.from_bytes(in_file.read(2), 'little', signed=True)
            rhandX = int.from_bytes(in_file.read(2), 'little', signed=True)
            rhandY = int.from_bytes(in_file.read(2), 'little', signed=True)
            centerX = int.from_bytes(in_file.read(2), 'little', signed=True)
            centerY = int.from_bytes(in_file.read(2), 'little', signed=True)
            self.offsetData.append(FrameOffset((headX, headY), (lhandX, lhandY), (rhandX, rhandY), (centerX, centerY)))

        ##read all anim pointers
        self.animGroupData = []
        for ptrAnimGroup in ptrAnimGroups:
            animGroup = []
            for a_idx, ptrAnim in enumerate(ptrAnimGroup):
                # if repeating the same pointer, it's the same animation. skip
                if a_idx > 0 and ptrAnim == ptrAnimGroup[a_idx - 1]:
                    continue
                in_file.seek(ptrAnim)
                animSequence = []
                while True:
                    frameDur = int.from_bytes(in_file.read(1), 'little')
                    flag = int.from_bytes(in_file.read(1), 'little')
                    frameIndex = int.from_bytes(in_file.read(2), 'little')
                    sprOffX = int.from_bytes(in_file.read(2), 'little', signed=True)
                    sprOffY = int.from_bytes(in_file.read(2), 'little', signed=True)
                    sdwOffX = int.from_bytes(in_file.read(2), 'little', signed=True)
                    sdwOffY = int.from_bytes(in_file.read(2), 'little', signed=True)
                    if frameDur == 0:
                        break
                    else:
                        animSequence.append(SequenceFrame(frameIndex, frameDur, flag,
                                                        (sprOffX, sprOffY), (sdwOffX, sdwOffY)))
                animGroup.append(animSequence)
            self.animGroupData.append(animGroup)
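
# Hedged sketch of the little-endian field reads used throughout ImportWan
# (illustrative bytes only):
#
#     from io import BytesIO
#     buf = BytesIO(b"\x04\x00\x00\x00\xff\x7f")
#     ptr = int.from_bytes(buf.read(4), 'little')                # 4
#     val = int.from_bytes(buf.read(2), 'little', signed=True)   # 32767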
Пример #51
0
 def parseScanCodeEvent(self, eventFlags: int, eventHeader: int,
                        stream: BytesIO) -> FastPathScanCodeEvent:
     scanCode = Uint8.unpack(stream.read(1))
     return FastPathScanCodeEvent(eventHeader, scanCode,
                                  eventFlags & 1 != 0)
Пример #52
0
class NBTFileReader(object):
    """Low level class that reads the Named Binary Tag format used by Minecraft

    """
    # compile the unpackers into Struct objects
    _byte   = struct.Struct("b")
    _short  = struct.Struct(">h")
    _ushort = struct.Struct(">H")
    _int    = struct.Struct(">i")
    _uint   = struct.Struct(">I")
    _long   = struct.Struct(">q")
    _float  = struct.Struct(">f")
    _double = struct.Struct(">d")

    def __init__(self, fileobj, is_gzip=True):
        """Create a NBT parsing object with the given file-like
        object. Setting is_gzip to False parses the file as a zlib
        stream instead."""
        if is_gzip:
            self._file = gzip.GzipFile(fileobj=fileobj, mode='rb')
        else:
            # pure zlib stream -- maybe later replace this with
            # a custom zlib file object?
            data = zlib.decompress(fileobj.read())
            self._file = BytesIO(data)

        # mapping of NBT type ids to functions to read them out
        self._read_tagmap = {
            0: self._read_tag_end,
            1: self._read_tag_byte,
            2: self._read_tag_short,
            3: self._read_tag_int,
            4: self._read_tag_long,
            5: self._read_tag_float,
            6: self._read_tag_double,
            7: self._read_tag_byte_array,
            8: self._read_tag_string,
            9: self._read_tag_list,
            10: self._read_tag_compound,
            11: self._read_tag_int_array,
            12: self._read_tag_long_array,
        }

    # These private methods read the payload only of the following types
    def _read_tag_end(self):
        # Nothing to read
        return 0

    def _read_tag_byte(self):
        byte = self._file.read(1)
        return self._byte.unpack(byte)[0]

    def _read_tag_short(self):
        bytes = self._file.read(2)
        return self._short.unpack(bytes)[0]

    def _read_tag_int(self):
        bytes = self._file.read(4)
        return self._int.unpack(bytes)[0]

    def _read_tag_long(self):
        bytes = self._file.read(8)
        return self._long.unpack(bytes)[0]

    def _read_tag_float(self):
        bytes = self._file.read(4)
        return self._float.unpack(bytes)[0]

    def _read_tag_double(self):
        bytes = self._file.read(8)
        return self._double.unpack(bytes)[0]

    def _read_tag_byte_array(self):
        length = self._uint.unpack(self._file.read(4))[0]
        bytes = self._file.read(length)
        return bytes

    def _read_tag_int_array(self):
        length = self._uint.unpack(self._file.read(4))[0]
        int_bytes = self._file.read(length * 4)
        return struct.unpack(">%ii" % length, int_bytes)

    def _read_tag_long_array(self):
        length = self._uint.unpack(self._file.read(4))[0]
        long_bytes = self._file.read(length * 8)
        return struct.unpack(">%iq" % length, long_bytes)

    def _read_tag_string(self):
        length = self._ushort.unpack(self._file.read(2))[0]
        # Read the string
        string = self._file.read(length)
        # decode it and return
        return string.decode("UTF-8", 'replace')

    def _read_tag_list(self):
        tagid = self._read_tag_byte()
        length = self._uint.unpack(self._file.read(4))[0]

        read_method = self._read_tagmap[tagid]
        l = [None] * length
        for i in range(length):
            l[i] = read_method()
        return l

    def _read_tag_compound(self):
        # Build a dictionary of all the tag names mapping to their payloads
        tags = {}
        while True:
            # Read a tag
            tagtype = ord(self._file.read(1))

            if tagtype == 0:
                break

            name = self._read_tag_string()
            payload = self._read_tagmap[tagtype]()
            tags[name] = payload

        return tags

    def read_all(self):
        """Reads the entire file and returns (name, payload)
        name is the name of the root tag, and payload is a dictionary mapping
        names to their payloads

        """
        # Read tag type
        try:
            tagtype = ord(self._file.read(1))
            if tagtype != 10:
                raise Exception("Expected a tag compound")
            # Read the tag name
            name = self._read_tag_string()
            payload = self._read_tag_compound()
            return (name, payload)
        except (struct.error, ValueError, TypeError, EOFError) as e:
            raise CorruptNBTError("could not parse nbt: %s" % (str(e),))
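
# Hypothetical usage sketch (not part of the class above): NBTFileReader wraps a
# file-like object, so a gzip-compressed NBT file can be parsed like this
# (the file name and tag names are illustrative):
#
#     with open("level.dat", "rb") as f:
#         name, payload = NBTFileReader(f).read_all()
#     # payload maps tag names to values, e.g. payload["Data"]["LevelName"]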
Пример #53
0
def search():
    try:
        now = datetime.now()
        shops = get_shop()
        date = request.GET.get("date")
        if date:
            if re.match(r"\d{8}", date):
                task_ids = get_task_ids(date)
                logger.debug("Get oct task ids at %s. " % date)
                shops = [shop for shop in shops
                         if str(shop["store_task_id"]) in task_ids]
            else:
                return template(os.path.join(
                    project_path, "search.html"), date=date)
        crawl_tasks = dict()
        unregist_tasks = list()

        for shop in shops:
            explore_tasks = [tasks for tasks in shop["explore_tasks"]
                             if tasks["explore_task_id"]]
            if explore_tasks:
                for explore_task in explore_tasks:
                    if shop["taxon"].count(explore_task["taxon"]):
                        taxon = shop["taxon"]
                    else:
                        taxon = "|".join([shop["taxon"], explore_task["taxon"]])
                    try:
                        last_exec_at = datetime.strptime(
                            explore_task["last_exec_at"][:19],
                            "%Y-%m-%dT%H:%M:%S") + timedelta(hours=8)
                        days = str((now - last_exec_at).days)
                        last_exec_at = last_exec_at.strftime(
                            "%Y-%m-%d %H:%M:%S")
                    except (ValueError, TypeError):
                        days = "0"
                        last_exec_at = "尚未执行"
                    repeated = "持续执行" if explore_task["is_repeated"] else ""
                    crawl_tasks[explore_task["explore_task_id"]] = \
                        [shop["store_name"], str(shop["store_task_id"]),
                        str(explore_task["explore_task_id"]),
                        shop["source_site_name"],
                        taxon, shop["gender"], shop["brand"],
                        last_exec_at, days, repeated]
            else:
                unregist_tasks.append(shop)
        logger.debug("Compare oct task and roc task finished. ")
        crawl_tasks_str = ""
        unregist_tasks_str = ""

        crawl_tasks_str += \
            "店铺,店铺任务ID,ROC任务ID,来源网站名称,分类名称," \
            "性别名称,品牌名称,最后执行时间,最后执行时间差/天,是否持续执行\n"
        for crawl_task in crawl_tasks.values():
            crawl_tasks_str += (",".join(crawl_task)) + "\n"

        unregist_tasks_str += "编号,来源网站,品牌,性别,分类,店铺/任务ID\n"
        for index, unregist_task in enumerate(unregist_tasks):
            unregist_tasks_str += "%s,%s,%s,%s,%s,%s/%s\n" % (
                index + 1, unregist_task["source_site_name"],
                unregist_task["brand"], unregist_task["gender"],
                unregist_task["taxon"],
                unregist_task["store_name"],
                unregist_task["store_task_id"])

        zip_file = BytesIO()
        zf = zipfile.ZipFile(zip_file, "w")
        zf.writestr("crawl_tasks.csv", crawl_tasks_str.encode("gbk"))
        zf.writestr("unregist_tasks.csv", unregist_tasks_str.encode("gbk"))
        zf.close()
        zip_file.seek(0)
        body = zip_file.read()
        zip_file.close()
        logger.debug("Create zip file finished. ")
        headers = dict()
        headers['Content-Type'] = 'application/zip'
        headers['Content-Length'] = len(body)
        headers['Date'] = time.strftime(
            "%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
        headers["Accept-Ranges"] = "bytes"
        headers['Content-Disposition'] = 'attachment; filename="%s.zip"' % \
                                         time.strftime("%Y%m%d%H%M%S")
        return HTTPResponse(body, **headers)
    except Exception as e:
        logger.error("Error: %s" % traceback.format_exc())
        raise e
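
# Hedged sketch of the in-memory ZIP pattern used above, reduced to its core
# (illustrative file name and CSV content; only stdlib calls already shown):
#
#     from io import BytesIO
#     import zipfile
#     buf = BytesIO()
#     with zipfile.ZipFile(buf, "w") as zf:
#         zf.writestr("report.csv", "a,b\n1,2\n")
#     body = buf.getvalue()          # bytes ready to serve as an HTTP response body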
Пример #54
0
    def get_data(self):
        """Update Position data with NatPacket containing Frame data."""

        # TODO: Set up auto-detection of changes in the models, so that self.get_model() gets called.

        try:
            major = self.natnet_version[0]
            minor = self.natnet_version[1]
        except KeyError:
            raise KeyError(
                "Can't get data until NatNetVersion is known. Try re-pinging the server."
            )

        packet = self.data_sock.recv()

        # Get Data and Convert to BytesIO type for easier and quicker reading.
        data = BytesIO(packet._packet[4:])

        # Frame Number
        self.iFrame = unpack("i", data.read(4))[0]  # Frame number

        # MarkerSets
        nMarkerSets = unpack("i", data.read(4))[0]
        for el in range(nMarkerSets):  # nMarkerSets
            marker_set = self.marker_sets[strcpy(
                data)]  # Get name of markerset
            nMarkers = unpack('i', data.read(4))[0]
            assert nMarkers == len(marker_set.markers)
            for marker in marker_set.markers:  # nMarkers
                marker.position = unpack('3f', data.read(12))

        # Unidentified Markers
        self.unidentified_markers = []
        nOtherMarkers = unpack('i', data.read(4))[0]

        for el in range(nOtherMarkers):  # nOtherMarkers
            x, y, z = unpack('3f', data.read(12))
            self.unidentified_markers.append(Marker(position=(x, y,
                                                              z)))  # (x, y, z)

        # Rigid Bodies
        nRigidBodies = unpack('i', data.read(4))[0]
        for el in range(nRigidBodies):  # nRigidBodies
            # Get body id, position, and rotation
            body_id, x, y, z, qx, qy, qz, qw = unpack('i7f', data.read(32))
            body = self.rigid_bodies_by_id[body_id]  #self.rigid_bodies[id]
            body.position = x, y, z
            body.quaternion = qx, qy, qz, qw

            # Get body's markers' information
            body.markers = [
            ]  # That's right.  Reset the whole damn marker list.
            for el2 in range(unpack('i', data.read(4))[0]):  # nRigidMarkers
                mx, my, mz = unpack('3f', data.read(12))
                body.markers.append(Marker(position=(mx, my, mz)))

            for mark_idx in range(len(
                    body.markers)):  # Works for NatNet 2.0.0.0 on.
                body.markers[mark_idx].id = unpack(
                    'i', data.read(4))[0]  # Marker ID
            for mark_idx in range(len(body.markers)):
                body.markers[mark_idx].size = unpack(
                    'f', data.read(4)
                )[0]  # Defaults to 24mm, unless "Diameter Calculation" is checked in Motive's Reconstruction pane.

            # Get other info about body recording.
            body.error = unpack(
                'f', data.read(4))  # Mean marker error (in meters/marker)
            if (major == 2 and minor >= 6) or major > 2:
                body.seen = bool(
                    unpack('h', data.read(2))[0]
                    & 0x01)  # Tracking was successful (bTrackingValid)

        # Skeletons (version 2.1 and later)
        if (major == 2 and minor > 0) or major > 2:

            nSkeletons = unpack('i', data.read(4))[0]

            # TODO: Get Skeletons working.
            if nSkeletons != 0:
                raise NotImplementedError(
                    "Skeletons in dataset. This functionality is not yet tested."
                )
            """    self.skeletons = dict()

            for el in range(nSkeletons):  # nSkeletons

                skel_id = unpack('i', data.read(4))[0]
                skeleton = self.skeletons[skel_id]
                for el2 in range(unpack('i', data.read(4))[0]):  # nRigidBodies

                    body_id, x, y, z, qx, qy, qz, qw = unpack('i7f',data.read(32))
                    body = skeleton.rigid_bodies[body_id]
                    body.position, body.rotation = (x, y, z), (qx, qy, qz, qw)

                    body.markers = []  # That's right.  Reset the whole damn marker list.
                    for el3 in range(unpack('i', data.read(4))[0]):  # nRigidMarkers
                        x, y, z = unpack('3f', data.read(12))
                        body.markers.append(Marker((x, y, z)))
                    for mark_idx in range(len(body.markers)):
                        body.markers[mark_idx].id = unpack('i', data.read(4))[0]  # Marker ID
                    for mark_idx in range(len(body.markers)):
                        body.markers[mark_idx].size = unpack('f', data.read(4))[0]

                    body.error = unpack('f', data.read(4))[0]  # Mean marker error (fError)
                    skeleton.rigid_bodies.append(body)
            """

        # Labeled Markers (version 2.3 and later).  IDs are not unique to marker, just within each body.  Apply to body accordingly.
        if (major == 2 and minor >= 3) or major > 2:
            nLabeledMarkers = unpack('i', data.read(4))[0]

            for el in range(nLabeledMarkers):  # nLabeledMarkers
                marker_id, x, y, z, size = unpack('i4f', data.read(20))
                if marker_id in self.labeled_markers:
                    self.labeled_markers[marker_id].position = x, y, z
                    self.labeled_markers[marker_id].size = size
                else:
                    labeled_marker = Marker(position=(x, y, z))
                    labeled_marker.id = marker_id
                    labeled_marker.size = size
                    self.labeled_markers[marker_id] = labeled_marker

                # (version 2.6 and later)
                if (major == 2 and minor >= 6) or major > 2 or major == 0:
                    params = unpack('h', data.read(2))[0]
                    self.labeled_markers[marker_id].occluded = bool(
                        params & 0x01)  # marker occluded this frame
                    self.labeled_markers[marker_id].pc_solved = bool(
                        params & 0x02
                    )  # Position provided by point cloud solve (directly measured)
                    self.labeled_markers[marker_id].model_solved = bool(
                        params & 0x04
                    )  # Position provided by model solve (indirectly filled in)

        # For NatNet 2.9, Force Plate Data is supplied.
        if (major == 2 and minor >= 9):
            force_plate_n = unpack('i', data.read(4))[0]
            if force_plate_n:
                raise NotImplementedError("Force Plates not yet supported.")
            for force_plate in range(force_plate_n):
                force_plate_id = unpack('i', data.read(4))[0]
                for force_plate_channel in range(unpack('i', data.read(4))[0]):
                    for frame in range(unpack('i', data.read(4))[0]):
                        frame_val = unpack('f', data.read(4))[0]

        # Final Frame Info
        self.latency = unpack(
            'f', data.read(4)
        )[0]  # TODO: Find out how Optitrack latency is calculated. Somehow related to self.timestamp.
        (timecode, timecodeSub) = unpack(
            '2I',
            data.read(8))  # TODO: Get timecode.  Seems stuck at 0:0:0:0.0
        hours = timecode // (60 * 60)
        minutes = (timecode - (hours * 3600)) // 60
        seconds = timecode % 60
        sub_frames = timecodeSub
        self.time = datetime.time(
            hours, minutes, seconds)  # TODO: Encode sub_frames into timecode.

        if (major == 2 and minor >= 7) or major > 2:
            self.timestamp = unpack('d', data.read(8))[
                0]  # Seconds since starting session, in Double Precision Float
        else:
            self.timestamp = unpack(
                'f',
                data.read(4))[0]  # Seconds since starting session, in Float

        # Check if models have changed from last frame (perhaps something was added during recording session.)
        end_params = unpack('h', data.read(2))[0]
        self.is_recording = bool(end_params & 0x01)  # Motive is Recording
        self.tracked_models_changed = bool(end_params & 0x02)
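
# Hedged sketch of the unpack-from-BytesIO pattern used throughout get_data
# (values are illustrative; pack() is only used to build sample bytes):
#
#     from io import BytesIO
#     from struct import pack, unpack
#     data = BytesIO(pack('i3f', 7, 1.0, 2.0, 3.0))
#     frame_number = unpack('i', data.read(4))[0]
#     x, y, z = unpack('3f', data.read(12))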
Пример #55
0
class HttpRequest:
    """A basic HTTP request."""

    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    _upload_handlers = []

    def __init__(self):
        # WARNING: The `WSGIRequest` subclass doesn't call `super`.
        # Any variable assignment made here should also happen in
        # `WSGIRequest.__init__()`.

        self.GET = QueryDict(mutable=True)
        self.POST = QueryDict(mutable=True)
        self.COOKIES = {}
        self.META = {}
        self.FILES = MultiValueDict()

        self.path = ''
        self.path_info = ''
        self.method = None
        self.resolver_match = None
        self.content_type = None
        self.content_params = None

    def __repr__(self):
        if self.method is None or not self.get_full_path():
            return '<%s>' % self.__class__.__name__
        return '<%s: %s %r>' % (self.__class__.__name__, self.method,
                                self.get_full_path())

    def _get_raw_host(self):
        """
        Return the HTTP host using the environment or request headers. Skip
        allowed hosts protection, so may return an insecure host.
        """
        # We try three options, in order of decreasing preference.
        if settings.USE_X_FORWARDED_HOST and ('HTTP_X_FORWARDED_HOST'
                                              in self.META):
            host = self.META['HTTP_X_FORWARDED_HOST']
        elif 'HTTP_HOST' in self.META:
            host = self.META['HTTP_HOST']
        else:
            # Reconstruct the host using the algorithm from PEP 333.
            host = self.META['SERVER_NAME']
            server_port = self.get_port()
            if server_port != ('443' if self.is_secure() else '80'):
                host = '%s:%s' % (host, server_port)
        return host

    def get_host(self):
        """Return the HTTP host using the environment or request headers."""
        host = self._get_raw_host()

        # Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True.
        allowed_hosts = settings.ALLOWED_HOSTS
        if settings.DEBUG and not allowed_hosts:
            allowed_hosts = ['localhost', '127.0.0.1', '[::1]']

        domain, port = split_domain_port(host)
        if domain and validate_host(domain, allowed_hosts):
            return host
        else:
            msg = "Invalid HTTP_HOST header: %r." % host
            if domain:
                msg += " You may need to add %r to ALLOWED_HOSTS." % domain
            else:
                msg += " The domain name provided is not valid according to RFC 1034/1035."
            raise DisallowedHost(msg)

    def get_port(self):
        """Return the port number for the request as a string."""
        if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in self.META:
            port = self.META['HTTP_X_FORWARDED_PORT']
        else:
            port = self.META['SERVER_PORT']
        return str(port)

    def get_full_path(self, force_append_slash=False):
        return self._get_full_path(self.path, force_append_slash)

    def get_full_path_info(self, force_append_slash=False):
        return self._get_full_path(self.path_info, force_append_slash)

    def _get_full_path(self, path, force_append_slash):
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s%s' % (
            escape_uri_path(path),
            '/' if force_append_slash and not path.endswith('/') else '',
            ('?' + iri_to_uri(self.META.get('QUERY_STRING', '')))
            if self.META.get('QUERY_STRING', '') else '',
        )

    def get_signed_cookie(self,
                          key,
                          default=RAISE_ERROR,
                          salt='',
                          max_age=None):
        """
        Attempt to return a signed cookie. If the signature fails or the
        cookie has expired, raise an exception, unless the `default` argument
        is provided, in which case return that value.
        """
        try:
            cookie_value = self.COOKIES[key]
        except KeyError:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        try:
            value = signing.get_cookie_signer(salt=key + salt).unsign(
                cookie_value, max_age=max_age)
        except signing.BadSignature:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        return value

    def get_raw_uri(self):
        """
        Return an absolute URI from variables available in this request. Skip
        allowed hosts protection, so may return insecure URI.
        """
        return '{scheme}://{host}{path}'.format(
            scheme=self.scheme,
            host=self._get_raw_host(),
            path=self.get_full_path(),
        )

    def build_absolute_uri(self, location=None):
        """
        Build an absolute URI from the location and the variables available in
        this request. If no ``location`` is specified, build the absolute URI
        using request.get_full_path(). If the location is absolute, convert it
        to an RFC 3987 compliant URI and return it. If location is relative or
        is scheme-relative (i.e., ``//example.com/``), urljoin() it to a base
        URL constructed from the request variables.
        """
        if location is None:
            # Make it an absolute url (but schemeless and domainless) for the
            # edge case that the path starts with '//'.
            location = '//%s' % self.get_full_path()
        bits = urlsplit(location)
        if not (bits.scheme and bits.netloc):
            # Handle the simple, most common case. If the location is absolute
            # and a scheme or host (netloc) isn't provided, skip an expensive
            # urljoin() as long as no path segments are '.' or '..'.
            if (bits.path.startswith('/') and not bits.scheme
                    and not bits.netloc and '/./' not in bits.path
                    and '/../' not in bits.path):
                # If location starts with '//' but has no netloc, reuse the
                # scheme and netloc from the current request. Strip the double
                # slashes and continue as if it wasn't specified.
                if location.startswith('//'):
                    location = location[2:]
                location = self._current_scheme_host + location
            else:
                # Join the constructed URL with the provided location, which
                # allows the provided location to apply query strings to the
                # base path.
                location = urljoin(self._current_scheme_host + self.path,
                                   location)
        return iri_to_uri(location)

    @cached_property
    def _current_scheme_host(self):
        return '{}://{}'.format(self.scheme, self.get_host())

    def _get_scheme(self):
        """
        Hook for subclasses like WSGIRequest to implement. Return 'http' by
        default.
        """
        return 'http'

    @property
    def scheme(self):
        if settings.SECURE_PROXY_SSL_HEADER:
            try:
                header, value = settings.SECURE_PROXY_SSL_HEADER
            except ValueError:
                raise ImproperlyConfigured(
                    'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
                )
            if self.META.get(header) == value:
                return 'https'
        return self._get_scheme()

    def is_secure(self):
        return self.scheme == 'https'

    def is_ajax(self):
        return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'

    @property
    def encoding(self):
        return self._encoding

    @encoding.setter
    def encoding(self, val):
        """
        Set the encoding used for GET/POST accesses. If the GET or POST
        dictionary has already been created, remove and recreate it on the
        next access (so that it is decoded correctly).
        """
        self._encoding = val
        if hasattr(self, 'GET'):
            del self.GET
        if hasattr(self, '_post'):
            del self._post

    def _initialize_handlers(self):
        self._upload_handlers = [
            uploadhandler.load_handler(handler, self)
            for handler in settings.FILE_UPLOAD_HANDLERS
        ]

    @property
    def upload_handlers(self):
        if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
            self._initialize_handlers()
        return self._upload_handlers

    @upload_handlers.setter
    def upload_handlers(self, upload_handlers):
        if hasattr(self, '_files'):
            raise AttributeError(
                "You cannot set the upload handlers after the upload has been processed."
            )
        self._upload_handlers = upload_handlers

    def parse_file_upload(self, META, post_data):
        """Return a tuple of (POST QueryDict, FILES MultiValueDict)."""
        self.upload_handlers = ImmutableList(
            self.upload_handlers,
            warning="You cannot alter upload handlers after the upload has been processed.",
        )
        parser = MultiPartParser(META, post_data, self.upload_handlers,
                                 self.encoding)
        return parser.parse()

    @property
    def body(self):
        if not hasattr(self, '_body'):
            if self._read_started:
                raise RawPostDataException(
                    "You cannot access body after reading from request's data stream"
                )

            # Limit the maximum request data size that will be handled in-memory.
            if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None
                    and int(self.META.get('CONTENT_LENGTH')
                            or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                raise RequestDataTooBig(
                    'Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.'
                )

            try:
                self._body = self.read()
            except IOError as e:
                raise UnreadablePostError(*e.args) from e
            self._stream = BytesIO(self._body)
        return self._body

    def _mark_post_parse_error(self):
        self._post = QueryDict()
        self._files = MultiValueDict()

    def _load_post_and_files(self):
        """Populate self._post and self._files if the content-type is a form type"""
        if self.method != 'POST':
            self._post, self._files = QueryDict(
                encoding=self._encoding), MultiValueDict()
            return
        if self._read_started and not hasattr(self, '_body'):
            self._mark_post_parse_error()
            return

        if self.content_type == 'multipart/form-data':
            if hasattr(self, '_body'):
                # Use already read data
                data = BytesIO(self._body)
            else:
                data = self
            try:
                self._post, self._files = self.parse_file_upload(
                    self.META, data)
            except MultiPartParserError:
                # An error occurred while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._file to prevent
                # attempts to parse POST data again.
                self._mark_post_parse_error()
                raise
        elif self.content_type == 'application/x-www-form-urlencoded':
            self._post, self._files = QueryDict(
                self.body, encoding=self._encoding), MultiValueDict()
        else:
            self._post, self._files = QueryDict(
                encoding=self._encoding), MultiValueDict()

    def close(self):
        if hasattr(self, '_files'):
            for f in chain.from_iterable(l[1] for l in self._files.lists()):
                f.close()

    # File-like and iterator interface.
    #
    # Expects self._stream to be set to an appropriate source of bytes by
    # a corresponding request subclass (e.g. WSGIRequest).
    # Also when request data has already been read by request.POST or
    # request.body, self._stream points to a BytesIO instance
    # containing that data.

    def read(self, *args, **kwargs):
        self._read_started = True
        try:
            return self._stream.read(*args, **kwargs)
        except IOError as e:
            raise UnreadablePostError(*e.args) from e

    def readline(self, *args, **kwargs):
        self._read_started = True
        try:
            return self._stream.readline(*args, **kwargs)
        except IOError as e:
            raise UnreadablePostError(*e.args) from e

    def __iter__(self):
        return iter(self.readline, b'')

    def xreadlines(self):
        warnings.warn(
            'HttpRequest.xreadlines() is deprecated in favor of iterating the '
            'request.',
            RemovedInDjango30Warning,
            stacklevel=2,
        )
        yield from self

    def readlines(self):
        return list(self)
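
get_signed_cookie() above is the read side of Django's signed-cookie round trip; a hedged usage sketch follows, with illustrative view and cookie names. The salt passed to set_signed_cookie() on the response must match the one used when reading the value back.

from django.http import HttpResponse


def remember_hint(request):
    response = HttpResponse("ok")
    # Sign the value with a per-purpose salt; the signature is verified on read.
    response.set_signed_cookie('ui_hint', 'dark-mode', salt='ui')
    return response


def read_hint(request):
    # Returns None instead of raising KeyError/BadSignature because a default
    # is supplied; max_age bounds how old the signed value may be.
    hint = request.get_signed_cookie('ui_hint', default=None, salt='ui', max_age=3600)
    return HttpResponse(hint or 'no hint')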
Example #56
0
class SegmentBuffer:
    """Buffer for writing a sequence of packets to the output as a segment."""
    def __init__(
        self,
        hass: HomeAssistant,
        outputs_callback: Callable[[], Mapping[str, StreamOutput]],
    ) -> None:
        """Initialize SegmentBuffer."""
        self._stream_id: int = 0
        self._hass = hass
        self._outputs_callback: Callable[[], Mapping[
            str, StreamOutput]] = outputs_callback
        # sequence gets incremented before the first segment so the first segment
        # has a sequence number of 0.
        self._sequence = -1
        self._segment_start_dts: int = cast(int, None)
        self._memory_file: BytesIO = cast(BytesIO, None)
        self._av_output: av.container.OutputContainer = None
        self._input_video_stream: av.video.VideoStream = None
        self._input_audio_stream: av.audio.stream.AudioStream | None = None
        self._output_video_stream: av.video.VideoStream = None
        self._output_audio_stream: av.audio.stream.AudioStream | None = None
        self._segment: Segment | None = None
        # the following 3 member variables are used for Part formation
        self._memory_file_pos: int = cast(int, None)
        self._part_start_dts: int = cast(int, None)
        self._part_has_keyframe = False
        self._stream_settings: StreamSettings = hass.data[DOMAIN][
            ATTR_SETTINGS]
        self._start_time = datetime.datetime.utcnow()

    def make_new_av(
        self,
        memory_file: BytesIO,
        sequence: int,
        input_vstream: av.video.VideoStream,
        input_astream: av.audio.stream.AudioStream,
    ) -> tuple[av.container.OutputContainer, av.video.VideoStream,
               av.audio.stream.AudioStream | None, ]:
        """Make a new av OutputContainer and add output streams."""
        add_audio = input_astream and input_astream.name in AUDIO_CODECS
        container = av.open(
            memory_file,
            mode="w",
            format=SEGMENT_CONTAINER_FORMAT,
            container_options={
                **{
                    # Removed skip_sidx - see https://github.com/home-assistant/core/pull/39970
                    # "cmaf" flag replaces several of the movflags used, but too recent to use for now
                    "movflags": "frag_custom+empty_moov+default_base_moof+frag_discont+negative_cts_offsets+skip_trailer",
                    # Sometimes the first segment begins with negative timestamps, and this setting just
                    # adjusts the timestamps in the output from that segment to start from 0. Helps from
                    # having to make some adjustments in test_durations
                    "avoid_negative_ts": "make_non_negative",
                    "fragment_index": str(sequence + 1),
                    "video_track_timescale": str(
                        int(1 / input_vstream.time_base)),
                },
                # Only do extra fragmenting if we are using ll_hls
                # Let ffmpeg do the work using frag_duration
                # Fragment durations may exceed the 15% allowed variance but it seems ok
                **({
                    "movflags":
                    "empty_moov+default_base_moof+frag_discont+negative_cts_offsets+skip_trailer",
                    # Create a fragment every TARGET_PART_DURATION. The data from each fragment is stored in
                    # a "Part" that can be combined with the data from all the other "Part"s, plus an init
                    # section, to reconstitute the data in a "Segment".
                    # The LL-HLS spec allows for a fragment's duration to be within the range [0.85x,1.0x]
                    # of the part target duration. We use the frag_duration option to tell ffmpeg to try to
                    # cut the fragments when they reach frag_duration. However, the resulting fragments can
                    # have variability in their durations and can end up being too short or too long. If
                    # there are two tracks, as in the case of a video feed with audio, the fragment cut seems
                    # to be done on the first track that crosses the desired threshold, and cutting on the
                    # audio track may result in a shorter video fragment than desired. Conversely, with a
                    # video track with no audio, the discrete nature of frames means that the frame at the
                    # end of a fragment will sometimes extend slightly beyond the desired frag_duration.
                    # Given this, our approach is to use a frag_duration near the upper end of the range for
                    # outputs with audio, and a frag_duration at the lower end of the range for outputs with
                    # only video.
                    "frag_duration":
                    str(self._stream_settings.part_target_duration * (98e4 if add_audio else 9e5)),
                } if self._stream_settings.ll_hls else {}),
            },
        )
        output_vstream = container.add_stream(template=input_vstream)
        # Check if audio is requested
        output_astream = None
        if add_audio:
            output_astream = container.add_stream(template=input_astream)
        return container, output_vstream, output_astream

    def set_streams(
        self,
        video_stream: av.video.VideoStream,
        audio_stream: Any,
        # no type hint for audio_stream until https://github.com/PyAV-Org/PyAV/pull/775 is merged
    ) -> None:
        """Initialize output buffer with streams from container."""
        self._input_video_stream = video_stream
        self._input_audio_stream = audio_stream

    def reset(self, video_dts: int) -> None:
        """Initialize a new stream segment."""
        # Keep track of the number of segments we've processed
        self._sequence += 1
        self._part_start_dts = self._segment_start_dts = video_dts
        self._segment = None
        self._memory_file = BytesIO()
        self._memory_file_pos = 0
        (
            self._av_output,
            self._output_video_stream,
            self._output_audio_stream,
        ) = self.make_new_av(
            memory_file=self._memory_file,
            sequence=self._sequence,
            input_vstream=self._input_video_stream,
            input_astream=self._input_audio_stream,
        )
        if self._output_video_stream.name == "hevc":
            self._output_video_stream.codec_tag = "hvc1"

    def mux_packet(self, packet: av.Packet) -> None:
        """Mux a packet to the appropriate output stream."""

        # Check for end of segment
        if packet.stream == self._input_video_stream:
            if (packet.is_keyframe and
                (packet.dts - self._segment_start_dts) * packet.time_base >=
                    self._stream_settings.min_segment_duration):
                # Flush segment (also flushes the stub part segment)
                self.flush(packet, last_part=True)

            # Mux the packet
            packet.stream = self._output_video_stream
            self._av_output.mux(packet)
            self.check_flush_part(packet)
            self._part_has_keyframe |= packet.is_keyframe

        elif packet.stream == self._input_audio_stream:
            packet.stream = self._output_audio_stream
            self._av_output.mux(packet)

    def check_flush_part(self, packet: av.Packet) -> None:
        """Check for and mark a part segment boundary and record its duration."""
        if self._memory_file_pos == self._memory_file.tell():
            return
        if self._segment is None:
            # We have our first non-zero byte position. This means the init has just
            # been written. Create a Segment and put it to the queue of each output.
            self._segment = Segment(
                sequence=self._sequence,
                stream_id=self._stream_id,
                init=self._memory_file.getvalue(),
                # Fetch the latest StreamOutputs, which may have changed since the
                # worker started.
                stream_outputs=self._outputs_callback().values(),
                start_time=self._start_time,
            )
            self._memory_file_pos = self._memory_file.tell()
        else:  # These are the ends of the part segments
            self.flush(packet, last_part=False)

    def flush(self, packet: av.Packet, last_part: bool) -> None:
        """Output a part from the most recent bytes in the memory_file.

        If last_part is True, also close the segment, give it a duration,
        and clean up the av_output and memory_file.
        There are two different ways to enter this function, and when
        last_part is True, packet has not yet been muxed, while when
        last_part is False, the packet has already been muxed. However,
        in both cases, packet is the next packet and is not included in
        the Part.
        This function writes the duration metadata for the Part and
        for the Segment. However, as the fragmentation done by ffmpeg
        may result in fragment durations which fall outside the
        [0.85x,1.0x] tolerance band allowed by LL-HLS, we need to fudge
        some durations a bit by reporting them as being within that
        range.
        Note that repeated adjustments may cause drift between the part
        durations in the metadata and those in the media and result in
        playback issues in some clients.
        """
        # Part durations should not exceed the part target duration
        adjusted_dts = min(
            packet.dts,
            self._part_start_dts +
            self._stream_settings.part_target_duration / packet.time_base,
        )
        if last_part:
            # Closing the av_output will write the remaining buffered data to the
            # memory_file as a new moof/mdat.
            self._av_output.close()
        elif not self._part_has_keyframe:
            # Parts which are not the last part or an independent part should
            # not have durations below 0.85 of the part target duration.
            adjusted_dts = max(
                adjusted_dts,
                self._part_start_dts + 0.85 *
                self._stream_settings.part_target_duration / packet.time_base,
            )
        assert self._segment
        self._memory_file.seek(self._memory_file_pos)
        self._hass.loop.call_soon_threadsafe(
            self._segment.async_add_part,
            Part(
                duration=float(
                    (adjusted_dts - self._part_start_dts) * packet.time_base),
                has_keyframe=self._part_has_keyframe,
                data=self._memory_file.read(),
            ),
            (segment_duration := float(
                (adjusted_dts - self._segment_start_dts) *
                packet.time_base)) if last_part else 0,
        )
        if last_part:
            # If we've written the last part, we can close the memory_file.
            # We don't need the BytesIO object anymore.
            self._memory_file.close()
            self._start_time += datetime.timedelta(seconds=segment_duration)
            # Reinitialize
            self.reset(packet.dts)
        else:
            # For the last part, these will get set again elsewhere so we can skip
            # setting them here.
            self._memory_file_pos = self._memory_file.tell()
            self._part_start_dts = adjusted_dts
        self._part_has_keyframe = False

    def discontinuity(self) -> None:
        """Mark the stream as having been restarted."""
        # Preserving sequence and stream_id here keep the HLS playlist logic
        # simple to check for discontinuity at output time, and to determine
        # the discontinuity sequence number.
        self._stream_id += 1
        self._start_time = datetime.datetime.utcnow()

    def close(self) -> None:
        """Close stream buffer."""
        self._av_output.close()
        self._memory_file.close()
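
One detail worth spelling out from make_new_av() above: ffmpeg's frag_duration option is given in microseconds, so the `98e4 if add_audio else 9e5` factor converts the part target duration (in seconds) into 0.98x or 0.9x of that target. A small worked example follows, with an illustrative helper name:

def frag_duration_us(part_target_duration: float, add_audio: bool) -> str:
    # 98e4 = 980000 us/s (0.98x) with audio, 9e5 = 900000 us/s (0.9x) video-only.
    return str(part_target_duration * (98e4 if add_audio else 9e5))


assert frag_duration_us(1.0, True) == '980000.0'   # 1 s target -> 0.98 s fragments
assert frag_duration_us(0.5, False) == '450000.0'  # 0.5 s target -> 0.45 s fragments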
Example #57
0
    def run_test(self):
        self.url = urllib.parse.urlparse(self.nodes[0].url)
        self.log.info("Mine blocks and send Bitcoin to node 1")

        # Random address so node1's balance doesn't increase
        not_related_address = "2MxqoHEdNQTyYeX1mHcbrrpzgojbosTpCvJ"

        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[1].generatetoaddress(100, not_related_address)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 175)

        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()

        self.log.info("Test the /tx URI")

        json_obj = self.test_rest_request("/tx/{}".format(txid))
        assert_equal(json_obj['txid'], txid)

        # Check hex format response
        hex_response = self.test_rest_request("/tx/{}".format(txid),
                                              req_type=ReqType.HEX,
                                              ret_type=RetType.OBJ)
        assert_greater_than_or_equal(
            int(hex_response.getheader('content-length')),
            json_obj['size'] * 2)

        spent = (
            json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout']
        )  # get the vin to later check for utxo (should be spent by then)
        # get n of 0.1 outpoint
        n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
        spending = (txid, n)

        self.log.info("Query an unspent TXO using the /getutxos URI")

        self.nodes[1].generatetoaddress(1, not_related_address)
        self.sync_all()
        bb_hash = self.nodes[0].getbestblockhash()

        assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))

        # Check chainTip response
        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
        assert_equal(json_obj['chaintipHash'], bb_hash)

        # Make sure there is one utxo
        assert_equal(len(json_obj['utxos']), 1)
        assert_equal(json_obj['utxos'][0]['value'], Decimal('0.1'))

        self.log.info("Query a spent TXO using the /getutxos URI")

        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))

        # Check chainTip response
        assert_equal(json_obj['chaintipHash'], bb_hash)

        # Make sure there is no utxo in the response because this outpoint has been spent
        assert_equal(len(json_obj['utxos']), 0)

        # Check bitmap
        assert_equal(json_obj['bitmap'], "0")

        self.log.info("Query two TXOs using the /getutxos URI")

        json_obj = self.test_rest_request(
            "/getutxos/{}-{}/{}-{}".format(*(spending + spent)))

        assert_equal(len(json_obj['utxos']), 1)
        assert_equal(json_obj['bitmap'], "10")

        self.log.info(
            "Query the TXOs using the /getutxos URI with a binary response")

        bin_request = b'\x01\x02'
        for txid, n in [spending, spent]:
            bin_request += hex_str_to_bytes(txid)
            bin_request += pack("i", n)

        bin_response = self.test_rest_request("/getutxos",
                                              http_method='POST',
                                              req_type=ReqType.BIN,
                                              body=bin_request,
                                              ret_type=RetType.BYTES)
        output = BytesIO(bin_response)
        chain_height, = unpack("i", output.read(4))
        response_hash = binascii.hexlify(output.read(32)[::-1]).decode('ascii')

        assert_equal(
            bb_hash, response_hash
        )  # check if getutxo's chaintip during calculation was fine
        assert_equal(chain_height, 102)  # chain height must be 102

        self.log.info("Test the /getutxos URI with and without /checkmempool")
        # Create a transaction, check that it's found with /checkmempool, but
        # not found without. Then confirm the transaction and check that it's
        # found with or without /checkmempool.

        # do a tx and don't sync
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        json_obj = self.test_rest_request("/tx/{}".format(txid))
        # get the spent output to later check for utxo (should be spent by then)
        spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
        # get n of 0.1 outpoint
        n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
        spending = (txid, n)

        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
        assert_equal(len(json_obj['utxos']), 0)

        json_obj = self.test_rest_request(
            "/getutxos/checkmempool/{}-{}".format(*spending))
        assert_equal(len(json_obj['utxos']), 1)

        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
        assert_equal(len(json_obj['utxos']), 1)

        json_obj = self.test_rest_request(
            "/getutxos/checkmempool/{}-{}".format(*spent))
        assert_equal(len(json_obj['utxos']), 0)

        self.nodes[0].generate(1)
        self.sync_all()

        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
        assert_equal(len(json_obj['utxos']), 1)

        json_obj = self.test_rest_request(
            "/getutxos/checkmempool/{}-{}".format(*spending))
        assert_equal(len(json_obj['utxos']), 1)

        # Do some invalid requests
        self.test_rest_request("/getutxos",
                               http_method='POST',
                               req_type=ReqType.JSON,
                               body='{"checkmempool',
                               status=400,
                               ret_type=RetType.OBJ)
        self.test_rest_request("/getutxos",
                               http_method='POST',
                               req_type=ReqType.BIN,
                               body='{"checkmempool',
                               status=400,
                               ret_type=RetType.OBJ)
        self.test_rest_request("/getutxos/checkmempool",
                               http_method='POST',
                               req_type=ReqType.JSON,
                               status=400,
                               ret_type=RetType.OBJ)

        # Test limits
        long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)])
        self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri),
                               http_method='POST',
                               status=400,
                               ret_type=RetType.OBJ)

        long_uri = '/'.join(['{}-{}'.format(txid, n_) for n_ in range(15)])
        self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri),
                               http_method='POST',
                               status=200)

        self.nodes[0].generate(
            1)  # generate block to not affect upcoming tests
        self.sync_all()

        self.log.info("Test the /block, /blockhashbyheight and /headers URIs")
        bb_hash = self.nodes[0].getbestblockhash()

        # Check result if block does not exist
        assert_equal(
            self.test_rest_request(
                '/headers/1/0000000000000000000000000000000000000000000000000000000000000000'
            ), [])
        self.test_rest_request(
            '/block/0000000000000000000000000000000000000000000000000000000000000000',
            status=404,
            ret_type=RetType.OBJ)

        # Check result if block is not in the active chain
        self.nodes[0].invalidateblock(bb_hash)
        assert_equal(self.test_rest_request('/headers/1/{}'.format(bb_hash)),
                     [])
        self.test_rest_request('/block/{}'.format(bb_hash))
        self.nodes[0].reconsiderblock(bb_hash)

        # Check binary format
        response = self.test_rest_request("/block/{}".format(bb_hash),
                                          req_type=ReqType.BIN,
                                          ret_type=RetType.OBJ)
        assert_greater_than(int(response.getheader('content-length')),
                            BLOCK_HEADER_SIZE)
        response_bytes = response.read()

        # Compare with block header
        response_header = self.test_rest_request(
            "/headers/1/{}".format(bb_hash),
            req_type=ReqType.BIN,
            ret_type=RetType.OBJ)
        assert_equal(int(response_header.getheader('content-length')),
                     BLOCK_HEADER_SIZE)
        response_header_bytes = response_header.read()
        assert_equal(response_bytes[:BLOCK_HEADER_SIZE], response_header_bytes)

        # Check block hex format
        response_hex = self.test_rest_request("/block/{}".format(bb_hash),
                                              req_type=ReqType.HEX,
                                              ret_type=RetType.OBJ)
        assert_greater_than(int(response_hex.getheader('content-length')),
                            BLOCK_HEADER_SIZE * 2)
        response_hex_bytes = response_hex.read().strip(b'\n')
        assert_equal(binascii.hexlify(response_bytes), response_hex_bytes)

        # Compare with hex block header
        response_header_hex = self.test_rest_request(
            "/headers/1/{}".format(bb_hash),
            req_type=ReqType.HEX,
            ret_type=RetType.OBJ)
        assert_greater_than(
            int(response_header_hex.getheader('content-length')),
            BLOCK_HEADER_SIZE * 2)
        response_header_hex_bytes = response_header_hex.read(
            BLOCK_HEADER_SIZE * 2)
        assert_equal(binascii.hexlify(response_bytes[:BLOCK_HEADER_SIZE]),
                     response_header_hex_bytes)

        # Check json format
        block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
        assert_equal(block_json_obj['hash'], bb_hash)
        assert_equal(
            self.test_rest_request("/blockhashbyheight/{}".format(
                block_json_obj['height']))['blockhash'], bb_hash)

        # Check hex/bin format
        resp_hex = self.test_rest_request("/blockhashbyheight/{}".format(
            block_json_obj['height']),
                                          req_type=ReqType.HEX,
                                          ret_type=RetType.OBJ)
        assert_equal(resp_hex.read().decode('utf-8').rstrip(), bb_hash)
        resp_bytes = self.test_rest_request("/blockhashbyheight/{}".format(
            block_json_obj['height']),
                                            req_type=ReqType.BIN,
                                            ret_type=RetType.BYTES)
        blockhash = binascii.hexlify(resp_bytes[::-1]).decode('utf-8')
        assert_equal(blockhash, bb_hash)

        # Check invalid blockhashbyheight requests
        resp = self.test_rest_request("/blockhashbyheight/abc",
                                      ret_type=RetType.OBJ,
                                      status=400)
        assert_equal(resp.read().decode('utf-8').rstrip(),
                     "Invalid height: abc")
        resp = self.test_rest_request("/blockhashbyheight/1000000",
                                      ret_type=RetType.OBJ,
                                      status=404)
        assert_equal(resp.read().decode('utf-8').rstrip(),
                     "Block height out of range")
        resp = self.test_rest_request("/blockhashbyheight/-1",
                                      ret_type=RetType.OBJ,
                                      status=400)
        assert_equal(resp.read().decode('utf-8').rstrip(),
                     "Invalid height: -1")
        self.test_rest_request("/blockhashbyheight/",
                               ret_type=RetType.OBJ,
                               status=400)

        # Compare with json block header
        json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash))
        assert_equal(len(json_obj),
                     1)  # ensure that there is one header in the json response
        assert_equal(json_obj[0]['hash'],
                     bb_hash)  # request/response hash should be the same

        # Compare with normal RPC block response
        rpc_block_json = self.nodes[0].getblock(bb_hash)
        for key in [
                'hash', 'confirmations', 'height', 'version', 'merkleroot',
                'time', 'nonce', 'bits', 'difficulty', 'chainwork',
                'previousblockhash'
        ]:
            assert_equal(json_obj[0][key], rpc_block_json[key])

        # See if we can get 5 headers in one response
        self.nodes[1].generate(5)
        self.sync_all()
        json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash))
        assert_equal(len(json_obj), 5)  # now we should have 5 header objects

        self.log.info("Test tx inclusion in the /mempool and /block URIs")

        # Make 3 tx and mine them on node 1
        txs = []
        txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
        txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
        txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
        self.sync_all()

        # Check that there are exactly 3 transactions in the TX memory pool before generating the block
        json_obj = self.test_rest_request("/mempool/info")
        assert_equal(json_obj['size'], 3)
        # the size of the memory pool should be greater than 3x ~100 bytes
        assert_greater_than(json_obj['bytes'], 300)

        # Check that there are our submitted transactions in the TX memory pool
        json_obj = self.test_rest_request("/mempool/contents")
        for i, tx in enumerate(txs):
            assert tx in json_obj
            assert_equal(json_obj[tx]['spentby'], txs[i + 1:i + 2])
            assert_equal(json_obj[tx]['depends'], txs[i - 1:i])

        # Now mine the transactions
        newblockhash = self.nodes[1].generate(1)
        self.sync_all()

        # Check if the 3 tx show up in the new block
        json_obj = self.test_rest_request("/block/{}".format(newblockhash[0]))
        non_coinbase_txs = {
            tx['txid']
            for tx in json_obj['tx'] if 'coinbase' not in tx['vin'][0]
        }
        assert_equal(non_coinbase_txs, set(txs))

        # Check the same but without tx details
        json_obj = self.test_rest_request("/block/notxdetails/{}".format(
            newblockhash[0]))
        for tx in txs:
            assert tx in json_obj['tx']

        self.log.info("Test the /chaininfo URI")

        bb_hash = self.nodes[0].getbestblockhash()

        json_obj = self.test_rest_request("/chaininfo")
        assert_equal(json_obj['bestblockhash'], bb_hash)
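
The test above leans on a filter_output_indices_by_value() helper that is not shown in this snippet. A minimal sketch of what such a helper could look like (yielding the 'n' index of every vout whose value matches) follows; it is an assumption, not the test framework's actual definition.

from decimal import Decimal


def filter_output_indices_by_value(vouts, value):
    # Yield the output index 'n' of each vout entry matching the given value.
    for vout in vouts:
        if vout['value'] == value:
            yield vout['n']


# Usage mirroring the test:
#   n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))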
Example #58
0
def read_float(x: BytesIO) -> float:
    return struct.unpack("f", x.read(4))[0]
Example #59
0
class MultipartPart(object):
    def __init__(self,
                 buffer_size=2**16,
                 memfile_limit=2**18,
                 charset='latin1'):
        self.headerlist = []
        self.headers = None
        self.file = False
        self.size = 0
        self._buf = tob('')
        self.disposition, self.name, self.filename = None, None, None
        self.content_type, self.charset = None, charset
        self.memfile_limit = memfile_limit
        self.buffer_size = buffer_size

    def feed(self, line, nl=''):
        if self.file:
            return self.write_body(line, nl)
        return self.write_header(line, nl)

    def write_header(self, line, nl):
        line = line.decode(self.charset or 'latin1')
        if not nl:
            raise MultipartError('Unexpected end of line in header.')
        if not line.strip():  # blank line -> end of header segment
            self.finish_header()
        elif line[0] in ' \t' and self.headerlist:
            name, value = self.headerlist.pop()
            self.headerlist.append((name, value + line.strip()))
        else:
            if ':' not in line:
                raise MultipartError("Syntax error in header: No colon.")
            name, value = line.split(':', 1)
            self.headerlist.append((name.strip(), value.strip()))

    def write_body(self, line, nl):
        if not line and not nl:
            return  # This does not even flush the buffer
        self.size += len(line) + len(self._buf)
        self.file.write(self._buf + line)
        self._buf = nl
        if self.content_length > 0 and self.size > self.content_length:
            raise MultipartError('Size of body exceeds Content-Length header.')
        if self.size > self.memfile_limit and isinstance(self.file, BytesIO):
            # TODO: What about non-file uploads that exceed the memfile_limit?
            self.file, old = TemporaryFile(mode='w+b'), self.file
            old.seek(0)
            copy_file(old, self.file, self.size, self.buffer_size)

    def finish_header(self):
        self.file = BytesIO()
        self.headers = Headers(self.headerlist)
        cdis = self.headers.get('Content-Disposition', '')
        ctype = self.headers.get('Content-Type', '')
        clen = self.headers.get('Content-Length', '-1')
        if not cdis:
            raise MultipartError('Content-Disposition header is missing.')
        self.disposition, self.options = parse_options_header(cdis)
        self.name = self.options.get('name')
        self.filename = self.options.get('filename')
        self.content_type, options = parse_options_header(ctype)
        self.charset = options.get('charset') or self.charset
        self.content_length = int(clen)

    def is_buffered(self):
        ''' Return true if the data is fully buffered in memory.'''
        return isinstance(self.file, BytesIO)

    @property
    def value(self):
        ''' Data decoded with the specified charset '''
        pos = self.file.tell()
        self.file.seek(0)
        val = self.file.read()
        self.file.seek(pos)
        return val.decode(self.charset)

    def save_as(self, path):
        fp = open(path, 'wb')
        pos = self.file.tell()
        try:
            self.file.seek(0)
            size = copy_file(self.file, fp)
        finally:
            self.file.seek(pos)
        return size
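
The key trick in write_body() above is spilling from memory to disk once a part grows past memfile_limit. A standalone sketch of that behaviour follows, with illustrative names and shutil.copyfileobj standing in for the module's copy_file helper:

from io import BytesIO
from shutil import copyfileobj
from tempfile import TemporaryFile


class SpillBuffer:
    """Keep small payloads in memory, spill large ones to a temporary file."""

    def __init__(self, memfile_limit=2**18):
        self.memfile_limit = memfile_limit
        self.size = 0
        self.file = BytesIO()

    def write(self, chunk: bytes) -> None:
        self.size += len(chunk)
        self.file.write(chunk)
        if self.size > self.memfile_limit and isinstance(self.file, BytesIO):
            # Swap the in-memory buffer for an anonymous temporary file.
            self.file, old = TemporaryFile(mode='w+b'), self.file
            old.seek(0)
            copyfileobj(old, self.file)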
Example #60
0
    def send_and_receive(self, ajp_request):
        '''Send the request and receive the response.

        :param ajp_request: AjpForwardRequest with all request data.
        :return: :class:`AjpResponse <AjpResponse>` object
        :rtype: ajp4py.AjpResponse
        '''
        # Add this socket's local port as a request attribute.
        attrs = ajp_request.request_attributes
        attrs.append(
            ATTRIBUTE(AjpAttribute.REQ_ATTRIBUTE,
                      ('AJP_REMOTE_PORT', str(self._socket.getsockname()[1]))))
        PROTOCOL_LOGGER.debug('Request attributes: %s',
                              ajp_request.request_attributes)

        # Serialize the non-data part of the request.
        request_packet = ajp_request.serialize_to_packet()
        self._socket.sendall(request_packet)

        # Serialize the data (if any).
        _prefix_code = AjpPacketHeadersFromContainer.GET_BODY_CHUNK
        _resp_buffer = None
        for packet in ajp_request.serialize_data_to_packet():
            # As each data packet is sent, make sure the servlet container
            # responds with a GET_BODY_CHUNK header and send more if there
            # is any.
            if _prefix_code == AjpPacketHeadersFromContainer.GET_BODY_CHUNK:
                self._socket.sendall(packet)
                _data = self._socket.recv(self.RESPONSE_HEADER_LENGTH)
                _resp_buffer = BytesIO(_data)
                _, _data_len, _prefix_code = unpack_bytes('>HHb', _resp_buffer)
                _resp_buffer = BytesIO(self._socket.recv(_data_len - 1))

        ajp_resp = AjpResponse()
        _resp_content = b''

        # Data has been sent. Now parse the reply making sure to 'offset'
        # anything read from the socket already by sending the BytesIO
        # object if there is one.
        while _prefix_code != AjpPacketHeadersFromContainer.END_RESPONSE:

            if not _resp_buffer:
                _data = self._socket.recv(self.RESPONSE_HEADER_LENGTH)
                _magic, _data_len, _prefix_code = unpack_bytes(
                    '>HHb', BytesIO(_data))
                _resp_buffer = BytesIO(self._socket.recv(_data_len - 1))

            if _prefix_code == AjpPacketHeadersFromContainer.SEND_HEADERS:

                status_code, = unpack_bytes('>H', _resp_buffer)
                _, = unpack_as_string(_resp_buffer)
                setattr(ajp_resp, '_response_headers',
                        self._read_response_headers(_resp_buffer))
                setattr(ajp_resp, '_status_code', status_code)
                setattr(ajp_resp, '_status_msg',
                        lookup_status_by_code(status_code).description)

            elif _prefix_code == AjpPacketHeadersFromContainer.SEND_BODY_CHUNK:

                _data_len, = unpack_bytes('>H', _resp_buffer)
                _resp_content += _resp_buffer.read(_data_len + 1)

            elif _prefix_code == AjpPacketHeadersFromContainer.END_RESPONSE:

                _, = unpack_bytes('b', _resp_buffer)

            else:

                PROTOCOL_LOGGER.error('Unknown value for _prefix_code:%d',
                                      _prefix_code)
                raise NotImplementedError

            # Clear the response buffer for the next iteration.
            _resp_buffer = None

        setattr(ajp_resp, '_ajp_request', ajp_request)
        setattr(ajp_resp, '_content', _resp_content)
        return ajp_resp
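
For reference, the '>HHb' unpacks above assume the standard AJP13 container-to-server framing: a 2-byte magic ('A', 'B'), a 2-byte payload length, and a 1-byte prefix code as the first payload byte, which is why the code then recv()s data_len - 1 more bytes. A minimal sketch of that header parse (RESPONSE_HEADER_LENGTH would then be 5; the names are illustrative):

from struct import unpack

AJP_CONTAINER_MAGIC = 0x4142  # the ASCII bytes 'A', 'B'


def parse_response_header(header: bytes):
    # header is the first 5 bytes of a container-to-server packet.
    magic, data_len, prefix_code = unpack('>HHb', header[:5])
    if magic != AJP_CONTAINER_MAGIC:
        raise ValueError('not an AJP container packet')
    # data_len counts the payload including the prefix byte already consumed,
    # so data_len - 1 bytes of payload remain on the socket.
    return data_len, prefix_code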