Example #1
 def write(to_file, data_array, write_offset, order):
     data = data_array.tobytes(order)
     # On Linux, pwrite() to a descriptor opened with O_APPEND appends to
     # the end of the file regardless of the offset, so O_APPEND is omitted.
     fd = os.open(to_file, os.O_RDWR)
     os.pwrite(fd, data, write_offset)
     os.close(fd)
     del data_array
     del data
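A note on the flag removed above: the pwrite(2) man page documents that, on Linux, pwrite() to a descriptor opened with O_APPEND appends to the end of the file regardless of the offset argument. A minimal sketch of the pitfall (the file name is hypothetical):

import os

# On Linux the pwrite() below lands at EOF, not at offset 0,
# because the descriptor was opened with O_APPEND.
fd = os.open("demo.bin", os.O_RDWR | os.O_CREAT | os.O_APPEND)
os.write(fd, b"AAAA")
os.pwrite(fd, b"B", 0)  # intended for offset 0
os.close(fd)
with open("demo.bin", "rb") as f:
    print(f.read())  # on Linux: b'AAAAB'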
Example #2
def pwrite_absolute_pos(
    fd: int,
    chunk: bytes,
    range_start: int,
    pos: int,
):
    os.pwrite(fd, chunk, pos)
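Unlike os.write(), os.pwrite() takes an explicit position and leaves the descriptor's own file offset untouched, which is why thin wrappers like this one need no seek bookkeeping. A small sketch demonstrating that (temporary file used for illustration):

import os
import tempfile

fd, path = tempfile.mkstemp()
os.write(fd, b"hello")               # advances the fd offset to 5
os.pwrite(fd, b"HE", 0)              # writes at offset 0...
print(os.lseek(fd, 0, os.SEEK_CUR))  # ...but the fd offset is still 5
os.close(fd)
os.unlink(path)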
Example #3
def hash_genesis_block():

    blockheader = (
        "02000000" +
        "a4051e368bfa0191e6c747507dd0fdb03da1a0a54ed14829810b97c6ac070000" +
        "e932b0f6b8da85ccc464d9d5066d01d904fb05ae8d1ddad7095b9148e3f08ba6" +
        "bcfb6459" + "f0ff0f1e" + "3682bb08")
    print("txdata:%s" % blockheader)
    blockheader_bin = binascii.unhexlify(swap_order(blockheader))
    tx_data = blockheader_bin

    # Open files
    fd_h2c = os.open("/dev/xdma/card0/h2c0", os.O_WRONLY)
    fd_c2h = os.open("/dev/xdma/card0/c2h0", os.O_RDONLY)

    start_time = time.time()
    # Send to FPGA
    os.pwrite(fd_h2c, tx_data, 0)

    # Receive from FPGA
    rx_data = os.pread(fd_c2h, 32, 0)
    end_time = time.time()
    delay = end_time - start_time
    blockheader_rx = codecs.encode(rx_data, 'hex').decode('ascii')
    print("rxdata:%s" % swap_order(blockheader_rx)[0:64])
    print("Time elapsed:%f microsec" % (delay * 1000000))
    os.close(fd_h2c)
    os.close(fd_c2h)
Example #4
 def update_file(self, author, time, message):
     shorten_time = time.split('.')[0]
     index = os.open('.lgit/index', os.O_RDWR)
     with open('.lgit/index', 'r+') as file1:
         content = file1.readlines()
         if not content:
             print('On branch master\nnothing to commit, working ' +
                   'directory clean')
             exit(1)
         else:
             position = 0
             committed = False
             for line in content:
                 length = len(line)
                 line = line.strip().split(' ')
                 if line[2] == line[3]:
                     position += length
                 elif line[2] != line[3]:
                     committed = True
                     position += len(line[0]) + len(line[1]) + \
                         len(line[2]) + 3
                     os.pwrite(index, str.encode(line[2]), position)
                     position += len(line[2]) + len(line[-1]) + 2
                     with open('.lgit/snapshots/' + self.time,
                               'a+') as file2:
                         file2.write(line[2] + ' ' + line[-1] + '\n')
             if not committed:
                 print('On branch master\nnothing to commit, working ' +
                       'directory clean')
                 exit(1)
             else:
                 with open('.lgit/commits/' + self.time, 'w') as file:
                     file.write(author + "\n" + shorten_time + "\n\n" +
                                message)
Example #5
 def check_deleted_file(self):
     index = os.open('.lgit/.deleted', os.O_RDWR)
     with open('.lgit/.deleted') as f:
         start = 0
         for deleted in f:
             delete = deleted.strip().split(' ')
             blank = " "*len(delete[0])
             if '[deletedbyLGIT]' in delete[0]:
                 os.pwrite(index, str.encode(blank), start)
             start += len(deleted)
     os.close(index)
     del_content = []
     content = []
     with open('.lgit/.deleted') as f:
         for deleted in f:
             delete = deleted.strip()
             content.append(delete)
     for item in content:
         if item:
             item += '\n'
             del_content.append(item)
     os.unlink('.lgit/.deleted')
     with open('.lgit/.deleted', 'a+') as file:
         for line in del_content:
             file.write(line)
Example #6
def shred_file(path: str, passes: int, max_filename: int):
    print(f"[*] Current file: {path}")
    valid_chars = string.ascii_letters + string.digits
    valid_bytes = [chr(c) for c in range(0xFF + 1)]
    raw_byte_encode = "latin1"
    # Check isfile() before getsize() so a missing path cannot raise.
    if os.path.isfile(path) and os.path.getsize(path) > 0:
        filesize = os.path.getsize(path)
        for temp in range(passes):
            #Overwrite file with random raw bytes
            for i in range(filesize):
                fd = os.open(path, os.O_WRONLY | os.O_NOCTTY)
                os.pwrite(fd,
                          random.choice(valid_bytes).encode(raw_byte_encode),
                          i)
                os.close(fd)

            #Rename File
            new_name = "".join(
                random.choices(valid_chars,
                               k=random.choice(range(1, max_filename + 1))))
            new_path = f"{dir_char}".join(
                path.split(f"{dir_char}")[0:-1]) + f"{dir_char}{new_name}"
            if (len(path.split(f"{dir_char}")) == 1):
                new_path = new_name
            os.rename(path, new_path)
            path = new_path

        #Remove file after completing all passes
        os.remove(path)
Example #7
    def _put(self, i, data):
        if self.closed:
            return False
        if i < 0 or self.length <= i:
            raise IndexError("index {} out of range ([0, {}])".format(
                i, self.length - 1))

        self._open_fds()

        index_ofst = self.buflen * i
        fcntl.flock(self.cache_fd, fcntl.LOCK_EX)
        buf = os.pread(self.cache_fd, self.buflen, index_ofst)
        (o, l) = unpack('Qq', buf)

        if l >= 0 and o >= 0:
            # Already data exists
            fcntl.flock(self.cache_fd, fcntl.LOCK_UN)
            return False

        data_pos = os.lseek(self.cache_fd, 0, os.SEEK_END)
        if self.cache_size_limit:
            if self.cache_size_limit < (data_pos + len(data)):
                self._frozen = True
                fcntl.flock(self.cache_fd, fcntl.LOCK_UN)
                return False

        index_entry = pack('Qq', data_pos, len(data))
        assert os.pwrite(self.cache_fd, index_entry, index_ofst) == self.buflen
        assert os.pwrite(self.cache_fd, data, data_pos) == len(data)
        os.fsync(self.cache_fd)
        fcntl.flock(self.cache_fd, fcntl.LOCK_UN)
        return True
Example #8
async def _pcs_request(client, method='download', path=pcs_root, api=api_url,
                       wfd=None, stream=None, json=False, headers=None,
                       height=None, width=None):
    payload = {'method':method, 'access_token':access_token, 'path':path}
    if api == api_thumb_url:
        payload['height'] = height
        payload['width'] = width
    response = await client.get(api, params=payload, headers=headers)
    data = b''
    if json:
        data = await response.json()
        await response.release()
        return data
    offset = 0
    if headers is not None and 'Range' in headers:
        offset = int(headers['Range'][6:].split('-')[0])
    try:
        while True:
            tmp = await response.content.read(4096)
            if not tmp:
                break
            if stream is not None and stream["off"] >= offset:
                if len(data) > 0:
                    stream["fd"].write(data)
                    stream["off"] += len(data)
                    data = b''
                stream["fd"].write(tmp)
                stream["off"] += len(tmp)
            elif wfd is None:
                data += tmp
            else:
                os.pwrite(wfd, tmp, offset)
                offset += len(tmp)
    finally:
        await response.release()
    return data
Example #9
 def _read_completed(fd, buf, offset, err):
     logging.debug('Writing %d B to offset %d B', buf.size(), offset)
     os.pwrite(fd, buf.to_bytearray(), offset)
     self.copies[-1].copied += buf.size()
     STATE.write()
     # By returning 1 here we auto-retire the aio_pread command.
     return 1
Example #10
 def setUp(self):
     # Create a temporary directory
     self.temp_dir = tempfile.mkdtemp()
     self.temp_files = {"File{}".format(x) : tempfile.mkstemp(prefix=str(x), dir=self.temp_dir) for x in range(4)}
     for tkey in self.temp_files:
         tfp, tfname = self.temp_files[tkey]
         os.pwrite(tfp, bytes("{}\n".format(tkey), ENCODING), 0)
         os.close(tfp)
Example #11
def mess_file(path):
    file_size = os.path.getsize(path)

    offset = random.randint(0, file_size)

    fd = os.open(path, os.O_WRONLY)
    os.pwrite(fd, os.urandom(1000), offset)
    os.close(fd)
Example #12
def mem_test_random():
    # Status
    test_ok = True
    test_msg = "OK\n"

    # This is the only number that should need to change- how many MB to generate
    # 256M16 part is 512MB; 512M16 part is 1024MB
    NUM_MB = 1024

    # Generate some data
    TRANSFER_SIZE = 1024 * 1024 * 4
    NUM_TRANSFERS = int(NUM_MB / 4)
    tx_data = []
    rx_data = []
    for page in range(NUM_TRANSFERS):
        tx_data.append(bytearray(numpy.random.bytes(TRANSFER_SIZE)))

    # Open files
    fd_h2c = os.open("/dev/xdma0_h2c_0", os.O_WRONLY)
    fd_c2h = os.open("/dev/xdma0_c2h_0", os.O_RDONLY)

    # Send to FPGA block RAM
    start = time.time()
    for page in range(NUM_TRANSFERS):
        os.pwrite(fd_h2c, tx_data[page], page * TRANSFER_SIZE)
    end = time.time()
    duration = end - start

    # Print time
    BPS = TRANSFER_SIZE * NUM_TRANSFERS / (duration)
    print("Sent in " + str((duration) * 1000.0) + " milliseconds (" +
          str(BPS / 1000000) + " MBPS)")

    # Receive from FPGA block RAM
    start = time.time()
    for page in range(NUM_TRANSFERS):
        rx_data.append(os.pread(fd_c2h, TRANSFER_SIZE, page * TRANSFER_SIZE))
    end = time.time()
    duration = end - start

    # Print time
    BPS = TRANSFER_SIZE * NUM_TRANSFERS / (duration)
    print("Received in " + str((duration) * 1000.0) + " milliseconds (" +
          str(BPS / 1000000) + " MBPS)")

    # Make sure data matches
    for page in range(NUM_TRANSFERS):
        if tx_data[page] != rx_data[page]:
            test_ok = False
            test_msg = "Whoops on page " + str(page) + "\n"
            print(test_msg)

    with open('err.log', 'a') as errlog:
        errlog.write(test_msg)

    os.close(fd_h2c)
    os.close(fd_c2h)
Example #13
def create_inventory(device_ip):
    click.secho("Creating ansible inventory", fg="blue", bold=True)
    inventory_str = \
        "%s deploy_sample_content=False do_image_preparation=True\n" % \
        (device_ip,)
    inventory_fd, inventory_name = tempfile.mkstemp()
    os.pwrite(inventory_fd, inventory_str.encode("utf-8"), 0)
    os.close(inventory_fd)
    return inventory_name
Example #14
def procmem_write(path, pid, gvar_name, symoff_table, base_addrs, write_data):
    fd = os.open("/proc/{}/mem".format(pid), os.O_RDWR)
    symoff = symoff_table[gvar_name]
    base_addr = base_addrs[path]
    print("access addr = 0x{:x}".format(base_addr + symoff))
    print("before: 0x{:x}".format(
        struct.unpack("<I", os.pread(fd, 4, base_addr + symoff))[0]))
    os.pwrite(fd, struct.pack("<I", write_data), base_addr + symoff)
    print(" after: 0x{:x}".format(
        struct.unpack("<I", os.pread(fd, 4, base_addr + symoff))[0]))
Example #15
def test_read_write(loopback_fs_root):
    (loopback_fs_root / "file").write_bytes(b"abcdef")

    fd = os.open(loopback_fs_root / "file", os.O_RDWR)

    try:
        os.pwrite(fd, b"xxx", 2)
        assert os.pread(fd, 3, 1) == b"bxx"
        os.fsync(fd)
    finally:
        os.close(fd)
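os.pread() is the read-side counterpart with the same offset semantics; like read(), it may return fewer bytes than requested and returns b'' once the offset reaches end of file. A small sketch (temporary file used for illustration):

import os
import tempfile

fd, path = tempfile.mkstemp()
os.pwrite(fd, b"abcdef", 0)
print(os.pread(fd, 100, 4))  # b'ef' -- short read at EOF
print(os.pread(fd, 3, 6))    # b''   -- offset already at EOF
os.close(fd)
os.unlink(path)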
Example #16
 def _read_completed(fd, buf, offset, err):
     logging.debug('Writing %d B to offset %d B', buf.size(), offset)
     os.pwrite(fd, buf.to_bytearray(), offset)
     copy.copied += buf.size()
     STATE.write()
     # Everything is running in the same thread, so we can safely call
     # keepalive here as that makes it more spread out (lower chance of
     # not being called for a long time)
     if keepalive is not None:
         keepalive()
     # By returning 1 here we auto-retire the aio_pread command.
     return 1
Example #17
 def control_write(self, struct, elem, field_name: str,
                   sz=None, off=0, subfield=None):
     # Revisit: handle subfield
     field = getattr(elem.__class__, field_name)
     off, sz = self.field_off_sz(field, sz=sz, off=off)
     if struct.fullEntryWrite:
         sz = elem.Size
         off -= (off % sz) # align (downwards) to sz
     # print('  control_write: off={}, sz={}'.format(off, sz)) # Revisit
     # re-open file
     with struct.path.open(mode='rb+') as f:
         struct.set_fd(f)
         os.pwrite(struct.fd, struct.data[off:off+sz], off)
Example #18
    def _put(self, i, data):
        if self.closed:
            return
        if i < 0 or self.length <= i:
            raise IndexError("index {} out of range ([0, {}])".format(
                i, self.length - 1))

        if self.cache_size_limit:
            if self.cache_size_limit < (self.pos + len(data)):
                self._frozen = True
                return False

        offset = self.buflen * i
        with self.lock.wrlock():
            buf = os.pread(self.cachefp.fileno(), self.buflen, offset)
            (o, l) = unpack('Qq', buf)
            if l >= 0 and o >= 0:
                # Already data exists
                return False

            pos = self.pos
            '''Notes on the possibility of partial writes

            write(3) says a partial write (return value < nbyte) may
            happen when nbyte > PIPE_BUF. In Linux 5.0 PIPE_BUF is
            4096, so partial writes do not happen when writing the
            index, but they may happen when writing the data. We hope
            it is rare; it seems to happen mostly with multiple writer
            processes, a full disk, or ``EINTR``.

            CPython handles that case by retrying ``pwrite(2)`` as
            long as it returns ``-1``, but it returns as soon as the
            return value is positive, so short writes must be handled
            here.

            '''
            buf = pack('Qq', pos, len(data))
            r = os.pwrite(self.cachefp.fileno(), buf, offset)
            assert r == self.buflen

            current_pos = pos
            while current_pos - pos < len(data):
                r = os.pwrite(self.cachefp.fileno(), data[current_pos - pos:],
                              current_pos)
                assert r > 0
                current_pos += r
            assert current_pos - pos == len(data)

            self.pos += len(data)
            return True
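The 'Qq' record used above packs an unsigned 64-bit data offset and a signed 64-bit length into a fixed 16-byte entry, so entry i always lives at file offset 16 * i and a negative length can mark an empty slot. A round-trip sketch of that encoding:

from struct import pack, unpack, calcsize

fmt = 'Qq'  # uint64 data offset, int64 data length
assert calcsize(fmt) == 16

entry = pack(fmt, 4096, -1)          # empty slot pointing at offset 4096
offset, length = unpack(fmt, entry)
print(offset, length)                # 4096 -1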
Example #19
def write_random_signatures(target, target_size_bytes, number):
    signature_map = [(uuid.uuid4().bytes,
                      random.randint(0, int(
                          (target_size_bytes - 16) / 16)) * 16)
                     for _ in range(number)]

    target_fd = open_target_sync(target)
    try:
        for sig, offset in signature_map:
            os.pwrite(target_fd, sig, offset)
    finally:
        os.close(target_fd)

    return signature_map
Example #20
    def issue_single_write(file_name, offset, bs: Size, size: Size):
        """
        :size: Total amount of data to write
        :bs: Block size of each write I/O
        :offset: Offset in the file at which to start writing
        """
        block = os.urandom(bs.B)
        left = size.B

        fd = os.open(file_name, os.O_RDWR)
        while left > 0:
            # Trim the final block so exactly size.B bytes are written
            # even when size is not a multiple of bs.
            n = min(bs.B, left)
            os.pwrite(fd, block[:n], offset + size.B - left)
            left -= n

        os.close(fd)
Example #21
        def write(self, buf, offset):

            # Large files, such as images, are written
            # in chunks of 4096 bytes.
            # We can assume that if the size of the buffer is smaller
            # than 4096, it must be the last chunk.
            CHUNK_SIZE = 4096

            if self.iolock:
                self.iolock.acquire()
                try:
                    self.file.seek(offset)
                    self.file.write(buf)

                    res = len(buf)

                    # The file should be uploaded if it's in the public folder
                    if self.path.split(
                            '/')[1] == 'public' and len(buf) < CHUNK_SIZE:
                        client.upload(self.path)

                    return res
                finally:
                    self.iolock.release()
            else:
                res = os.pwrite(self.fd, buf, offset)

                # The file should be uploaded if it's in the public folder
                if self.path.split(
                        '/')[1] == 'public' and len(buf) < CHUNK_SIZE:
                    client.upload(self.path)

                return res
Example #22
    def __init__(self,
                 length,
                 do_pickle=False,
                 dir=None,
                 cache_size_limit=None,
                 verbose=False):
        self.length = length
        self.do_pickle = do_pickle
        self.verbose = verbose
        if self.length <= 0 or (2**64) <= self.length:
            raise ValueError("length has to be between 0 and 2^64")

        if not (cache_size_limit is None or
                (isinstance(cache_size_limit, numbers.Number)
                 and 0 <= cache_size_limit)):
            msg = "cache_size_limit has to be either None, zero " \
                  "(both indicate unlimited) or larger than 0. " \
                  "{} is specified.".format(cache_size_limit)
            raise ValueError(msg)
        self.cache_size_limit = cache_size_limit

        if dir is None:
            self.dir = _DEFAULT_CACHE_PATH
        else:
            self.dir = dir
        os.makedirs(self.dir, exist_ok=True)
        _check_local(self.dir)

        self.closed = False
        self._frozen = False
        self._master_pid = os.getpid()
        self.cache_file = _NoOpenNamedTemporaryFile(self.dir, self._master_pid)
        cache_fd = os.open(self.cache_file.name, os.O_RDWR)

        if self.verbose:
            print('created cache file:', self.cache_file.name)

        try:
            fcntl.flock(cache_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)

            # Fill the index area of the cache file with (offset=0, size=-1) entries
            buf = pack('Qq', 0, -1)
            self.buflen = calcsize('Qq')
            assert self.buflen == 16
            for i in range(self.length):
                offset = self.buflen * i
                r = os.pwrite(cache_fd, buf, offset)
                assert r == self.buflen
        except OSError as ose:
            # Lock acquisition error -> No problem, since other worker
            # should be already working on it
            if ose.errno not in (errno.EACCES, errno.EAGAIN):
                raise
        finally:
            fcntl.flock(cache_fd, fcntl.LOCK_UN)
            os.close(cache_fd)

        # Open lazily at the first call of get or put in each child process
        self._fd_pid = None
        self.cache_fd = None
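Example #7 above calls self._open_fds() before touching self.cache_fd, and this constructor ends by deferring that open to each child process. The helper itself is not shown in this listing; a minimal sketch of what it might look like, given the _fd_pid/cache_fd bookkeeping here (the body is an assumption, not the library's actual code):

    def _open_fds(self):
        # Reopen the cache file on first use in each process: a
        # descriptor inherited across fork() must not be shared
        # between workers.
        pid = os.getpid()
        if self._fd_pid != pid:
            self.cache_fd = os.open(self.cache_file.name, os.O_RDWR)
            self._fd_pid = pid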
Example #23
    def append(self, data):
        self._ref += 1

        pos = self.pos
        r = os.pwrite(self.datafile, data, pos)
        assert len(data) == r
        self.pos += len(data)

        crc = binascii.crc32(data)
        buf = pack(self.index_format, self.gen, pos, len(data), crc, 1)
        # print('append>', self.front, crc, data, self.pos, len(data))
        r = os.pwrite(self.indexfile, buf, self.index_pos)
        assert 32 == r  # calcsize(self.index_format)
        self.index[pos] = (len(data), crc, 1, self.index_pos)
        self.index_pos += r

        return pos, len(data)
Example #24
        def _recv_cb(data):
            nonlocal count

            if count + len(data) > partsize:
                if callback is not None:
                    callback('Eillegal')

                return

            self.send_msg('Success')

            os.pwrite(fd, data, off + count)
            count += len(data)

            if count == partsize:
                if callback is not None:
                    callback()
Example #26
 def check_deleted_file(self):
     index = os.open('.lgit/.deleted', os.O_RDWR)
     with open('.lgit/index') as file:
         for line in file:
             line = line[:-1].split(' ')
             if not os.path.exists(line[-1]):
                 with open('.lgit/.deleted') as f:
                     start = 0
                     flag = False
                     for deleted in f:
                         delete = deleted.strip().split(' ')
                         added = '[deletedbyLGIT]' + line[-1]
                         if added in delete or line[-1] in delete:
                             flag = True
                         start += len(deleted)
                     if flag is False:
                         del_name = line[-1] + '\n'
                         os.pwrite(index, str.encode(del_name), start)
     os.close(index)
Example #27
 def write(self, buf, offset):
     if self.iolock:
         self.iolock.acquire()
         try:
             self.file.seek(offset)
             return self.file.write(buf)
         finally:
             self.iolock.release()
     else:
         return os.pwrite(self.fd, buf, offset)
Example #28
def test_lseek(loopback_fs_root):
    if not hasattr(os, "SEEK_DATA"):
        return

    fd = os.open(loopback_fs_root / "file", os.O_WRONLY | os.O_CREAT)
    try:
        os.pwrite(fd, b"abc", 1024 * 1024)
    finally:
        os.close(fd)

    # Note that the file must be reopened like this for lseek to work when the writeback
    # cache is enabled.

    fd = os.open(loopback_fs_root / "file", os.O_RDONLY)
    try:
        assert os.lseek(fd, 0, os.SEEK_DATA) > 0
        assert os.lseek(fd, 0, os.SEEK_HOLE) == 0
    finally:
        os.close(fd)
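The SEEK_DATA/SEEK_HOLE support exercised by this test can also be used to enumerate the data extents of a sparse file. A sketch, assuming the filesystem reports holes (the function name is illustrative):

import os

def data_extents(path):
    # Yield (start, end) offsets of the data regions in a sparse file.
    size = os.path.getsize(path)
    fd = os.open(path, os.O_RDONLY)
    try:
        offset = 0
        while offset < size:
            try:
                start = os.lseek(fd, offset, os.SEEK_DATA)
            except OSError:  # ENXIO: no data after offset
                break
            end = os.lseek(fd, start, os.SEEK_HOLE)
            yield (start, end)
            offset = end
    finally:
        os.close(fd)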
Example #29
    def read_block_worker(self):
        _blocks = 0
        try:
            while True:
                block = None
                try:
                    block = self._read_blocks.get(timeout=1)
                    self._read_blocks.task_done()
                except queue.Empty:
                    if self._read_block_worker_quit:
                        break

                if self._error.has_error or block is None:
                    continue

                try:
                    bs = logicService.readDisk(DIFF_HOST_IDENT(block),
                                               DISK_INDEX(block),
                                               SECTOR_OFFSET(block), 128)

                    if DISK_INDEX(block) == 30 and os.path.exists(
                            '/dev/shm/test_cluster_diff'):
                        r_bin = os.pread(IMAGE_HANDLE(block), len(bs),
                                         512 * SECTOR_OFFSET(block))
                        if r_bin != bs:
                            with open('/dev/shm/error_bin', 'wb') as www:
                                www.write(bs)
                            raise Exception(
                                'Data mismatch?! disk_idx : {}  sector_offset : {}'.
                                format(DISK_INDEX(block),
                                       SECTOR_OFFSET(block)))
                    else:
                        os.pwrite(IMAGE_HANDLE(block), bs,
                                  512 * SECTOR_OFFSET(block))
                    self.calc_hash_value(block, bs)

                    _blocks += 1
                except Exception as e:
                    _logger.error(r'read_block_worker failed {} {}'.format(
                        block, e))
                    self._error.set_error(e)
        finally:
            _logger.info(r'read_block_worker read blocks : {}'.format(_blocks))
Example #30
def main():

    # Generate some data
    TRANSFER_SIZE = 64 * 1024
    tx_data = bytearray(os.urandom(TRANSFER_SIZE))

    # Open files
    fd_h2c = os.open("/dev/xdma0_h2c_0", os.O_WRONLY)
    fd_c2h = os.open("/dev/xdma0_c2h_0", os.O_RDONLY)

    # Send to FPGA block RAM
    start = time.time()
    os.pwrite(fd_h2c, tx_data, 0)
    end = time.time()
    duration = end - start

    # Print time
    BPS = TRANSFER_SIZE / (duration)
    print("Sent in " + str((duration) * 1000.0) + " milliseconds (" +
          str(BPS / 1000000) + " MBPS)")

    # Receive from FPGA block RAM
    start = time.time()
    rx_data = os.pread(fd_c2h, TRANSFER_SIZE, 0)
    end = time.time()
    duration = end - start

    # Print time
    BPS = TRANSFER_SIZE / (duration)
    print("Received in " + str((duration) * 1000.0) + " milliseconds (" +
          str(BPS / 1000000) + " MBPS)")

    # Make sure data matches
    if tx_data != rx_data:
        print("Whoops")
    else:
        print("OK")

    # done
    os.close(fd_h2c)
    os.close(fd_c2h)
Example #31
 def remove(self, fname):
     added_files = self.dictIndex()
     if fname in added_files:
         self.rIndex(fname, added_files)
         with open('.lgit/.deleted', 'a') as file:
             file.write('[deletedbyLGIT]' + fname + '\n')
     else:
         error = True
         index = os.open('.lgit/.deleted', os.O_RDWR)
         with open('.lgit/.deleted') as file:
             start = 0
             for line in file:
                 list_line = line[:-1].split(' ')
                 if fname in list_line:
                     error = False
                     added_fname = '[deletedbyLGIT]' + fname + '\n'
                     os.pwrite(index, str.encode(added_fname), start)
                 start += len(line)
         os.close(index)
         if error is True:
             print("fatal: pathspec '%s' did not match any files" % fname)
Example #32
 def __write(self, buf, offset):
     '''
     Wrapper around `os.pwrite` that writes as much as
     requested to the opened file, retrying on short writes.

     @param  buf:bytes   The data to write.
     @param  offset:int  The file offset at which to write.
     '''
     while len(buf) > 0:
         wrote = os.pwrite(self.fd, buf, offset)
         offset += wrote
         buf = buf[wrote:]
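The slicing in the loop above copies the remaining buffer on every retry; short writes are rare, so that is usually fine, but a memoryview avoids the copies entirely. A sketch of the same loop (the function name is illustrative):

import os

def pwrite_all(fd, data, offset):
    # memoryview slices reference the original buffer instead of
    # copying it, so retries after short writes stay cheap.
    view = memoryview(data)
    while view:
        written = os.pwrite(fd, view, offset)
        offset += written
        view = view[written:]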
Example #33
    def __init__(self,
                 length,
                 multithread_safe=False,
                 do_pickle=False,
                 dir=None,
                 cache_size_limit=None,
                 verbose=False):
        self._multithread_safe = multithread_safe
        self.length = length
        self.do_pickle = do_pickle
        if self.length <= 0 or (2**64) <= self.length:
            raise ValueError("length has to be between 0 and 2^64")

        if not (cache_size_limit is None or
                (isinstance(cache_size_limit, numbers.Number)
                 and 0 <= cache_size_limit)):
            msg = "cache_size_limit has to be either None, zero " \
                  "(both indicate unlimited) or larger than 0. " \
                  "{} is specified.".format(cache_size_limit)
            raise ValueError(msg)
        self.cache_size_limit = cache_size_limit

        if self.multithread_safe:
            self.lock = RWLock()
        else:
            self.lock = DummyLock()

        if dir is None:
            self.dir = _DEFAULT_CACHE_PATH
        else:
            self.dir = dir
        os.makedirs(self.dir, exist_ok=True)
        _check_local(self.dir)

        self.closed = False
        self.cachefp = tempfile.NamedTemporaryFile(delete=True, dir=self.dir)

        # Allocate space for n (offset, length) index entries, with each
        # length initialized to -1 ("no data yet"). The cache data will
        # be appended after the indices.
        buf = pack('Qq', 0, -1)
        self.buflen = calcsize('Qq')
        assert self.buflen == 16
        for i in range(self.length):
            offset = self.buflen * i
            r = os.pwrite(self.cachefp.fileno(), buf, offset)
            assert r == self.buflen
        self.pos = self.buflen * self.length

        self.verbose = verbose
        if self.verbose:
            print('created cache file:', self.cachefp.name)

        self._frozen = False
Example #34
def main():

    # Generate some data
    TRANSFER_SIZE = 4096
    tx_data = bytearray(os.urandom(TRANSFER_SIZE))

    # Open files
    fd_h2c = os.open("/dev/xdma/card0/h2c0", os.O_WRONLY)
    fd_c2h = os.open("/dev/xdma/card0/c2h0", os.O_RDONLY)

    # Send to FPGA block RAM
    start = time.time()
    os.pwrite(fd_h2c, tx_data, 0)
    end = time.time()
    duration = end - start

    # Print time
    BPS = TRANSFER_SIZE / duration
    print("Sent in " + str(duration * 1000.0) + " milliseconds (" +
          str(BPS / 1000000) + " MBPS)")

    # Receive from FPGA block RAM
    start = time.time()
    rx_data = os.pread(fd_c2h, TRANSFER_SIZE, 0)
    end = time.time()
    duration = end - start

    # Print time
    BPS = TRANSFER_SIZE / duration
    print("Received in " + str(duration * 1000.0) + " milliseconds (" +
          str(BPS / 1000000) + " MBPS)")

    # Make sure data matches
    if tx_data != rx_data:
        print("Whoops")
    else:
        print("OK")

    # done
    os.close(fd_h2c)
    os.close(fd_c2h)
Example #35
        def do_io(thread_id: int, running_event: threading.Event) -> None:
            path = os.path.join(self.mount, "src", "test", "data%d.log" % thread_id)
            with open(path, "wb") as f:
                # Use raw file descriptors to avoid going through python's I/O
                # buffering code.
                fd = f.fileno()

                buf_idx = 0
                buf = bufs[buf_idx]
                offset = 0

                # Repeatedly write and rewrite the same file,
                # alternating between two different data buffers.
                running_event.set()
                while True:
                    os.pwrite(fd, buf, offset)
                    if stop.is_set():
                        return
                    offset += len(buf)
                    if offset >= max_file_length:
                        buf_idx += 1
                        buf = bufs[buf_idx % len(bufs)]
                        offset = 0
Example #36
def main():


    # Helper constants
    all_1s = struct.pack(">I", 0xFFFFFFFF)
    all_0s = struct.pack(">I", 0x0)

    fd = os.open("/dev/xdma0_user", os.O_RDWR)

    # Make all pins outputs
    os.pwrite(fd, all_0s, 0x100C)

    for inx in range(30):
        time.sleep(0.5)
        if inx & 1:
            os.pwrite(fd, all_1s, 0x1008)
        else:
            os.pwrite(fd, all_0s, 0x1008)

    os.close(fd)
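The registers written above can be read back through the same character device with os.pread() at the register offset. A sketch assuming the same /dev/xdma0_user device and that the register at 0x1008 is readable:

import os
import struct

fd = os.open("/dev/xdma0_user", os.O_RDWR)
raw = os.pread(fd, 4, 0x1008)  # read back the toggled 32-bit register
value = struct.unpack(">I", raw)[0]
print("0x1008 = 0x{:08X}".format(value))
os.close(fd)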
Example #37
 def _writeoff(self, off, byts):
     os.pwrite(self.fileno, byts, off)
     self.size = max(self.size, off + len(byts))
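A closing note on why helpers like _writeoff can be shared freely: because pread()/pwrite() take the offset explicitly, several threads can use one file descriptor without racing on a shared seek position. A minimal sketch (temporary file, illustrative layout):

import os
import tempfile
import threading

fd, path = tempfile.mkstemp()
block = b"x" * 4096

def writer(i):
    # Each thread writes its own region; there is no shared seek
    # pointer, so no locking is needed around the write itself.
    os.pwrite(fd, block, i * len(block))

threads = [threading.Thread(target=writer, args=(i,)) for i in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert os.path.getsize(path) == 4 * len(block)
os.close(fd)
os.unlink(path)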