Example no. 1
    def destroy(self, *args, **kwargs):
        # zero out the 1MB at the beginning and end of the device in the
        # hope that it will wipe any metadata from filesystems that
        # previously occupied this device
        ctx.logger.debug("zeroing out beginning and end of %s..." % self.device)
        fd = None

        try:
            fd = os.open(self.device, os.O_RDWR)
            buf = '\0' * 1024 * 1024
            os.write(fd, buf)
            os.lseek(fd, -1024 * 1024, 2)
            os.write(fd, buf)
            os.close(fd)
        except OSError as e:
            if getattr(e, "errno", None) == 28:  # ENOSPC: no space left on device
                pass
            else:
                ctx.logger.error("error zeroing out %s: %s" % (self.device, e))

            if fd:
                os.close(fd)
        except Exception as e:
            ctx.logger.error("error zeroing out %s: %s" % (self.device, e))
            if fd:
                os.close(fd)

        self.exists = False
Example no. 2
def _write(root, path, buf, offset, fh):
    f_path = full_path(root, path)
    vnfs_ops = VNFSOperations(root)
    file_name = vnfs_ops.vnfs_get_file_name(f_path)
    #if file_name == "action":
    if file_name in special_files and special_files[file_name]+'_write' in globals():
        try:
            nf_config = get_nf_config(vnfs_ops, f_path)
            # call the custom write function
            logger.info('Writing to ' + file_name  + ' in ' + 
                nf_config['nf_instance_name'] + '@' + nf_config['host'])
            ret_str = globals()[special_files[file_name]+'_write'](vnfs_ops._hypervisor, 
                nf_config, buf.rstrip("\n"))
        except errors.HypervisorError, ex:
            logger.debug('raised OSError ' + str(ex.errno))
            raise OSError(ex.errno, os.strerror(ex.errno))
        logger.info('Successfully wrote ' + file_name + 
            ' in ' + nf_config['nf_instance_name'] + '@' + nf_config['host'])

        #if buf.rstrip("\n") == "activate":
        #    try:
        #        vnfs_ops.vnfs_deploy_nf(nf_path)
        #    except errors.VNFCreateError:
        #        #raise OSError(errno.EBUSY, os.strerror(errno.EBUSY))
        #        raise OSError(747, 'Cannot create VNF')
        #elif buf.rstrip("\n") == "stop":
        #    vnfs_ops.vnfs_stop_vnf(nf_path)
        #elif buf.rstrip("\n") == "start":
        #    vnfs_ops.vnfs_start_vnf(nf_path)
        #elif buf.rstrip("\n") == "destroy":
        #    vnfs_ops.vnfs_destroy_vnf(nf_path)
        os.lseek(fh, offset, os.SEEK_SET)
        os.write(fh, buf.rstrip("\n"))
        return len(buf)
Example no. 3
    def getLabel(self, partition):
        label = None
        fd = self.openPartition(partition)

        # valid block sizes in reiserfs are 512 - 8192, powers of 2
        # we put 4096 first, since it's the default
        # reiserfs superblock occupies either the 2nd or 16th block
        for blksize in (4096, 512, 1024, 2048, 8192):
            for start in (blksize, (blksize*16)):
                try:
                    os.lseek(fd, start, 0)
                    # read 120 bytes to get s_magic and s_label
                    buf = os.read(fd, 120)

                    # see if this block is the superblock
                    # this reads reiserfs_super_block_v1.s_magic as defined
                    # in include/reiserfs_fs.h in the reiserfsprogs source
                    m = string.rstrip(buf[52:61], "\x00")
                    if m == "ReIsErFs" or m == "ReIsEr2Fs" or m == "ReIsEr3Fs":
                        # this reads reiserfs_super_block.s_label as
                        # defined in include/reiserfs_fs.h
                        label = string.rstrip(buf[100:116], "\x00")
                        os.close(fd)
                        return label
                except OSError, e:
                    # [Error 22] probably means we're trying to read an
                    # extended partition. 
                    e = "error reading reiserfs label on %s: %s" %(partition.getPath(), e)
                    raise YaliException, e
Example no. 4
def nonblocking_readlines(fd, conf):

    os.lseek(fd, 0, os.SEEK_SET)

    buf = bytearray()
    remaining_bytes = conf.BUF_SIZE
    
    while remaining_bytes > 0:
        try:
            block = os.read(fd, conf.BUF_SIZE) #read BUF_SIZE-byte chunks at a time
        #deleteContent(fd)
        except BlockingIOError:
            # nothing to read yet: wait and retry instead of falling
            # through with 'block' unassigned
            time.sleep(conf.SLEEP_TIME)
            continue
        
        remaining_bytes -= len(block)
        
        #print >> sys.stderr, "data is %d Bytes"%(len(block))
        
        if not block:
            if buf:
                buf.clear()
            return None
        
        buf.extend(block)

        if remaining_bytes > 0:
            time.sleep(conf.SLEEP_TIME)
        
    return buf
Example no. 5
def _check_xml_stream(stream):
	xml_comment_start = '<!-- Base64 Signature'

	data_to_check = stream.read()

	last_comment = data_to_check.rfind('\n' + xml_comment_start)
	if last_comment < 0:
		raise SafeException(_("No signature block in XML. Maybe this file isn't signed?"))
	last_comment += 1	# Include new-line in data
	
	data = tempfile.TemporaryFile()
	data.write(data_to_check[:last_comment])
	data.flush()
	os.lseek(data.fileno(), 0, 0)

	errors = tempfile.TemporaryFile()

	sig_lines = data_to_check[last_comment:].split('\n')
	if sig_lines[0].strip() != xml_comment_start:
		raise SafeException(_('Bad signature block: extra data on comment line'))
	while sig_lines and not sig_lines[-1].strip():
		del sig_lines[-1]
	if sig_lines[-1].strip() != '-->':
		raise SafeException(_('Bad signature block: last line is not end-of-comment'))
	sig_data = '\n'.join(sig_lines[1:-1])

	if re.match('^[ A-Za-z0-9+/=\n]+$', sig_data) is None:
		raise SafeException(_("Invalid characters found in base 64 encoded signature"))
	try:
		sig_data = base64.decodestring(sig_data) # (b64decode is Python 2.4)
	except Exception as ex:
		raise SafeException(_("Invalid base 64 encoded signature: %s") % str(ex))

	sig_fd, sig_name = tempfile.mkstemp(prefix = 'injector-sig-')
	try:
		sig_file = os.fdopen(sig_fd, 'w')
		sig_file.write(sig_data)
		sig_file.close()

		# Note: Should ideally close status_r in the child, but we want to support Windows too
		child = _run_gpg([# Not all versions support this:
				  #'--max-output', str(1024 * 1024),
				  '--batch',
				  # Windows GPG can only cope with "1" here
				  '--status-fd', '1',
				  # Don't try to download missing keys; we'll do that
				  '--keyserver-options', 'no-auto-key-retrieve',
				  '--verify', sig_name, '-'],
			   stdin = data,
			   stdout = subprocess.PIPE,
			   stderr = errors)

		try:
			sigs = _get_sigs_from_gpg_status_stream(child.stdout, child, errors)
		finally:
			os.lseek(stream.fileno(), 0, 0)
			stream.seek(0)
	finally:
		os.unlink(sig_name)
	return (stream, sigs)
Example no. 6
 def test_runcmd_redirects_stdin_from_file(self):
     fd, filename = tempfile.mkstemp()
     os.write(fd, 'foobar')
     os.lseek(fd, 0, os.SEEK_SET)
     self.assertEqual(cliapp.runcmd_unchecked(['cat'], stdin=fd),
                      (0, 'foobar', ''))
     os.close(fd)
Example no. 7
    def read_disk_bytes(self, r=ALL, progresscb=do_nothing):
        if r.end == r.start:
            return ''
        with self.lock:
            fd, st = self.get_fd()
            if fd is None:
                raise IOError("Not a regular file")

            result = []
            offset = r.start
            last_res = 'x'

            os.lseek(fd, offset, os.SEEK_SET)

            if r.end == END:
                expected_end = st.st_size
            else:
                expected_end = r.end

            progresscb(offset - r.start, expected_end - offset, '')

            while last_res != '' and (r.end is END or offset < r.end):
                if r.end == END:
                    bytestoread = 4096
                else:
                    bytestoread = min(4096, r.end - offset)
                last_res = os.read(fd, bytestoread)
                if last_res:
                    offset += len(last_res)
                    if not progresscb(offset - r.start, expected_end - r.start, last_res):
                        result.append(last_res)

        return ''.join(result)
Example no. 8
    def prepare_files(self):
        if urlparse.urlsplit(self.inurl)[0] == 'file':
            self.infname = urllib.url2pathname(urlparse.urlsplit(self.inurl)[2])
            self.infd = open(self.infname)
        else:
            # not a file url. download it.
            source = urllib.urlopen(self.inurl)
            self.infd, self.infname = tempfile.mkstemp(prefix="transcode-in-",
                suffix="." + self.inext)
            self._files_to_clean_up_on_success.append((self.infd, self.infname))
            self._files_to_clean_up_on_error.append((self.infd, self.infname))
            while True:
                chunk = source.read(1024 * 64)
                if not chunk:
                    break
                os.write(self.infd, chunk)
            os.lseek(self.infd, 0, 0)

        self.outfd, self.outfname = tempfile.mkstemp(prefix="transcode-out-",
            suffix="." + self.tofmt)
        self._files_to_clean_up_on_error.append((self.outfd, self.outfname))

        self.errfh, self.errfname = tempfile.mkstemp(prefix="transcode-",
            suffix=".log")
        self.outurl = urlparse.urlunsplit(
            ["file", None, self.outfname, None, None])
        self._files_to_clean_up_on_success.append((self.errfh, self.errfname))
        log.debug("Reading from " + self.infname + " (" + self.inurl + ")")
        log.debug("Outputting to " + self.outfname + " (" + self.outurl + ")")
        log.debug("Errors to " + self.errfname)
Example no. 9
def _general_print(head, color, msg=None, stream=None, level="normal"):
    global LOG_CONTENT
    if not stream:
        stream = sys.stdout

    if LOG_LEVELS[level] > LOG_LEVEL:
        # skip
        return

    # encode raw 'unicode' str to utf8 encoded str
    if msg and isinstance(msg, unicode):
        msg = msg.encode("utf-8", "ignore")

    errormsg = ""
    if CATCHERR_BUFFILE_FD > 0:
        size = os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_END)
        os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_SET)
        errormsg = os.read(CATCHERR_BUFFILE_FD, size)
        os.ftruncate(CATCHERR_BUFFILE_FD, 0)

    # append error msg to LOG
    if errormsg:
        LOG_CONTENT += errormsg

    # append normal msg to LOG
    save_msg = msg.strip() if msg else None
    if save_msg:
        global HOST_TIMEZONE
        timestr = time.strftime("[%m/%d %H:%M:%S] ", time.gmtime(time.time() - HOST_TIMEZONE))
        LOG_CONTENT += timestr + save_msg + "\n"

    if errormsg:
        _color_print("", NO_COLOR, errormsg, stream, level)

    _color_print(head, color, msg, stream, level)
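The CATCHERR_BUFFILE_FD descriptor drained above (seek to the end for the size, rewind, read, truncate) has to be wired up elsewhere. A minimal sketch of how such a capture descriptor is typically created, redirecting fd 2 into a scratch file; the helper name and setup are assumptions, not the module's actual code:

import os
import tempfile

def setup_stderr_capture():
    # Hypothetical setup: route everything written to fd 2 (stderr) into a
    # temporary buffer file that _general_print can rewind, read and truncate.
    buffile_fd, buffile_path = tempfile.mkstemp(prefix="catcherr-")
    os.dup2(buffile_fd, 2)
    return buffile_fd, buffile_path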
Example no. 10
    def _confirm_commit_msg(self, diff_output):
        """
        Generates a commit message in a temporary file, gives the user a
        chance to edit it, and returns the filename to the caller.
        """

        fd, name = tempfile.mkstemp()
        debug("Storing commit message in temp file: %s" % name)
        os.write(fd, "Update %s to %s\n" % (self.project_name,
            self.builder.build_version))
        # Write out Resolves line for all bugzillas we see in commit diff:
        for line in extract_bzs(diff_output):
            os.write(fd, line + "\n")

        print("")
        print("##### Commit message: #####")
        print("")

        os.lseek(fd, 0, 0)
        file = os.fdopen(fd)
        for line in file.readlines():
            print line
        file.close()

        print("")
        print("###############################")
        print("")
        if self._ask_yes_no("Would you like to edit this commit message? [y/n] ", False):
            debug("Opening editor for user to edit commit message in: %s" % name)
            editor = 'vi'
            if "EDITOR" in os.environ:
                editor = os.environ["EDITOR"]
            subprocess.call(editor.split() + [name])

        return name
Example no. 11
    def write_file(self, respiter):
        if sendfile and hasattr(respiter.filelike, 'fileno') and \
               hasattr(respiter.filelike, 'tell'):

            fileno = respiter.filelike.fileno()
            fd_offset = os.lseek(fileno, 0, os.SEEK_CUR)
            fo_offset = respiter.filelike.tell()
            nbytes = max(os.fstat(fileno).st_size - fo_offset, 0)

            if self.clength:
                nbytes = min(nbytes, self.clength)

            if nbytes == 0:
                return

            self.send_headers()

            if self.is_chunked():
                self.sock.sendall("%X\r\n" % nbytes)

            self.sendfile_all(fileno, self.sock.fileno(), fo_offset, nbytes)

            if self.is_chunked():
                self.sock.sendall("\r\n")

            os.lseek(fileno, fd_offset, os.SEEK_SET)
        else:
            for item in respiter:
                self.write(item)
Example no. 12
 def setUp(self):
     self.loop = pyuv.Loop.default_loop()
     with open(TEST_FILE, "w") as f:
         f.write("begin\n")
         os.lseek(f.fileno(), 65536, os.SEEK_CUR)
         f.write("end\n")
         f.flush()
Example no. 13
    def get_output(self, code, filename=None, fd=None):
        """
        Run the specified code in Python (in a new child process) and read the
        output from the standard error or from a file (if filename is set).
        Return the output lines as a list.

        Strip the reference count from the standard error for Python debug
        build, and replace "Current thread 0x00007f8d8fbd9700" by "Current
        thread XXX".
        """
        code = dedent(code).strip()
        pass_fds = []
        if fd is not None:
            pass_fds.append(fd)
        with support.SuppressCrashReport():
            process = script_helper.spawn_python('-c', code, pass_fds=pass_fds)
            with process:
                stdout, stderr = process.communicate()
                exitcode = process.wait()
        output = support.strip_python_stderr(stdout)
        output = output.decode('ascii', 'backslashreplace')
        if filename:
            self.assertEqual(output, '')
            with open(filename, "rb") as fp:
                output = fp.read()
            output = output.decode('ascii', 'backslashreplace')
        elif fd is not None:
            self.assertEqual(output, '')
            os.lseek(fd, 0, os.SEEK_SET)
            with open(fd, "rb", closefd=False) as fp:
                output = fp.read()
            output = output.decode('ascii', 'backslashreplace')
        return output.splitlines(), exitcode
Example no. 14
 def session_read(self, fn):
     """ Reads a session file and returns a set of IDs locked by that session.
         The global lock MUST be held for this function to work, although on NFS additional
         locking is done
         Raises RepositoryError if severe access problems occur (corruption otherwise!) """
     try:
         # This can fail (thats OK, file deleted in the meantime)
         fd = self.delay_session_open(fn)
         os.lseek(fd, 0, 0)
         try:
             if not self.afs:  # additional locking for NFS
                 fcntl.lockf(fd, fcntl.LOCK_SH)
             try:
                 # read up to 1 MB (that is more than enough...)
                 return pickle.loads(os.read(fd, 1048576))
             except Exception as x:
                 logger.warning(
                     "corrupt or inaccessible session file '%s' - ignoring it (Exception %s %s)."
                     % (fn, x.__class__.__name__, str(x))
                 )
         finally:
             if not self.afs:  # additional locking for NFS
                 fcntl.lockf(fd, fcntl.LOCK_UN)
             os.close(fd)
     except OSError as x:
         if x.errno != errno.ENOENT:
             raise RepositoryError(self.repo, "Error on session file access '%s': %s" % (fn, x))
     return set()
Example no. 15
 def test_pdeathsig_works(self):
     fd, tmpfile = tempfile.mkstemp()
     os.write(fd, '\x00' * mmap.PAGESIZE)
     os.lseek(fd, 0, os.SEEK_SET)
     buf = mmap.mmap(fd, mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_READ)
     CHILD_SIGNAL = signal.SIGTERM
     KILL_PARENT_WITH_SIGNAL = signal.SIGTERM
     try:
         args = [sys.executable, __file__, 'run_parent', self.prctl_module_name, tmpfile, str(CHILD_SIGNAL)]
         parent_pid = os.spawnv(os.P_NOWAIT, sys.executable, args)
         time.sleep(0.2)
         child_pid_line = buf.readline()
         child_pid = int(child_pid_line.strip())
         os.kill(parent_pid, KILL_PARENT_WITH_SIGNAL)
         _, exit_info = os.waitpid(parent_pid, 0)
         exit_code, received_signal = exit_info >> 8, exit_info & 0xff
         assert exit_code == 0
         assert received_signal == KILL_PARENT_WITH_SIGNAL
         time.sleep(0.2)
         buf.seek(50, os.SEEK_SET)
         child_signal_line = buf.readline()
         child_signum = int(child_signal_line.strip())
         assert child_signum == CHILD_SIGNAL
     finally:
         os.close(fd)
         if os.path.exists(tmpfile):
             os.remove(tmpfile)
Example no. 16
 def read(self, path, size, offset, fh):
     """
     Read calls are redirected here
     """
     with self.rwlock:
         os.lseek(fh, offset, 0)
         return os.read(fh, size)
Example no. 17
 def read(self):
     try:
         pid = int(os.read(self.fd, 128))
         os.lseek(self.fd, 0, os.SEEK_SET)
         return pid
     except ValueError:
         return
Example no. 18
def clixxIOSetupSHM():
    """
    Sensor Data is exchanged using Shared Memory on Linux. This
    method configures the shared memory space so that multiple
    clients can read/write to the space.
    """
    global clixxIOshmfd, clixxIOshmBuff

    if clixxIOshmfd is None:

        # Create new empty file to back memory map on disk
        if not os.path.exists(clixxIOshmPath):
            clixxIOshmfd = os.open(
                clixxIOshmPath,
                os.O_CREAT | os.O_TRUNC | os.O_RDWR)

            # Zero out the file to ensure it's the right size
            os.write(clixxIOshmfd, ' ' * mmap.PAGESIZE)
            os.lseek(clixxIOshmfd, 0, os.SEEK_SET)
            os.write(clixxIOshmfd, '\n')

        else:
            clixxIOshmfd = os.open(clixxIOshmPath, os.O_RDWR)

        # Create the mmap instance with the following params:
        # fd: File descriptor which backs the mapping or -1 for anonymous mapping
        # length: Must in multiples of PAGESIZE (usually 4 KB)
        # flags: MAP_SHARED means other processes can share this mmap
        # prot: PROT_WRITE means this process can write to this mmap
        clixxIOshmBuff = mmap.mmap(
            clixxIOshmfd,
            mmap.PAGESIZE,
            mmap.MAP_SHARED,
            mmap.PROT_WRITE)
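A reader-side sketch for the same shared-memory file, assuming clixxIOshmPath points at the file created above and that a read-only mapping is enough for a consumer:

import mmap
import os

fd = os.open(clixxIOshmPath, os.O_RDONLY)
reader = mmap.mmap(fd, mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_READ)

reader.seek(0)
print(reader.readline())   # first line written by the producer

reader.close()
os.close(fd)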
Example no. 19
    def getChunkData(self, bucket, s3file, localfile, min, max):
        """ Get the chunk (block) data from S3 bucket

        :param bucket:      s3 bucket
        :param s3file:      file name on s3
        :param localfile:   local file name
        :param min:         Minimum offset in the file
        :param max:         Maximum offset to read in the file
        """
        try:
            response = self.m_connection.make_request("GET", bucket=bucket,
                            key=s3file, headers={'Range':"bytes=%d-%d" % (min, max)})

            # Open a file descriptor with append and create flags
            fd = os.open(localfile, os.O_WRONLY|os.O_APPEND|os.O_CREAT)
            # Seek the file to 0
            os.lseek(fd, 0, os.SEEK_SET)
            # set the chunk size to different between max and min
            chunk_size = max-min

            # read the data
            data = response.read(chunk_size)
            if data == "":
                # data is null, close the file descriptor
                os.close(fd)
            else:
                # data exists, write the data to file descriptor
                os.write(fd, data)
                os.close(fd)

        except Exception as err:
            self.m_logger.error("Unable to complete the multipart download " + str(err))
            sys.exit(1)
Example no. 20
    def bdd_to_cnf(self, condition):
        """Converts the expression to conjunctive normal form

        @returns a list of terms, represented as lists of factors"""
        stdout = os.dup(1)
        temp_fd, temp_filename = tempfile.mkstemp()
        sys.stdout.flush()
        os.dup2(temp_fd, 1)
        condition.PrintMinterm()
        sys.stdout.flush()
        os.dup2(stdout, 1)
        os.close(stdout)
        os.lseek(temp_fd, 0, os.SEEK_SET)
        with os.fdopen(temp_fd) as f:
            expression = []
            for line in filter(lambda l: len(l) > 0, (l.rstrip() for l in f.readlines())):
                minterm = list(line)
                term = []
                for name in self.boolean_variables:
                    value = minterm[self.boolean_variables[name].index]
                    if value == '1':
                        term.append(name)
                    elif value == '0':
                        term.append("!" + name)
                    elif value == '-':
                        pass
                    else: fatal("Unknown setting for variable in minterm", minterm)
                expression.append(term)
            return expression
        fatal("Could not open temp file containing minterms", temp_file)
Example no. 21
File: ids.py Project: WatNFV/nf.io
def _read(root, path, length, offset, fh):
    f_path = full_path(root, path)
    vnfs_ops = VNFSOperations(root)
    file_name = vnfs_ops.vnfs_get_file_name(f_path)
    nf_path = ""
    if file_name in special_files:
        tokens = f_path.encode("ascii").split("/")
        last_index_to_keep = tokens.index("nf-types") + 3
        nf_path = "/".join(tokens[0:last_index_to_keep])
    if file_name == "rx_bytes":
        ret_str = vnfs_ops.vnfs_get_rx_bytes(nf_path)
        if offset >= len(ret_str):
            ret_str = ""
    elif file_name == "tx_bytes":
        ret_str = vnfs_ops.vnfs_get_tx_bytes(nf_path)
        if offset >= len(ret_str):
            ret_str = ""
    elif file_name == "pkt_drops":
        ret_str = vnfs_ops.vnfs_get_pkt_drops(nf_path)
        if offset >= len(ret_str):
            ret_str = ""
    elif file_name == "status":
        ret_str = vnfs_ops.vnfs_get_status(nf_path)
        if offset >= len(ret_str):
            ret_str = ""
    else:
        os.lseek(fh, offset, os.SEEK_SET)
        ret_str = os.read(fh, length)
    return ret_str
Example no. 22
    def __init__(self, num_bits, filename, max_bytes_in_memory):
        self.num_bits = num_bits
        num_chars = (self.num_bits + 7) // 8
        self.filename = filename
        self.max_bytes_in_memory = max_bytes_in_memory
        self.bits_in_memory = min(num_bits, self.max_bytes_in_memory * 8)
        self.bits_in_file = max(self.num_bits - self.bits_in_memory, 0)
        self.bytes_in_memory = (self.bits_in_memory + 7) // 8
        self.bytes_in_file = (self.bits_in_file + 7) // 8

        self.array_ = array.array('B', [0]) * self.bytes_in_memory
        flags = os.O_RDWR | os.O_CREAT
        if hasattr(os, 'O_BINARY'):
            flags |= getattr(os, 'O_BINARY')
        self.file_ = os.open(filename, flags)
        os.lseek(self.file_, num_chars + 1, os.SEEK_SET)
        os.write(self.file_, python2x3.null_byte)

        os.lseek(self.file_, 0, os.SEEK_SET)
        offset = 0
        intended_block_len = 2 ** 17
        while True:
            if offset + intended_block_len < self.bytes_in_memory:
                block = os.read(self.file_, intended_block_len)
            elif offset < self.bytes_in_memory:
                block = os.read(self.file_, self.bytes_in_memory - offset)
            else:
                break
            for index_in_block, character in enumerate(block):
                self.array_[offset + index_in_block] = ord(character)
            offset += intended_block_len
Example no. 23
def read( fd ):
    '''
    Returns: (serial_number, unpickled_object) or raises a FileCorrupted exception
    '''
    os.lseek(fd, 0, os.SEEK_SET)
        
    md5hash       = os.read(fd, 16)
    data1         = os.read(fd, 8)
    data2         = os.read(fd, 8)
    
    if ( (not md5hash or len(md5hash) != 16) or
         (not data1   or len(data1)   !=  8) or
         (not data2   or len(data2)   !=  8) ):
        raise FileTruncated()
    
    serial_number = struct.unpack('>Q', data1)[0]
    pickle_length = struct.unpack('>Q', data2)[0]

    data3         = os.read(fd, pickle_length)

    if not data3 or len(data3) != pickle_length:
        raise FileTruncated()

    m = hashlib.md5()
    m.update( data1 )
    m.update( data2 )
    m.update( data3 )
    
    if not m.digest() == md5hash:
        raise HashMismatch()
    
    return serial_number, pickle.loads(data3)
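The layout read above (16-byte MD5 digest, two big-endian 64-bit integers, then the pickle payload) implies a matching writer. A sketch of what such a counterpart could look like; write_record is illustrative and not part of the original module:

import hashlib
import os
import pickle
import struct

def write_record(fd, serial_number, obj):
    # Illustrative counterpart to read(): md5(16) + serial(8, big-endian) +
    # pickle length(8, big-endian) + pickle data, written from offset 0.
    data3 = pickle.dumps(obj)
    data1 = struct.pack('>Q', serial_number)
    data2 = struct.pack('>Q', len(data3))
    m = hashlib.md5()
    m.update(data1)
    m.update(data2)
    m.update(data3)
    os.lseek(fd, 0, os.SEEK_SET)
    os.write(fd, m.digest() + data1 + data2 + data3)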
Example no. 24
File: ohds.py Project: htsst/ohds
 def write(self, data, offset, fd):
     self.iostat.n_write += 1
     rv = 0
     with self.rwlock:
         os.lseek(fd, offset, 0)
         rv = os.write(fd, data)
     return rv
Example no. 25
 def run(self):
     """Run the thread's portion of the test."""
     d = self.d
     ti = self.ti
     iop_size = ti.iop_size
     loclist = ti.loclist
     outfile = ti.outfile
     stop_event = ti.stop_event
     fails = 0
     for i in range(self.rank, self.ti.iop_cnt, self.ti.rthreads):
         if stop_event.is_set():
             break
         loc = loclist[i]
         os.lseek(d, loc, os.SEEK_SET)
         junk = os.read(d, iop_size)
         if len(junk) < iop_size:
             logging.warn('Short read!!! IOP %i', i)
             fails += 1
             if fails > 3:
                 break
             continue
         if junk != ti.iop_bytes(loc):
             logging.warn('Read does not match! IOP # %i', i)
             fails += 1
             if fails > 3:
                 break
     os.close(d)
Example no. 26
def count(delta, file_path):
    """
    Increments counter file and returns the max number of times the file
    has been modified. Counter data must be in the form:
    concurrent tasks, max concurrent tasks (counter should be initialized to 0,0)

    :param int delta: increment value
    :param str file_path: path to shared counter file
    :return int max concurrent tasks:
    """
    fd = os.open(file_path, os.O_RDWR)
    try:
        fcntl.flock(fd, fcntl.LOCK_EX)
        try:
            s = os.read(fd, 10)
            value, maxValue = map(int, s.split(','))
            value += delta
            if value > maxValue: maxValue = value
            os.lseek(fd, 0, 0)
            os.ftruncate(fd, 0)
            os.write(fd, ','.join(map(str, (value, maxValue))))
        finally:
            fcntl.flock(fd, fcntl.LOCK_UN)
    finally:
        os.close(fd)
    return maxValue
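A minimal usage sketch for count(), assuming the counter file has been seeded with "0,0" as the docstring requires; the path is illustrative:

counter_path = "/tmp/task_counter"   # illustrative path
with open(counter_path, "w") as f:
    f.write("0,0")                   # counter must start at 0,0

count(+1, counter_path)              # a task starts
peak = count(-1, counter_path)       # the task ends
print("max concurrent tasks so far:", peak)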
Example no. 27
File: ohds.py Project: htsst/ohds
 def read(self, size, offset, fd):
     self.iostat.n_read += 1
     rv = 0
     with self.rwlock:
         os.lseek(fd, offset, 0)
         rv = os.read(fd,size)
     return rv
Example no. 28
def _check_xml_stream(stream):
	xml_comment_start = '<!-- Base64 Signature'

	data_to_check = stream.read()

	last_comment = data_to_check.rfind('\n' + xml_comment_start)
	if last_comment < 0:
		raise SafeException(_("No signature block in XML. Maybe this file isn't signed?"))
	last_comment += 1	# Include new-line in data
	
	data = tempfile.TemporaryFile()
	data.write(data_to_check[:last_comment])
	data.flush()
	os.lseek(data.fileno(), 0, 0)

	errors = tempfile.TemporaryFile()

	sig_lines = data_to_check[last_comment:].split('\n')
	if sig_lines[0].strip() != xml_comment_start:
		raise SafeException(_('Bad signature block: extra data on comment line'))
	while sig_lines and not sig_lines[-1].strip():
		del sig_lines[-1]
	if sig_lines[-1].strip() != '-->':
		raise SafeException(_('Bad signature block: last line is not end-of-comment'))
	sig_data = '\n'.join(sig_lines[1:-1])

	if re.match('^[ A-Za-z0-9+/=\n]+$', sig_data) is None:
		raise SafeException(_("Invalid characters found in base 64 encoded signature"))
	try:
		sig_data = base64.decodestring(sig_data) # (b64decode is Python 2.4)
	except Exception, ex:
		raise SafeException(_("Invalid base 64 encoded signature: %s") % str(ex))
Example no. 29
    def process(self):
        ck = self.deq()
        try:
            fd = os.open(ck.filename, os.O_RDONLY)
        except OSError as e:
            self.logger.warn("%s, Skipping ... " % e, extra=self.d)
            return

        os.lseek(fd, ck.offset, os.SEEK_SET)
        digest = hashlib.sha1()
        blocksize = 4*1024*1024 # 4MiB block
        blockcount = ck.length / blocksize
        remaining = ck.length % blocksize
        for _ in xrange(blockcount):
            digest.update(readn(fd, blocksize))
        if remaining > 0:
            digest.update(readn(fd, remaining))
        try:
            os.close(fd)
        except Exception as e:
            self.logger.warn(e, extra=self.d)
        ck.digest = digest.hexdigest()
        #self.chunkq.append(ck)
        self.vsize += ck.length

        self.bfsign.insert_item(ck.digest)
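readn() is not shown in this excerpt; a plausible sketch of such a helper, reading exactly n bytes and looping on short reads, would be:

import os

def readn(fd, nbytes):
    # Keep reading until exactly nbytes are collected; fail on early EOF.
    chunks = []
    remaining = nbytes
    while remaining > 0:
        chunk = os.read(fd, remaining)
        if not chunk:
            raise IOError("unexpected end of file while reading %d bytes" % nbytes)
        chunks.append(chunk)
        remaining -= len(chunk)
    return b''.join(chunks)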
Example no. 30
 def write(self, path, data, offset, fh):
     if fh in self.map_to_os_fd:
         os_fd = self.map_to_os_fd[fh]
         os.lseek(os_fd, offset, 0)
         return os.write(os_fd, data)
     else:
         raise OSError(EINVAL, '')
Example no. 31
 def write(self, path, buf, offset, fh):
     print(f"Write: {path}\n")
     os.lseek(fh, offset, os.SEEK_SET)
     return os.write(fh, buf)
Example no. 32
    def _clone_local(self, meter, size_bytes):
        if self._input_path == "/dev/null":
            # Not really sure why this check is here,
            # but keeping for compat
            logging.debug("Source dev was /dev/null. Skipping")
            return
        if self._input_path == self._output_path:
            logging.debug("Source and destination are the same. Skipping.")
            return

        # If a destination file exists and sparse flag is True,
        # this priority takes an existing file.

        if (not os.path.exists(self._output_path) and self._sparse):
            clone_block_size = 4096
            sparse = True
            fd = None
            try:
                fd = os.open(self._output_path, os.O_WRONLY | os.O_CREAT,
                             0o640)
                os.ftruncate(fd, size_bytes)
            finally:
                if fd:
                    os.close(fd)
        else:
            clone_block_size = 1024 * 1024 * 10
            sparse = False

        logging.debug("Local Cloning %s to %s, sparse=%s, block_size=%s",
                      self._input_path, self._output_path,
                      sparse, clone_block_size)

        zeros = '\0' * 4096

        src_fd, dst_fd = None, None
        try:
            try:
                src_fd = os.open(self._input_path, os.O_RDONLY)
                dst_fd = os.open(self._output_path,
                                 os.O_WRONLY | os.O_CREAT, 0o640)

                i = 0
                while 1:
                    l = os.read(src_fd, clone_block_size)
                    s = len(l)
                    if s == 0:
                        meter.end(size_bytes)
                        break
                    # check sequence of zeros
                    if sparse and zeros == l:
                        os.lseek(dst_fd, s, 1)
                    else:
                        b = os.write(dst_fd, l)
                        if s != b:
                            meter.end(i)
                            break
                    i += s
                    if i < size_bytes:
                        meter.update(i)
            except OSError as e:
                raise RuntimeError(_("Error cloning diskimage %s to %s: %s") %
                                (self._input_path, self._output_path, str(e)))
        finally:
            if src_fd is not None:
                os.close(src_fd)
            if dst_fd is not None:
                os.close(dst_fd)
Example no. 33
def missingExecutables(ppd):
    """
    Check that all relevant executables for a PPD are installed.

    @param ppd: PPD
    @type ppd: cups.PPD object
    @returns: string list, representing missing executables
    """

    # First, a local function.  How to check that something exists
    # in a path:
    def pathcheck(name, path="/usr/bin:/bin"):
        if name == "-":
            # A filter of "-" means that no filter is required,
            # i.e. the device accepts the given format as-is.
            return "builtin"
        # Strip out foomatic '%'-style place-holders.
        p = name.find('%')
        if p != -1:
            name = name[:p]
        if len(name) == 0:
            return "true"
        if name[0] == '/':
            if os.access(name, os.X_OK):
                _debugprint("%s: found" % name)
                return name
            else:
                _debugprint("%s: NOT found" % name)
                return None
        if name.find("=") != -1:
            return "builtin"
        if name in [
                ":", ".", "[", "alias", "bind", "break", "cd", "continue",
                "declare", "echo", "else", "eval", "exec", "exit", "export",
                "fi", "if", "kill", "let", "local", "popd", "printf", "pushd",
                "pwd", "read", "readonly", "set", "shift", "shopt", "source",
                "test", "then", "trap", "type", "ulimit", "umask", "unalias",
                "unset", "wait"
        ]:
            return "builtin"
        for component in path.split(':'):
            file = component.rstrip(os.path.sep) + os.path.sep + name
            if os.access(file, os.X_OK):
                _debugprint("%s: found" % file)
                return file
        _debugprint("%s: NOT found in %s" % (name, path))
        return None

    exes_to_install = []

    def add_missing(exe):
        # Strip out foomatic '%'-style place-holders.
        p = exe.find('%')
        if p != -1:
            exe = exe[:p]

        exes_to_install.append(exe)

    # Find a 'FoomaticRIPCommandLine' attribute.
    exe = exepath = None
    attr = ppd.findAttr('FoomaticRIPCommandLine')
    if attr:
        # Foomatic RIP command line to check.
        cmdline = attr.value.replace('&&\n', '')
        cmdline = cmdline.replace('&quot;', '"')
        cmdline = cmdline.replace('&lt;', '<')
        cmdline = cmdline.replace('&gt;', '>')
        if (cmdline.find("(") != -1 or cmdline.find("&") != -1):
            # Don't try to handle sub-shells or unreplaced HTML entities.
            cmdline = ""

        # Strip out foomatic '%'-style place-holders
        pipes = cmdline.split(';')
        for pipe in pipes:
            cmds = pipe.strip().split('|')
            for cmd in cmds:
                args = cmd.strip().split(' ')
                exe = args[0]
                exepath = pathcheck(exe)
                if not exepath:
                    add_missing(exe)
                    continue

                # Main executable found.  But if it's 'gs',
                # perhaps there is an IJS server we also need
                # to check.
                if os.path.basename(exepath) == 'gs':
                    argn = len(args)
                    argi = 1
                    search = "-sIjsServer="
                    while argi < argn:
                        arg = args[argi]
                        if arg.startswith(search):
                            exe = arg[len(search):]
                            exepath = pathcheck(exe)
                            if not exepath:
                                add_missing(exe)

                            break

                        argi += 1

            if not exepath:
                # Next pipe.
                break

    if exepath or not exe:
        # Look for '*cupsFilter' lines in the PPD and check that
        # the filters are installed.
        (tmpfd, tmpfname) = tempfile.mkstemp(text=True)
        os.unlink(tmpfname)
        ppd.writeFd(tmpfd)
        os.lseek(tmpfd, 0, os.SEEK_SET)
        f = os.fdopen(tmpfd, "rt")
        search = "*cupsFilter:"
        for line in f:
            if line.startswith(search):
                line = line[len(search):].strip().strip('"')
                try:
                    (mimetype, cost, exe) = line.split(' ')
                except:
                    continue

                exepath = pathcheck(
                    exe, config.cupsserverbindir + "/filter:"
                    "/usr/lib64/cups/filter")
                if not exepath:
                    add_missing(config.cupsserverbindir + "/filter/" + exe)

    return exes_to_install
Example no. 34
    def _make_changelog(self):
        """
        Create a new changelog entry in the spec, with line items from git
        """
        if self._no_auto_changelog:
            debug("Skipping changelog generation.")
            return

        in_f = open(self.spec_file, 'r')
        out_f = open(self.spec_file + ".new", 'w')

        found_changelog = False
        for line in in_f.readlines():
            out_f.write(line)

            if not found_changelog and line.startswith("%changelog"):
                found_changelog = True

                old_version = get_latest_tagged_version(self.project_name)

                # don't die if this is a new package with no history
                if old_version != None:
                    last_tag = "%s-%s" % (self.project_name, old_version)
                    output = self._generate_default_changelog(last_tag)
                else:
                    output = self._new_changelog_msg

                fd, name = tempfile.mkstemp()
                os.write(fd, "# Create your changelog entry below:\n")
                if self.git_email is None or (('HIDE_EMAIL' in self.user_config) and \
                        (self.user_config['HIDE_EMAIL'] not in ['0', ''])):
                    header = "* %s %s\n" % (self.today, self.git_user)
                else:
                    header = "* %s %s <%s>\n" % (self.today, self.git_user,
                                                 self.git_email)

                os.write(fd, header)

                for cmd_out in output.split("\n"):
                    os.write(fd, "- ")
                    os.write(fd, "\n  ".join(textwrap.wrap(cmd_out, 77)))
                    os.write(fd, "\n")

                os.write(fd, "\n")

                if not self._accept_auto_changelog:
                    # Give the user a chance to edit the generated changelog:
                    editor = 'vi'
                    if "EDITOR" in os.environ:
                        editor = os.environ["EDITOR"]
                    subprocess.call(editor.split() + [name])

                os.lseek(fd, 0, 0)
                file = os.fdopen(fd)

                for line in file.readlines():
                    if not line.startswith("#"):
                        out_f.write(line)

                output = file.read()

                file.close()
                os.unlink(name)

        if not found_changelog:
            print(
                "WARNING: no %changelog section find in spec file. Changelog entry was not appended."
            )

        in_f.close()
        out_f.close()

        shutil.move(self.spec_file + ".new", self.spec_file)
Example no. 35
 def write(self, path, buf, offset, fh):
     os.lseek(fh, offset, os.SEEK_SET)
     return os.write(fh, buf)
Example no. 36
 def read(self, path, length, offset, fh):
     os.lseek(fh, offset, os.SEEK_SET)
     return os.read(fh, length)
Example no. 37
#
# pos − The offset, in bytes, to move the file pointer by, interpreted relative to the reference point given by how.
#
# how − This is the reference point within the file. os.SEEK_SET or 0 means the beginning of the file, os.SEEK_CUR or 1 means the current position and os.SEEK_END or 2 means the end of the file.
#
# Return Value
#
# This method returns the new file position, measured in bytes from the beginning of the file.
# Example
import os, sys

# Open a file
fd = os.open("foo.txt", os.O_RDWR | os.O_CREAT)

# Write one string (os.write needs a bytes object)
os.write(fd, b"This is test")

# Now you can use the fsync() method.
# In fact, you would not be able to see its effect here.
os.fsync(fd)

# Now read this file from the beginning
os.lseek(fd, 0, 0)
data = os.read(fd, 100)
print("Read String is : ", data.decode())

# Close opened file
os.close(fd)

print("Closed the file successfully!!")
Example no. 38
    def file(self, path, stream=False, inline=False, name=None):
        """
        Returns a GZip compressed response with content of file located in ``path`` and correct headers

        :type path: str
        :type stream: bool
        """

        # Block path traversal
        if '..' in path:
            self.respond_forbidden()
            return

        if not os.path.isfile(path):
            self.respond_not_found()
            return

        content_types = {
            '.html': 'text/html',
            '.css': 'text/css',
            '.js': 'application/javascript',
            '.png': 'image/png',
            '.jpg': 'image/jpeg',
            '.svg': 'image/svg+xml',
            '.woff': 'application/x-font-woff',
            '.pdf': 'application/pdf',
        }

        ext = os.path.splitext(path)[1]
        if ext in content_types:
            self.add_header('Content-Type', content_types[ext])
        else:
            self.add_header('Content-Type', 'application/octet-stream')

        mtime = datetime.utcfromtimestamp(math.trunc(os.path.getmtime(path)))

        rtime = self.env.get('HTTP_IF_MODIFIED_SINCE', None)
        if rtime:
            try:
                rtime = datetime.strptime(rtime, '%a, %b %d %Y %H:%M:%S GMT')
                if mtime <= rtime:
                    self.respond('304 Not Modified')
                    return
            except:
                pass

        http_range = self.env.get('HTTP_RANGE', None)
        range_from = range_to = None
        if http_range and http_range.startswith('bytes'):
            rsize = os.stat(path).st_size
            range_from, range_to = http_range.split('=')[1].split('-')
            range_from = int(range_from) if range_from else 0
            range_to = int(range_to) if range_to else (rsize - 1)
        else:
            range_from = 0
            range_to = 999999999

        self.add_header('Last-Modified',
                        mtime.strftime('%a, %b %d %Y %H:%M:%S GMT'))
        self.add_header('Accept-Ranges', 'bytes')

        name = name or os.path.split(path)[-1].encode()

        if inline:
            self.add_header('Content-Disposition',
                            b'inline; filename=%s' % name)
        else:
            self.add_header('Content-Disposition',
                            b'attachment; filename=%s' % name)

        if stream:
            if range_from:
                self.add_header('Content-Length',
                                str(range_to - range_from + 1))
                self.add_header(
                    'Content-Range',
                    'bytes %i-%i/%i' % (range_from, range_to, rsize))
                self.respond('206 Partial Content')
            else:
                self.respond_ok()
            fd = os.open(path, os.O_RDONLY)
            os.lseek(fd, range_from or 0, os.SEEK_SET)
            bufsize = 100 * 1024
            read = range_from
            buf = 1
            while buf:
                buf = os.read(fd, bufsize)
                gevent.sleep(0)
                if read + len(buf) > range_to:
                    buf = buf[:range_to + 1 - read]
                yield buf
                read += len(buf)
                if read >= range_to:
                    break
            os.close(fd)
        else:
            content = open(path, 'rb').read()
            yield self.gzip(content)
Example no. 39
 def seek(self, offset, whence=os.SEEK_SET):
     return os.lseek(self._fd, offset, whence)
Example no. 40
 def read(self, path, length, offset, fh):
     print(f"Read: {path}\n")
     os.lseek(fh, offset, os.SEEK_SET)
     return os.read(fh, length)
Example no. 41
 def tell(self):
     self.flush()
     try:
         return os.lseek(self.fileno(), 0, 1) - self._get_readahead_len()
     except OSError as e:
         raise IOError(*e.args)
Example no. 42
 def read_all(f):
     os.lseek(f.fileno(), 0, 0)
     return f.read().strip()
Example no. 43
 def write(self, path, buf, offset, fh):
     print("SSS->write: ", path, buf, offset, fh)
     os.lseek(fh, offset, os.SEEK_SET)
     return os.write(fh, buf)
Example no. 44
import os

try:
    fd1 = os.open("Makefile", os.O_RDWR)

    pos = os.lseek(fd1, 100, os.SEEK_SET)

    fd2 = os.dup(fd1)

    pos = os.lseek(fd2, 100, os.SEEK_CUR)

    # print("pos = " + pos);

    os.close(fd1)

except Exception as e:
    print("Error: {}".format(e))
Example no. 45
 def lseek(self, lseek_offset: int, lseek_origin: int = os.SEEK_SET) -> int:
     return os.lseek(self.__fd, lseek_offset, lseek_origin)
Example no. 46
 def _sock_sendfile_update_filepos(self, fileno, offset, total_sent):
     if total_sent > 0:
         os.lseek(fileno, offset, os.SEEK_SET)
Example no. 47
 def read(self, path, length, offset, fh):
     print '** read:', path, '**'
     os.lseek(fh, offset, os.SEEK_SET)
     return os.read(fh, length)
Example no. 48
 def read(self, path, length, offset, fh):
     print("SSS->read: ", path, length, offset, fh)
     os.lseek(fh, offset, os.SEEK_SET)
     return os.read(fh, length)
Example no. 49
 async def read(self, fd, offset, length):
     os.lseek(fd, offset, os.SEEK_SET)
     return os.read(fd, length)
Example no. 50
 def write(self, path, buf, offset, fh):
     print '** write:', path, '**'
     os.lseek(fh, offset, os.SEEK_SET)
     return os.write(fh, buf)
Example no. 51
    def getFile(self, *args, **kwds):
        ''' Keeping this as an alternative for the code.
            We don't use it because it's not possible to know if the call was a
            PK-one (and so we push the content of a temporary filename to fd or
            file) or a non-PK-one (in which case nothing should be done).

                filename = None
                fd = None
                file = None
                if use_pycups:
                    if len(kwds) != 1:
                        use_pycups = True
                    elif kwds.has_key('filename'):
                        filename = kwds['filename']
                    elif kwds.has_key('fd'):
                        fd = kwds['fd']
                    elif kwds.has_key('file'):
                        file = kwds['file']
                    else:
                        use_pycups = True

                    if fd or file:
        '''

        file_object = None
        fd = None
        if len(args) == 2:
            (use_pycups, resource, filename) = self._args_to_tuple([str, str], *args)
        else:
            (use_pycups, resource) = self._args_to_tuple([str], *args)
            if 'filename' in kwds:
                filename = kwds['filename']
            elif 'fd' in kwds:
                fd = kwds['fd']
            elif 'file' in kwds:
                file_object = kwds['file']
            else:
                if not use_pycups:
                    raise TypeError()
                else:
                    filename = None

        if (not use_pycups) and (fd is not None or file_object is not None):
            # Create the temporary file in /tmp to ensure that
            # cups-pk-helper-mechanism is able to write to it.
            (tmpfd, tmpfname) = tempfile.mkstemp(dir="/tmp")
            os.close (tmpfd)

            pk_args = (resource, tmpfname)
            self._call_with_pk_and_fallback(use_pycups,
                                            'FileGet', pk_args,
                                            self._connection.getFile,
                                            *args, **kwds)

            tmpfd = os.open (tmpfname, os.O_RDONLY)
            tmpfile = os.fdopen (tmpfd, 'rt')
            tmpfile.seek (0)

            if fd is not None:
                os.lseek (fd, 0, os.SEEK_SET)
                line = tmpfile.readline()
                while line != '':
                    os.write (fd, line.encode('UTF-8'))
                    line = tmpfile.readline()
            else:
                file_object.seek (0)
                line = tmpfile.readline()
                while line != '':
                    file_object.write (line.encode('UTF-8'))
                    line = tmpfile.readline()

            tmpfile.close ()
            os.remove (tmpfname)
        else:
            pk_args = (resource, filename)

            self._call_with_pk_and_fallback(use_pycups,
                                            'FileGet', pk_args,
                                            self._connection.getFile,
                                            *args, **kwds)
Example no. 52
    async def write(self, fd, offset, buf):
        # ff=gpioint
        # f=gpio
        gpiobyte = buf.decode()
        print("buf: ", buf, "gpiobyte: ", gpiobyte)
        print("self: ", self, "offset: ", offset)

        print("export: ", export)
        print("unexport: ", unexport)
        print("direction: ", direction)
        print("value: ", value)
        print("activelow: ", active)
        print("edge: ", edge)

        if export[0] == 1 and unexport[0] == 0:
            print("1export")

            gpioint = int(gpiobyte)
            print("write\n buf: ", buf, "fd: ", fd)
            print("numerogpiobyte: ", gpiobyte)
            print("numerogpiointero: ", gpioint)

            gpio = str(gpioint)
            listgpio = listgpio2
            print("la lista di gpio disponibili per test2 è: ", listgpio, "\n")

            print(os.path.exists('/sys/class/gpio/gpio' + gpio + '/'))
            if gpioint in listgpio and not os.path.exists(
                    '/sys/class/gpio/gpio' + gpio + '/'):
                os.lseek(fd, offset, os.SEEK_SET)
                return os.write(fd, buf)
            else:
                print("errore")
                os.lseek(fd, offset, os.SEEK_SET)
                return 1
        elif export[0] == 0 and unexport[0] == 1:
            print("1unexport")

            gpioint = int(gpiobyte)
            print("write\n buf: ", buf, "fd: ", fd)
            print("numerogpiobyte: ", gpiobyte)
            print("numerogpiointero: ", gpioint)

            gpio = str(gpioint)
            listgpio = listgpio2
            print("la lista di gpio disponibili per test2 è: ", listgpio, "\n")

            print('/sys/class/gpio/gpio' + gpio + '/')
            print(os.path.exists('/sys/class/gpio/gpio' + gpio + '/'))
            if gpioint in listgpio and os.path.exists('/sys/class/gpio/gpio' +
                                                      gpio + '/'):
                os.lseek(fd, offset, os.SEEK_SET)
                return os.write(fd, buf)
            else:
                print("errore")
                os.lseek(fd, offset, os.SEEK_SET)
                return 1
        # elif export[0] == 0 and unexport[0] == 0:
        #    print("1indefinito")
        else:

            print("1non dovrebbe capitare: echo direction value\n")
            print("buff:", buf)
            print("buf decodificato:", gpiobyte)
            gpiobyte = gpiobyte.strip('\n')
            print("buf decodificato senzsa\\n:", gpiobyte)

            if direction[0] == 1:
                print("caso direction:")
                if gpiobyte == 'in' or gpiobyte == 'out':
                    os.lseek(fd, offset, os.SEEK_SET)
                    return os.write(fd, buf)
                else:
                    print("errore")

                    pathmod = os.path.relpath(
                        ultimopath[0],
                        '/sys/devices/platform/soc/3f200000.gpio/gpiochip0/gpio/'
                    )
                    basename = os.path.basename(ultimopath[0])
                    basename = '/' + basename
                    print("\n\n\npathmod:", pathmod, "basename:", basename,
                          "\n\n\n")
                    gpio = os.path.dirname(pathmod)
                    print("gpio:", gpio)

                    valoredirezione = os.popen('cat /sys/class/gpio/' + gpio +
                                               '/direction').read()
                    valoredirezione = valoredirezione.strip('\n')

                    # print("valore direzione: ", valoredirezione)
                    # print('echo '+valoredirezione+' > /gpio_mnt/test2/sys/class/gpio/'+gpio+'/direction')
                    # cmd = 'echo '+valoredirezione+' > /gpio_mnt/test2/sys/class/gpio/'+gpio+'/direction'
                    # print(cmd)

                    # os.popen('./riavvio.sh test2').read()
                    os.system('./riavvio.sh test2')

                    # os.lseek(fd, offset, os.SEEK_SET)
                    return 1
            elif value[0] == 1:
                if gpiobyte == '1' or gpiobyte == '0':
                    gpioint = int(gpiobyte)
                    gpio = str(gpioint)
                    print("caso value:")

                    pathmod = os.path.relpath(
                        ultimopath[0],
                        '/sys/devices/platform/soc/3f200000.gpio/gpiochip0/gpio/'
                    )
                    basename = os.path.basename(ultimopath[0])
                    basename = '/' + basename
                    print("\n\n\npathmod:", pathmod, "basename:", basename,
                          "\n\n\n")
                    gpio = os.path.dirname(pathmod)
                    print("gpio:", gpio)

                    valoredirezione = os.popen('cat /sys/class/gpio/' + gpio +
                                               '/direction').read()
                    valoredirezione = valoredirezione.strip('\n')
                    print("valore direzione: ", valoredirezione)

                    if valoredirezione == 'out':
                        if gpiobyte == '0' or gpiobyte == '1':
                            os.lseek(fd, offset, os.SEEK_SET)
                            return os.write(fd, buf)
                        else:
                            print("errore valori non accettati")
                            os.lseek(fd, offset, os.SEEK_SET)
                            os.system('./riavvio.sh test2')
                            return 1
                    else:
                        print(
                            "error: you cannot change value when direction is in!"
                        )
                        os.lseek(fd, offset, os.SEEK_SET)
                        os.system('./riavvio.sh test2')
                        return 1
                else:
                    print("errore carattere")
                    os.system('./riavvio.sh test2')
                    return 1

            elif active[0] == 1:
                print("caso active:")
                if gpiobyte == '0' or gpiobyte == '1':
                    os.lseek(fd, offset, os.SEEK_SET)
                    return os.write(fd, buf)
                else:
                    print("errore")
                    os.lseek(fd, offset, os.SEEK_SET)
                    return 1

            else:
                print("errore2")
                os.lseek(fd, offset, os.SEEK_SET)
                return 1
            '''
Example no. 53
 def read(self, path, size, offset, fh):
     with self.rwlock:
         os.lseek(fh, offset, 0)
         return os.read(fh, size)
Example no. 54
 def lseek(self, fd, offset, whence):
     if (fd == None):
         return True
     return os.lseek(fd, offset, whence)
Example no. 55
def delete_content(fd):
    os.ftruncate(fd, 0)
    os.lseek(fd, 0, os.SEEK_SET)
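A minimal sketch of how a helper like delete_content() is typically used, for example to reset a scratch file between runs; the temporary-file setup is illustrative:

import os
import tempfile

fd, path = tempfile.mkstemp()
os.write(fd, b"stale contents")

delete_content(fd)              # file is now empty and the offset is back at 0
os.write(fd, b"fresh contents")

os.close(fd)
os.remove(path)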
Example no. 56
 def seek(self, offset):
     try:
         os.lseek(self.fd, offset, os.SEEK_SET)
     except OSError, e:
         print "Seek error: %s" % (e.message)
Example no. 57
 def native_read_phys_mem(self, phys_address_hi, phys_address_lo, length):
     if self.devmem_available():
         addr = (phys_address_hi << 32) | phys_address_lo
         os.lseek(self.dev_mem, addr, os.SEEK_SET)
         return os.read(self.dev_mem, length)
Example no. 58
 def write(self, path, data, offset, fh):
     with self.rwlock:
         os.lseek(fh, offset, 0)
         return os.write(fh, data)
Example no. 59
 def _handle_lseek(self, mu, fd, offset, whence):
     return os.lseek(fd, offset, whence)
Example no. 60
 def native_read_mmio_reg(self, phys_address, size):
     if self.devmem_available():
         os.lseek(self.dev_mem, phys_address, os.SEEK_SET)
         reg = os.read(self.dev_mem, size)
         return struct.unpack(('=%c' % chipsec.defines.SIZE2FORMAT[size]),
                              reg)[0]