Example #1
    def worker(self, timeInterval, keepWorking):
        # Keep polling the queue and empty it into the database.
        # keepWorking is an Event set in the main thread to allow stopping.
        while keepWorking.is_set():
            self.datafile = path + 'DavisVP2_' + datetime.datetime.now().strftime('%m-%d-%y') + '.xlsx'
            self.logfile = path + 'status_' + datetime.datetime.now().strftime('%m-%d-%y') + '.log'
            try:
                if os.path.exists(self.logfile):
                    self.fd = BufferedWriter(FileIO(self.logfile, 'ab'))
                else:
                    self.fd = BufferedWriter(FileIO(self.logfile, 'wb'))

            except IOError:
                self.log.error("Cannot create log file %s" % self.logfile)
            try:
                if os.path.exists(self.datafile):
                    self.fd = BufferedWriter(FileIO(self.datafile, 'ab'))
                else:
                    self.fd = BufferedWriter(FileIO(self.datafile, 'wb'))

            except IOError:
                self.log.error("Cannot create data file %s" % self.datafile)

            self.check_conditions(self.datafile)
            time.sleep(timeInterval)
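
A minimal driver sketch for the worker above: the Event is set to keep the loop alive and cleared to stop it. The owning class is called Collector here purely for illustration; it is not named in the snippet.

import threading

keep_working = threading.Event()
keep_working.set()  # worker loops while the event stays set

collector = Collector()  # hypothetical class owning worker()
t = threading.Thread(target=collector.worker, args=(60, keep_working))
t.start()
# ... later, from the main thread:
keep_working.clear()  # worker finishes its current iteration, then exits
t.join()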
Example #2
        def handle(self):
            """
            Handle a single HTTP request. Shamelessly copied from Python
            3.5 wsgiref simple_server. Adjusted the SimpleHandler to set
            multithread=False.
            """
            self.raw_requestline = self.rfile.readline(65537)
            if len(self.raw_requestline) > 65536:
                self.requestline = ''
                self.request_version = ''
                self.command = ''
                self.send_error(414)
                return

            if not self.parse_request():  # an error code has been sent, exit
                return

            # Avoid passing the raw file object wfile, which can do partial
            # writes (Issue 24291)
            stdout = BufferedWriter(self.wfile)
            try:
                handler = MySimpleHandler(
                    self.rfile, stdout, self.get_stderr(), self.get_environ(),
                    multithread=False, multiprocess=False)
                handler.request_handler = self      # backpointer for logging
                handler.run(self.server.get_app())
            finally:
                stdout.detach()
Example #3
def stream_compress(instr: io.BufferedReader,
                    outstr: io.BufferedWriter,
                    chunk_size=DEFAULT_MAX_CHUNK):
    """
    A stream processor that call compress on bytes available in instr
    And write them into outstr
    :param instr: buffered reader
    :param outstr: buffered writer
    :param chunk: the sizeof chunk to read at one time. if 0 attempt to read as much as possible.
    :returns: original consumed data size, compressed data size
    """
    orig_data_size: int = 0
    comp_data_size: int = 0
    inbytes: bytes = instr.read(chunk_size)
    while inbytes:

        data_comp = compress(inbytes)

        # we prepend with uncompressed data chunk size
        # to be used later for random access

        orig_data_size += len(inbytes)

        # '>H' is the unsigned short format; it fits in 2 bytes.
        output = struct.pack('>H', len(inbytes)) + struct.pack(
            '>H', len(data_comp)) + data_comp
        # we need to include the chunk indexes in the compressed size
        comp_data_size += len(output)

        outstr.write(output)

        # keep consuming data, in case more is available...
        inbytes = instr.read(chunk_size)

    return orig_data_size, comp_data_size
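
A short usage sketch for stream_compress, assuming compress comes from zlib and that chunk_size fits the 2-byte '>H' headers; BytesIO stands in for real files:

import io
import struct
from zlib import compress

src = io.BufferedReader(io.BytesIO(b'hello world' * 1000))
raw_out = io.BytesIO()
dst = io.BufferedWriter(raw_out)

orig, comp = stream_compress(src, dst, chunk_size=4096)
dst.flush()

blob = raw_out.getvalue()
# first chunk header: 2-byte original size, then 2-byte compressed size
print(struct.unpack('>H', blob[0:2])[0], struct.unpack('>H', blob[2:4])[0])
print(orig, comp)  # totals returned by the function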
Example #5
 def setup(self, filename, *args, **kwargs):
     self._path = os.path.join('tracker/media', self.UPLOAD_DIR, filename)
     try:
         os.makedirs(os.path.realpath(os.path.dirname(self._path)))
     except OSError:
         pass
     self._dest = BufferedWriter(FileIO(self._path, "a"))
Example #6
    def __init__(self, stream=None):
        if not stream:
            stream = BytesIO()

        self.stream = stream
        self.writer = BufferedWriter(self.stream)
        self.written_count = 0
Example #7
File: content.py Project: nabcos/acd_cli
def consecutive_download(node_id: str, file: io.BufferedWriter, **kwargs):
    """Keyword args: write_callback"""
    r = BackOffRequest.get(get_content_url() + 'nodes/' + node_id + '/content', stream=True)
    if r.status_code not in OK_CODES:
        raise RequestError(r.status_code, r.text)

    write_callback = kwargs.get('write_callback', None)

    total_ln = int(r.headers.get('content-length'))
    length = kwargs.get('length', None)
    if length and total_ln != length:
        logging.info('Length mismatch: argument %d, content %d' % (length, total_ln))

    pgo = progress.Progress()
    curr_ln = 0
    try:
        for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
            if chunk:  # filter out keep-alive new chunks
                file.write(chunk)
                file.flush()
                if write_callback:
                    write_callback(chunk)
                curr_ln += len(chunk)
                pgo.print_progress(total_ln, curr_ln)
    except (ConnectionError, ReadTimeoutError) as e:
        raise RequestError(RequestError.CODE.READ_TIMEOUT, '[acd_cli] Timeout. ' + e.__str__())
    print()  # break progress line
    r.close()
    return
Example #8
    def handle(self):
        """Handle a single HTTP request"""

        self.raw_requestline = self.rfile.readline(65537)
        if len(self.raw_requestline) > 65536:
            self.requestline = ''
            self.request_version = ''
            self.command = ''
            self.send_error(414)
            return

        if not self.parse_request(): # An error code has been sent, just exit
            return

        # Avoid passing the raw file object wfile, which can do partial
        # writes (Issue 24291)
        stdout = BufferedWriter(self.wfile)
        try:
            handler = ServerHandler(
                self.rfile, stdout, self.get_stderr(), self.get_environ()
            )
            handler.request_handler = self      # backpointer for logging
            handler.run(self.server.get_app())
        finally:
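            # detach() flushes buffered data and hands wfile back without closing the socket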
            stdout.detach()
Example #9
 def setup(self, filename, *args, **kwargs):
     self._path = self.get_path(filename, *args, **kwargs)
     try:
         os.makedirs(os.path.realpath(os.path.dirname(self._path)))
     except OSError:
         pass
     self._dest = BufferedWriter(FileIO(self._path, "w"))
Example #10
 def setup(self, filename):
     self._path = os.path.join(settings.MEDIA_ROOT, filename)
     try:
         os.makedirs(os.path.realpath(os.path.dirname(self._path)))
     except OSError:
         pass
     self._dest = BufferedWriter(FileIO(self._path, "w"))
Example #11
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = ajaxuploader_settings.UPLOAD_DIRECTORY
    # TODO: allow this to be overridden per-widget/view

    def setup(self, filename):
        self._relative_path = os.path.normpath(
            os.path.join(
                force_unicode(
                    datetime.datetime.now().strftime( # allow %Y, %s, etc
                        smart_str(self.UPLOAD_DIR))),
                filename))
        self._path = os.path.join(settings.MEDIA_ROOT, self._relative_path)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        self._dest.close()
        return {"path": self._relative_path}

    def update_filename(self, request, filename):
        return ajaxuploader_settings.SANITIZE_FILENAME(filename)
Example #12
def send_packet(writer: io.BufferedWriter,
                cmd: int,
                flags: int = 0,
                data: bytes = b'') -> None:
    packet = Packet(cmd, flags, len(data), data, protocol_utils.Formats.HEADER)
    writer.write(packet.to_bytes())
    writer.flush()
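
send_packet relies on a Packet class and protocol_utils.Formats.HEADER that are not shown. A hypothetical stand-in to make the framing concrete; the '!BBH' layout (1-byte cmd, 1-byte flags, 2-byte length, then payload) is an assumption, not the real protocol:

import io
import struct

class Packet:
    def __init__(self, cmd, flags, length, data, header_fmt='!BBH'):
        self.cmd, self.flags, self.length, self.data = cmd, flags, length, data
        self.header_fmt = header_fmt  # assumed header layout

    def to_bytes(self) -> bytes:
        return struct.pack(self.header_fmt, self.cmd, self.flags, self.length) + self.data

writer = io.BufferedWriter(io.BytesIO())
writer.write(Packet(1, 0, 4, b'ping').to_bytes())  # mirrors send_packet's body
writer.flush()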
Example #13
class AbstractUploadBackend(object):
    BUFFER_SIZE = 10485760  # 10MB

    def __init__(self, **kwargs):
        self._timedir = get_date_directory()
        self.__dict__.update(kwargs)

    def update_filename(self, request, filename):
        """Returns a new name for the file being uploaded."""
        self.oldname = filename
        ext = os.path.splitext(filename)[1]
        return md5(filename.encode('utf8')).hexdigest() + ext

    def upload_chunk(self, chunk):
        """Called when a string was read from the client, responsible for
        writing that string to the destination file."""
        self._dest.write(chunk)

    def max_size(self):
        """
        Check whether the file exceeds the maximum allowed upload size.
        """
        if int(self._dest.tell()) > self.upload_size:
            self._dest.close()
            os.remove(self._path)
            return True

    def upload(self, uploaded, filename, raw_data):
        try:
            if raw_data:
                # File was uploaded via ajax, and is streaming in.
                chunk = uploaded.read(self.BUFFER_SIZE)
                while len(chunk) > 0:
                    self.upload_chunk(chunk)
                    if self.max_size():
                        return False
                    chunk = uploaded.read(self.BUFFER_SIZE)
            else:
                # File was uploaded via a POST, and is here.
                for chunk in uploaded.chunks():
                    self.upload_chunk(chunk)
                    if self.max_size():
                        return False
            return True
        except Exception:
            # things went badly.
            return False

    def setup(self, filename):
        self._path = os.path.join(settings.MEDIA_ROOT, self.upload_dir, self._timedir, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_complete(self, request, filename):
        path = self.upload_dir + "/" + self._timedir + "/" + filename
        self._dest.close()
        return {"path": path, 'oldname': self.oldname}
Example #14
    def chunked_download(self, node_id: str, file: io.BufferedWriter,
                         **kwargs):
        """:param kwargs:
        offset (int): byte offset -- start byte for ranged request
        length (int): total file length[!], equal to end + 1
        write_callbacks (list[function])
        """
        ok_codes = [http.PARTIAL_CONTENT]

        write_callbacks = kwargs.get('write_callbacks', [])

        chunk_start = kwargs.get('offset', 0)
        length = kwargs.get('length', 100 * 1024**4)

        retries = 0
        while chunk_start < length:
            chunk_end = chunk_start + CHUNK_SIZE - 1
            if chunk_end >= length:
                chunk_end = length - 1

            if retries >= CHUNK_MAX_RETRY:
                raise RequestError(
                    RequestError.CODE.FAILED_SUBREQUEST,
                    '[acd_api] Downloading chunk failed multiple times.')
            r = self.BOReq.get(
                self.content_url + 'nodes/' + node_id + '/content',
                stream=True,
                acc_codes=ok_codes,
                headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)})

            logger.debug('Range %d-%d' % (chunk_start, chunk_end))
            # this should only happen at the end of unknown-length downloads
            if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
                r.close()
                logger.debug('Invalid byte range requested %d-%d' %
                             (chunk_start, chunk_end))
                break
            if r.status_code not in ok_codes:
                r.close()
                retries += 1
                logger.debug('Chunk [%d-%d], retry %d.' %
                             (chunk_start, chunk_end, retries))
                continue

            curr_ln = 0
            try:
                for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
                    if chunk:  # filter out keep-alive new chunks
                        file.write(chunk)
                        file.flush()
                        for wcb in write_callbacks:
                            wcb(chunk)
                        curr_ln += len(chunk)
            finally:
                r.close()

            chunk_start += CHUNK_SIZE
            retries = 0

        return
Example #16
class PANDAUploadBackend(AbstractUploadBackend):
    """
    Customized backend to handle AJAX uploads.
    """
    def update_filename(self, request, filename):
        """
        Verify that the filename is unique, if it isn't append and iterate
        a counter until it is.
        """
        self._original_filename = filename

        filename = self._original_filename
        root, ext = os.path.splitext(self._original_filename)
        path = os.path.join(settings.MEDIA_ROOT, filename)

        i = 1

        while os.path.exists(path):
            filename = '%s%i%s' % (root, i, ext)
            path = os.path.join(settings.MEDIA_ROOT, filename)
            i += 1

        return filename 

    def setup(self, filename):
        """
        Open the destination file for writing.
        """
        self._path = os.path.join(settings.MEDIA_ROOT, filename)

        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            pass

        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        """
        Write a chunk of data to the destination.
        """
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        """
        Close the destination file and create an Upload object in the
        database recording its existence.
        """
        self._dest.close()

        root, ext = os.path.splitext(filename)
        path = os.path.join(settings.MEDIA_ROOT, filename)
        size = os.path.getsize(path)

        upload = Upload.objects.create(
            filename=filename,
            original_filename=self._original_filename,
            size=size)

        return { 'id': upload.id }
Example #17
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = getattr(settings, "UPLOAD_DIR", "uploads")

    def setup(self, filename, *args, **kwargs):
        self._path = os.path.join(settings.MEDIA_ROOT, self.UPLOAD_DIR,
                                  filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk, *args, **kwargs):
        self._dest.write(chunk)

    def upload_complete(self, request, filename, *args, **kwargs):
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        self._dest.close()
        return {"path": path}

    def update_filename(self, request, filename, *args, **kwargs):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting
        """
        filename = os.path.basename(filename)
        self._dir = os.path.join(settings.MEDIA_ROOT, self.UPLOAD_DIR)
        unique_filename = False
        filename_suffix = 0

        # Check if file at filename exists
        if os.path.isfile(os.path.join(self._dir, filename)):
            while not unique_filename:
                try:
                    if filename_suffix == 0:
                        open(os.path.join(self._dir, filename))
                    else:
                        filename_no_extension, extension = os.path.splitext(
                            filename)
                        open(
                            os.path.join(
                                self._dir, filename_no_extension +
                                str(filename_suffix) + extension))
                    filename_suffix += 1
                except IOError:
                    unique_filename = True

        if filename_suffix == 0:
            return filename
        else:
            return filename_no_extension + str(filename_suffix) + extension

    @property
    def path(self):
        """
        Return a path of file uploaded
        """
        return self._path
Example #18
    def __init__(self):
        self.start_flying = True
        self.stop_flying = False
        self.return_to_home = False
        self.is_takeoff = False
        # self.PID = SimplePID()

        self.drone_position_x = 0
        self.drone_position_y = 0
        self.drone_position_z = 0

        self.drone_velocity_x = 0
        self.drone_velocity_y = 0
        self.drone_velocity_z = 0

        self.drone_acceleration_x = 0
        self.drone_acceleration_y = 0
        self.drone_acceleration_z = 0

        self.target_position_x = 0
        self.target_position_y = 0
        self.target_position_z = 0

        self.target_velocity_x = 0
        self.target_velocity_y = 0
        self.target_velocity_z = 0

        self.drone_yaw = 0
        self.drone_yaw_radians = 0

        self.vx = 0
        self.vy = 0
        self.vx1 = 0
        self.vy1 = 0

        self.ax = 0
        self.ay = 0

        self.controller = BasicDroneController()
        self.subNavdata = rospy.Subscriber('/ardrone/navdata', Navdata, self.ReceiveNavdata)

        self.logger = logging.getLogger('LQR_simulation')
        self.fileHandler_message = logging.StreamHandler(BufferedWriter(FileIO("LQR_simulation_data" + time.strftime("%Y%m%d-%H%M%S") + ".log", "w")))
        self.logger.addHandler(self.fileHandler_message)
        self.formatter_message = logging.Formatter('%(message)s')
        self.fileHandler_message.setFormatter(self.formatter_message)
        self.logger.setLevel(LoggerWarningLevel)
        self.logger.info('Time;target_position_x,target_position_y,target_position_z;target_velocity_x,target_velocity_y,target_velocity_z;drone_position_x,drone_position_y,drone_position_z;drone_velocity_x,drone_velocity_y,drone_velocity_z,vx1,vy1,ax,ay')

        self.logger_land = logging.getLogger('LQR_simulation_land')
        self.fileHandler_message = logging.StreamHandler(BufferedWriter(FileIO("LQR_simulation_PD_land_data" + time.strftime("%Y%m%d-%H%M%S") + ".log", "w")))
        self.logger_land.addHandler(self.fileHandler_message)
        self.formatter_message = logging.Formatter('%(message)s')
        self.fileHandler_message.setFormatter(self.formatter_message)
        self.logger_land.setLevel(LoggerWarningLevel)
        self.logger_land.info('Time;target_position_x,target_position_y,target_position_z;target_velocity_x,target_velocity_y,target_velocity_z;drone_position_x,drone_position_y,drone_position_z;drone_velocity_x,drone_velocity_y,drone_velocity_z,vx1,vy1,ax,ay')
Example #19
    def chunked_download(self, node_id: str, file: io.BufferedWriter, **kwargs):
        """:param kwargs:
        offset (int): byte offset -- start byte for ranged request
        length (int): total file length[!], equal to end + 1
        write_callbacks (list[function])
        """
        ok_codes = [http.PARTIAL_CONTENT]

        write_callbacks = kwargs.get("write_callbacks", [])

        chunk_start = kwargs.get("offset", 0)
        length = kwargs.get("length", 100 * 1024 ** 4)

        retries = 0
        while chunk_start < length:
            chunk_end = chunk_start + CHUNK_SIZE - 1
            if chunk_end >= length:
                chunk_end = length - 1

            if retries >= CHUNK_MAX_RETRY:
                raise RequestError(
                    RequestError.CODE.FAILED_SUBREQUEST, "[acd_api] Downloading chunk failed multiple times."
                )
            r = self.BOReq.get(
                self.content_url + "nodes/" + node_id + "/content",
                stream=True,
                acc_codes=ok_codes,
                headers={"Range": "bytes=%d-%d" % (chunk_start, chunk_end)},
            )

            logger.debug("Range %d-%d" % (chunk_start, chunk_end))
            # this should only happen at the end of unknown-length downloads
            if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
                r.close()
                logger.debug("Invalid byte range requested %d-%d" % (chunk_start, chunk_end))
                break
            if r.status_code not in ok_codes:
                r.close()
                retries += 1
                logging.debug("Chunk [%d-%d], retry %d." % (chunk_start, chunk_end, retries))
                continue

            curr_ln = 0
            try:
                for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
                    if chunk:  # filter out keep-alive new chunks
                        file.write(chunk)
                        file.flush()
                        for wcb in write_callbacks:
                            wcb(chunk)
                        curr_ln += len(chunk)
            finally:
                r.close()

            chunk_start += CHUNK_SIZE
            retries = 0

        return
Example #20
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = getattr(settings, "UPLOAD_DIR", "uploads")


    def setup(self, filename, *args, **kwargs):
        self._path = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk, *args, **kwargs):
        self._dest.write(chunk)

    def upload_complete(self, request, filename, *args, **kwargs):
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        self._dest.close()
        return {"path": path}

    def update_filename(self, request, filename, *args, **kwargs):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting
        """
        filename = os.path.basename(filename)
        self._dir = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR)
        unique_filename = False
        filename_suffix = 0

        # Check if file at filename exists
        if os.path.isfile(os.path.join(self._dir, filename)):
            while not unique_filename:
                try:
                    if filename_suffix == 0:
                        open(os.path.join(self._dir, filename))
                    else:
                        filename_no_extension, extension = os.path.splitext(filename)
                        open(os.path.join(self._dir, filename_no_extension + str(filename_suffix) + extension))
                    filename_suffix += 1
                except IOError:
                    unique_filename = True

        if filename_suffix == 0:
            return filename
        else:
            return filename_no_extension + str(filename_suffix) + extension

    @property
    def path(self):
        """
        Return a path of file uploaded
        """
        return self._path
Example #21
def save_file(file: io.BufferedWriter):
    """Save file without closing it

    Args:
        file (:obj:`io.BufferedWriter`): A file-like object
    """
    file.flush()
    os.fsync(file.fileno())
    file.seek(0)
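
A usage sketch: save_file makes the written bytes durable (flush to the OS, fsync to disk) and rewinds the handle so it can be re-read immediately; this requires a readable mode such as 'w+b':

with open('upload.bin', 'w+b') as f:
    f.write(b'payload')
    save_file(f)
    assert f.read() == b'payload'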
Example #22
File: ajax.py Project: ukaoma/nnmware
 def setup(self, filename):
     ext = os.path.splitext(filename)[1]
     self._filename = md5(filename.encode('utf8')).hexdigest() + ext
     self._path = os.path.join(self._upload_dir, self._filename)
     try:
         os.makedirs(os.path.realpath(os.path.dirname(self._path)))
     except OSError:
         pass
     self._destination = BufferedWriter(FileIO(self._path, "w"))
Example #23
    def _encode(cls, file: BufferedReader, archive_file: BufferedWriter,
                file_path: str):

        encoding_dictionary = cls.get_encoding_dictionary(file)
        archive_file.write(cls._get_file_path_data(file_path))
        archive_file.write(cls._get_dictionary_data(encoding_dictionary))
        archive_file.write(cls._compose_data(cls._get_control_sum(file)))

        cls._write_encoded_file_data(file, archive_file, encoding_dictionary)
Example #24
    def chunked_download(self, node_id: str, file: io.BufferedWriter, **kwargs):
        """:param kwargs:
        offset (int): byte offset -- start byte for ranged request
        length (int): total file length[!], equal to end + 1
        write_callbacks (list[function])
        """
        ok_codes = [http.PARTIAL_CONTENT]

        write_callbacks = kwargs.get('write_callbacks', [])

        chunk_start = kwargs.get('offset', 0)
        length = kwargs.get('length', 100 * 1024 ** 4)

        dl_chunk_sz = self._conf.getint('transfer', 'dl_chunk_size')

        retries = 0
        while chunk_start < length:
            chunk_end = chunk_start + dl_chunk_sz - 1
            if chunk_end >= length:
                chunk_end = length - 1

            if retries >= self._conf.getint('transfer', 'chunk_retries'):
                raise RequestError(RequestError.CODE.FAILED_SUBREQUEST,
                                   '[acd_api] Downloading chunk failed multiple times.')
            r = self.BOReq.get(self.content_url + 'nodes/' + node_id + '/content', stream=True,
                               acc_codes=ok_codes,
                               headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)})

            logger.debug('Node "%s", range %d-%d' % (node_id, chunk_start, chunk_end))
            # this should only happen at the end of unknown-length downloads
            if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
                r.close()
                logger.debug('Invalid byte range requested %d-%d' % (chunk_start, chunk_end))
                break
            if r.status_code not in ok_codes:
                r.close()
                retries += 1
                logger.debug('Chunk [%d-%d], retry %d.' % (chunk_start, chunk_end, retries))
                continue

            curr_ln = 0
            try:
                for chunk in r.iter_content(chunk_size=self._conf.getint('transfer', 'fs_chunk_size')):
                    if chunk:  # filter out keep-alive new chunks
                        file.write(chunk)
                        file.flush()
                        for wcb in write_callbacks:
                            wcb(chunk)
                        curr_ln += len(chunk)
            finally:
                r.close()
                chunk_start = file.tell()

            retries = 0

        return
Example #25
 def _extract(self, frame:ImageFrame, dstfile:io.BufferedWriter, imgfile:io.BufferedReader, **kwargs):
     """override optional (if no encryption/compression)"""
     imgfile.seek(frame.offset)
     dstdata = imgfile.read(frame.length)
     if len(dstdata) != frame.length:
         raise ValueError('Image Frame data length does not match, expected {0.length!r} bytes, not {1!r} bytes'.format(frame, len(dstdata)))
     if dstfile is None:
         return dstdata
     else:
         dstfile.write(dstdata)
Example #26
    def __init__(self, stream=None, known_length=None):
        if not stream:
            stream = BytesIO()

        if known_length is None:
            # On some systems, DEFAULT_BUFFER_SIZE defaults to 8192
            # That's over 16 times as big as necessary for most messages
            known_length = max(DEFAULT_BUFFER_SIZE, 1024)

        self.writer = BufferedWriter(stream, buffer_size=known_length)
        self.written_count = 0
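
The buffer_size argument only sizes BufferedWriter's internal buffer; writes smaller than it stay in memory until flush(). A quick standalone demonstration:

from io import BufferedWriter, BytesIO

raw = BytesIO()
w = BufferedWriter(raw, buffer_size=1024)
w.write(b'x' * 10)
print(len(raw.getvalue()))  # 0, still buffered
w.flush()
print(len(raw.getvalue()))  # 10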
Example #27
    def file_rollover(self, t):
        """ rollover the file if necessary
        """
        if self.rollover_time is None:
            # Not initialized yet... set the current day and next rollover
            self.rollover_time = self.rollover_ref + int(
                (t - self.rollover_ref) / 86400) * 86400

        if self.rollover_time <= t:
            # Close the currently open file
            if self.fd is not None:
                self.log.info("Closing %s" % self.fd.name)
                self.fd.close()
                self.fd = None

            Day = strftime("%Y_%m_%d", gmtime(self.rollover_time))
            Year = Day[0:4]
            fileBase = "%s_%s" % (self.filePrefix, Day)
            yearPath = os.path.join(self.root_archive_dir, Year)
            if not os.path.exists(yearPath):
                try:
                    os.makedirs(yearPath)
                except os.error as e:
                    self.log.error("Cannot create %s : %s" %
                                   (yearPath, str(e)))
                    self.log.error("Redirecting to /tmp")
                    yearPath = os.path.join('/tmp', Year)
                    if not os.path.exists(yearPath):
                        try:
                            os.makedirs(yearPath)
                        except os.error as e:
                            self.log.error("Cannot create %s either: %s" %
                                           (yearPath, str(e)))
            if os.path.exists(yearPath):
                filePath = os.path.join(yearPath, fileBase + ".log")
                try:
                    if os.path.exists(filePath):
                        self.fd = BufferedWriter(FileIO(filePath, 'ab'))
                    else:
                        self.fd = BufferedWriter(FileIO(filePath, 'wb'))
                except IOError:
                    self.log.error("Cannot create log file %s" % filePath)
            if os.path.exists(yearPath) and self.raw:
                filePath = os.path.join(yearPath, fileBase + ".raw")
                try:
                    if os.path.exists(filePath):
                        self.fdRaw = BufferedWriter(FileIO(filePath, 'ab'))
                    else:
                        self.fdRaw = BufferedWriter(FileIO(filePath, 'wb'))
                except IOError:
                    self.log.error("Cannot create log file %s" % filePath)

            self.rollover_time += 86400
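
The rollover arithmetic floors the elapsed time since rollover_ref onto whole 86400-second days, so rollover_time is always the most recent day boundary aligned with the reference. A standalone illustration with toy numbers:

rollover_ref = 0    # reference instant, the epoch here for simplicity
t = 200000          # "now", in seconds
rollover_time = rollover_ref + int((t - rollover_ref) / 86400) * 86400
print(rollover_time)  # 172800, i.e. the start of day 2; next rollover at 259200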
Example #28
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = "uploads"

    def setup(self, filename):
        self._path = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        self._dest.close()
        return {"path": path}

    def update_filename(self, request, filename):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting
        """
        self._dir = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR)
        unique_filename = False
        filename_suffix = 0

        print "orig filename: " + os.path.join(self._dir, filename)

        # Check if file at filename exists
        if os.path.isfile(os.path.join(self._dir, filename)):
            while not unique_filename:
                try:
                    if filename_suffix == 0:
                        open(os.path.join(self._dir, filename))
                    else:
                        filename_no_extension, extension = os.path.splitext(filename)
                        print "filename all ready exists. Trying  " + filename_no_extension + str(filename_suffix) + extension
                        open(os.path.join(self._dir, filename_no_extension + str(filename_suffix) + extension))
                    filename_suffix += 1
                except IOError:
                    unique_filename = True

        if filename_suffix == 0:
            print "using filename: " + os.path.join(self._dir, filename)
            return filename
        else:
            print "using filename: " + filename_no_extension + str(filename_suffix) + extension
            return filename_no_extension + str(filename_suffix) + extension
Example #29
 def _extract(self, entry: ArchiveEntry, dstfile: io.BufferedWriter,
              arcfile: io.BufferedReader, **kwargs):
     """override optional (if no encryption/compression)"""
     arcfile.seek(entry.offset)
     dstdata = arcfile.read(entry.length)
     if len(dstdata) != entry.length:
         raise ValueError(
             'Archive Entry data length does not match, expected {0.length!r} bytes, not {1!r} bytes'
             .format(entry, len(dstdata)))
     if dstfile is None:
         return dstdata
     else:
         dstfile.write(dstdata)
Example #30
 def setup(self, filename):
     ext = os.path.splitext(filename)[1]
     self._filename = md5(filename.encode('utf8')).hexdigest() + ext
     self._path = os.path.join(self._upload_dir, self._filename)
     self._realpath = os.path.realpath(os.path.dirname(self._path))
     self._path_orig = self._path
     # noinspection PyBroadException
     try:
         os.makedirs(self._realpath)
     except:
         pass
     self._fullpath = self._realpath + '/' + self._filename
     self._destination = BufferedWriter(FileIO(self._fullpath, "w"))
Example #31
def chunked_download(node_id: str, file: io.BufferedWriter, **kwargs):
    """Keyword args:
    offset (int): byte offset -- start byte for ranged request
    length (int): total file length[!], equal to end + 1
    write_callbacks: (list[function])
    """
    ok_codes = [http.PARTIAL_CONTENT]

    write_callbacks = kwargs.get('write_callbacks', [])

    chunk_start = kwargs.get('offset', 0)
    length = kwargs.get('length', 100 * 1024 ** 4)

    retries = 0
    while chunk_start < length:
        chunk_end = chunk_start + CHUNK_SIZE - 1
        if chunk_end >= length:
            chunk_end = length - 1

        if retries >= CHUNK_MAX_RETRY:
            raise RequestError(RequestError.CODE.FAILED_SUBREQUEST,
                               '[acd_cli] Downloading chunk failed multiple times.')
        r = BackOffRequest.get(get_content_url() + 'nodes/' + node_id + '/content', stream=True,
                               acc_codes=ok_codes,
                               headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)})

        logger.debug('Range %d-%d' % (chunk_start, chunk_end))
        # this should only happen at the end of unknown-length downloads
        if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
            r.close()
            logger.debug('Invalid byte range requested %d-%d' % (chunk_start, chunk_end))
            break
        if r.status_code not in ok_codes:
            r.close()
            retries += 1
            logger.debug('Chunk [%d-%d], retry %d.' % (chunk_start, chunk_end, retries))
            continue

        curr_ln = 0
        # connection exceptions occur here
        for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
            if chunk:  # filter out keep-alive new chunks
                file.write(chunk)
                file.flush()
                for wcb in write_callbacks:
                    wcb(chunk)
                curr_ln += len(chunk)
        chunk_start += CHUNK_SIZE
        retries = 0
        r.close()

    return
Example #32
    def serialise(self, datastructure: dict, filestream: BinaryIO) -> BinaryIO:
        buffer = BufferedWriter(filestream)

        kwargs = {
            "buffer": buffer,
            "encoding": self._encoding,
            "write_through": True
        }

        with TextIOWrapper(**kwargs) as textstream:
            self.dump(datastructure, textstream)
            buffer.flush()
            filestream.seek(0)
            return BytesIO(filestream.getvalue())
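
The TextIOWrapper bridges the text-producing dump onto the binary buffer; write_through=True pushes each text write straight down, and the explicit buffer.flush() drains the BufferedWriter before the stream is rewound and copied. Note that filestream.getvalue() implies the BinaryIO is really a BytesIO. A minimal standalone version, assuming json.dump as the serializer:

import json
from io import BufferedWriter, BytesIO, TextIOWrapper

filestream = BytesIO()
buffer = BufferedWriter(filestream)
with TextIOWrapper(buffer, encoding='utf-8', write_through=True) as textstream:
    json.dump({'a': 1}, textstream)
    buffer.flush()
    filestream.seek(0)
    copy = BytesIO(filestream.getvalue())
print(copy.getvalue())  # b'{"a": 1}'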
Example #33
 def setup(self, filename, upload_to):
     """ Creates the filename on the system, along with the required folder structure. """
     self.filename = filename
     self.upload_to = upload_to
     #logger.debug('File: '+self.filename)
     self._path = self.update_filename()
     #logger.debug('Dir: '+self._dir)
     #logger.debug('Path: '+self._path)
     #logger.debug(os.path.realpath(os.path.dirname(self._path)))
     try:
         os.makedirs(os.path.realpath(os.path.dirname(self._path)))
     except OSError:
         pass
     self._dest = BufferedWriter(FileIO(self._path, "w"))
Example #34
def export_stream(model, **kwargs):
    stream = BufferedWriter(BytesIO())  # a BufferedWriter needs a writable raw stream; BytesIO assumed here
    if not isinstance(model, Course):
        raise NotImplementedError(
            'cannot export anything other than a course model ({} provided)'.
            format(model.__class__.__name__))

    with zipfile.ZipFile(stream, 'w') as zfile:
        zfile.write(export_meta(model), IMSCC_MANIFEST_FILENAME)
        file_output(model, zfile)
        qtis = course_xmlqti_builder(model)  # we assume it is a course model
        discussions = course_xmlqti_builder(model)
        stream.seek(0)
    return stream
Example #35
def save_bits(file_name: str, bool_array: nparray, packed=True) -> int:
    '''
    Save bits to a file from a bool array.

    Parameters
    ----------
    file_name: string
        The name of the file to save.
    bool_array: numpy.array
        The bool array.
    packed: bool
        Whether to pack the bits into bytes.
        Defaults to True.

    Returns the number of bytes saved.
    '''
    with open(file_name, 'wb') as bit_file:
        writer = BufferedWriter(bit_file)
        count = 0

        if packed:
            for byte in pack_bools_to_bytes(bool_array):
                writer.write(byte)
                count += 1

        else:
            for byte in bools_to_bytes(bool_array):
                writer.write(byte)
                count += 1

        writer.flush()

    return count
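
A side note on this example: open(file_name, 'wb') already returns an io.BufferedWriter, so the extra wrap adds a second buffering layer (harmless, but redundant); the flush() before leaving the block is what matters. The type can be checked directly:

import io

with open('bits.bin', 'wb') as f:
    print(isinstance(f, io.BufferedWriter))  # True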
Example #36
class PANDAAbstractUploadBackend(AbstractUploadBackend):
    """
    Customized backend to handle AJAX uploads.
    """
    def update_filename(self, request, filename):
        """
        Verify that the filename is unique, if it isn't append and iterate
        a counter until it is.
        """
        self._original_filename = filename

        filename = self._original_filename
        root, ext = os.path.splitext(self._original_filename)
        path = os.path.join(settings.MEDIA_ROOT, filename)

        i = 1

        while os.path.exists(path):
            filename = '%s%i%s' % (root, i, ext)
            path = os.path.join(settings.MEDIA_ROOT, filename)
            i += 1

        return filename

    def setup(self, filename):
        """
        Open the destination file for writing.
        """
        self._path = os.path.join(settings.MEDIA_ROOT, filename)

        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            pass

        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        """
        Write a chunk of data to the destination.
        """
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        """
        Close the destination file.
        """
        self._dest.close()
Example #37
def download(url: str, fhandle: BufferedWriter, on_progress: Callable = None):
    """
    Download a file to a specific target. Inspired from
    Patrick Massot's code in leanproject.

    :param url: HTTP(s) url to download file from (GET request)
    :param path: File path on local filesystem
    :param on_progress: callback(idx,count, progress)
                        to monitor download progress.
    :return: the sha1 checksum of the downloaded file
    """

    # TODO(florian): better error handling ?
    # -> ConnectionError raised by requests.get
    # -> HTTPError raised by raise_for_status

    sha1 = hashlib.sha1()

    response = requests.get(url, stream=True)
    response.raise_for_status()  # Raise HTTPError if any

    tot_len = response.headers.get("content-length", 0)

    if not tot_len:
        fhandle.write(response.content)
        sha1.update(response.content)
    else:
        dl_size = 0
        tot_len = int(tot_len)
        progress = 0
        progress_prev = 0

        for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
            dl_size += len(chunk)
            fhandle.write(chunk)
            sha1.update(chunk)

            # Compute and log progress at each 10% step
            progress_prev = progress
            progress = (100 * (dl_size / tot_len))
            if int(progress) % 10 == 0 and int(progress) != int(progress_prev):
                log.info(_("Progress : {:03d}%").format(int(progress)))

            # Trigger progress callback
            if on_progress is not None:
                on_progress(dl_size, tot_len, progress)

    return sha1.hexdigest()
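
A usage sketch for download; the URL below is a placeholder, not a real endpoint:

with open('archive.tar.gz', 'wb') as fhandle:
    checksum = download('https://example.com/archive.tar.gz', fhandle)
print(checksum)  # sha1 hex digest of the bytes written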
Example #38
def stream_decompress(instr: io.BufferedReader,
                      outstr: io.BufferedWriter,
                      chunk_size=DEFAULT_MAX_CHUNK):
    """
    A stream processor that call decompress on bytes available in instr
    And write them into outstr
    :param instr: buffered reader
    :param outstr: buffered writer
    :param chunk: the sizeof chunk to read at one time. if 0 attempt to read as much as possible.
    :returns: compressed data size, original consumed data size
    """
    orig_data_size: int = 0
    decomp_data_size: int = 0
    inbytes: bytes = instr.read(chunk_size)

    # we find chunk indexes
    # Note: we dont care about next_ori_chunk_idx for decompressing everything
    # next_ori_chunk_idx = struct.unpack('>H', inbytes[0:2]) if inbytes else None
    next_comp_chunk_idx: int = struct.unpack('>H',
                                             inbytes[2:4]) if inbytes else None
    # careful : next_ori_chunk_idx is the location *after* decompression (ie. in the original uncompressed sequence)
    cur_chunk_idx = 4
    while inbytes:

        decomp_data = bytearray()
        while len(inbytes) > next_comp_chunk_idx:

            # if next chunk index is already in range, we can already uncompress this chunk
            decomp_data += decompress(
                inbytes[cur_chunk_idx:next_comp_chunk_idx])

            # find next chunk
            cur_chunk_idx = next_comp_chunk_idx
            next_comp_chunk_idx = inbytes[next_comp_chunk_idx]

        orig_data_size += len(inbytes)
        decomp_data_size += len(decomp_data)

        outstr.write(bytes(decomp_data))

        # correct the next chunk index value
        next_comp_chunk_idx = next_comp_chunk_idx - len(inbytes)
        cur_chunk_idx = 0

        # read more data in case it is now available
        inbytes = instr.read(chunk_size)

    return orig_data_size, decomp_data_size
Example #39
File: utils.py Project: getzlab/canine
def pandas_write_hdf5_buffered(df: pd.DataFrame, key: str,
                               buf: io.BufferedWriter):
    """
	Write a Pandas dataframe in HDF5 format to a buffer.
    """

    ## I am getting
    ##   HDF5ExtError("Unable to open/create file '/dev/null'")
    ##   unable to truncate a file which is already open
    with write_lock:
        with pd.HDFStore("/dev/null",
                         mode="w",
                         driver="H5FD_CORE",
                         driver_core_backing_store=0) as store:
            store["results"] = df
            buf.write(store._handle.get_file_image())
Example #40
 def setup(self, filename, *args, **kwargs):
     self._path = self.get_path(filename, *args, **kwargs)
     try:
         os.makedirs(os.path.realpath(os.path.dirname(self._path)))
     except OSError:
         pass
     self._dest = BufferedWriter(FileIO(self._path, "w"))
Example #41
    def read(self, size):
        """
        Reads (receives) a whole block of size bytes from the connected peer.

        :param size: the size of the block to be read.
        :return: the read data with len(data) == size.
        """
        if self._socket is None:
            self._raise_connection_reset()

        # TODO Remove the timeout from this method, always use previous one
        with BufferedWriter(BytesIO(), buffer_size=size) as buffer:
            bytes_left = size
            while bytes_left != 0:
                try:
                    partial = self._socket.recv(bytes_left)
                except socket.timeout as e:
                    raise TimeoutError() from e
                except ConnectionError:
                    self._raise_connection_reset()
                except OSError as e:
                    if e.errno in CONN_RESET_ERRNOS:
                        self._raise_connection_reset()
                    else:
                        raise

                if len(partial) == 0:
                    self._raise_connection_reset()

                buffer.write(partial)
                bytes_left -= len(partial)

            # If everything went fine, return the read bytes
            buffer.flush()
            return buffer.raw.getvalue()
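
The BufferedWriter over BytesIO serves as an accumulator here; the same read-exactly-size loop is often written with a plain bytearray. A sketch of the equivalent core loop, without the error translation above:

def read_exact(sock, size: int) -> bytes:
    buf = bytearray()
    while len(buf) < size:
        partial = sock.recv(size - len(buf))
        if not partial:
            raise ConnectionResetError('peer closed the connection')
        buf += partial
    return bytes(buf)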
Example #42
 def setup(self, filename):
     self._path = os.path.join(settings.MEDIA_ROOT, self.upload_dir, self._timedir, filename)
     try:
         os.makedirs(os.path.realpath(os.path.dirname(self._path)))
     except OSError:
         pass
     self._dest = BufferedWriter(FileIO(self._path, "w"))
Example #43
 def setup(self, filename, *args, **kwargs):
     self._path = os.path.join(
         settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
     try:
         os.makedirs(os.path.realpath(os.path.dirname(self._path)))
     except OSError:
         pass
     self._dest = BufferedWriter(FileIO(self._path, "w"))
Example #44
File: ajax.py Project: cihatkk/nnmware
 def setup(self, filename):
     ext = os.path.splitext(filename)[1]
     self._filename = md5(filename.encode('utf8')).hexdigest() + ext
     self._path = os.path.join(self._upload_dir, self._filename)
     try:
         os.makedirs(os.path.realpath(os.path.dirname(self._path)))
     except OSError:
         pass
     self._destination = BufferedWriter(FileIO(self._path, "w"))
Example #45
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = "uploads"

    def setup(self, filename):
        self._path = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "wb"))

    def upload_chunk(self, chunk):
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        return {"path": path}
Example #46
File: storage.py Project: niran/panda
    def setup(self, filename):
        """
        Open the destination file for writing.
        """
        self._path = os.path.join(settings.MEDIA_ROOT, filename)

        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            pass

        self._dest = BufferedWriter(FileIO(self._path, "w"))
Example #47
 def setup(self, filename):
     self._relative_path = os.path.normpath(
         os.path.join(
             force_unicode(
                 datetime.datetime.now().strftime( # allow %Y, %s, etc
                     smart_str(self.UPLOAD_DIR))),
             filename))
     self._path = os.path.join(settings.MEDIA_ROOT, self._relative_path)
     try:
         os.makedirs(os.path.realpath(os.path.dirname(self._path)))
     except OSError:
         pass
     self._dest = BufferedWriter(FileIO(self._path, "w"))
Example #48
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = 'tmp'

    def update_filename(self, request, filename):
        name, ext = os.path.splitext(filename)
        return slughifi(name) + ext

    def setup(self, filename):
        self._path = os.path.join(self.UPLOAD_DIR, filename)
        self.path = default_storage.save(self._path, ContentFile(''))
        self._abs_path = default_storage.path(self.path)
        self._dest = BufferedWriter(FileIO(self._abs_path, "w"))

    def upload_chunk(self, chunk):
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        self._dest.close()

        context = {'thumbnail_path': self._path, 'file_name': filename, }
        thumbnail = render_to_string('ajaxupload/includes/thumbnail.html', context)
        return {"path": self._path, 'thumbnail': thumbnail}
Example #49
class FileDocumentLocalUploadBackend(LocalUploadBackend):
    def upload_to(self):
        d = datetime.datetime.now()
        return d.strftime('filedocument/%Y/%m/%d')


    def setup(self, filename, *args, **kwargs):
        self._path = os.path.join(
            settings.MEDIA_ROOT, self.upload_to(), filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))


    def upload_complete(self, request, filename, *args, **kwargs):
        cur_agent = kwargs['cur_agent']
        permissions = kwargs['permissions']
        relative_path = self.upload_to() + "/" + filename
        full_path = settings.MEDIA_URL + relative_path
        name = filename

        # auto categorize images
        image_extensions = ('.jpg', '.jpeg', '.png', '.gif',)
        if filename.endswith(image_extensions):
            new_item = ImageDocument(name=name, datafile=relative_path)
        else:
            new_item = FileDocument(name=name, datafile=relative_path)

        # link to item
        new_item.save_versioned(action_agent=cur_agent, initial_permissions=permissions)
        self._dest.close()
        return {
            "path": full_path,
            "url": new_item.get_absolute_url(),
            "name": new_item.name,
        }
Example #50
    def setup(self, filename):
        dirpath = self.quast_session.get_contigs_dirpath()
        logger.info('filename is %s' % filename)
        logger.info('contigs dirpath is %s' % dirpath)

        if not os.path.exists(dirpath):
            logger.error("contigs directory doesn't exist")
            return False

        fpath = os.path.join(dirpath, filename)

        self._path = fpath
        self._dest = BufferedWriter(FileIO(self._path, 'w'))
        return True
Example #52
File: content.py Project: nabcos/acd_cli
def chunked_download(node_id: str, file: io.BufferedWriter, **kwargs):
    """Keyword args:
    offset: byte offset
    length: total length[!], equal to end + 1
    write_callback
    """
    ok_codes = [http.PARTIAL_CONTENT]

    write_callback = kwargs.get('write_callback', None)

    length = kwargs.get('length', 100 * 1024 ** 4)

    pgo = progress.Progress()
    chunk_start = kwargs.get('offset', 0)
    retries = 0
    while chunk_start < length:
        chunk_end = chunk_start + CHUNK_SIZE - 1
        if chunk_end >= length:
            chunk_end = length - 1

        if retries >= CHUNK_MAX_RETRY:
            raise RequestError(RequestError.CODE.FAILED_SUBREQUEST,
                               '[acd_cli] Downloading chunk failed multiple times.')
        r = BackOffRequest.get(get_content_url() + 'nodes/' + node_id + '/content', stream=True,
                               acc_codes=ok_codes,
                               headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)})

        logger.debug('Range %d-%d' % (chunk_start, chunk_end))
        # this should only happen at the end of unknown-length downloads
        if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
            r.close()
            logger.debug('Invalid byte range requested %d-%d' % (chunk_start, chunk_end))
            break
        if r.status_code not in ok_codes:
            r.close()
            retries += 1
            logger.debug('Chunk [%d-%d], retry %d.' % (chunk_start, chunk_end, retries))
            continue

        try:
            curr_ln = 0
            for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
                if chunk:  # filter out keep-alive new chunks
                    file.write(chunk)
                    file.flush()
                    if write_callback:
                        write_callback(chunk)
                    curr_ln += len(chunk)
                    pgo.print_progress(length, curr_ln + chunk_start)
            chunk_start += CHUNK_SIZE
            retries = 0
            r.close()
        except (ConnectionError, ReadTimeoutError) as e:
            file.close()
            raise RequestError(RequestError.CODE.READ_TIMEOUT, '[acd_cli] Timeout. ' + e.__str__())

    print()  # break progress line
    return
Example #53
class LocalUploadBackend(AbstractUploadBackend):
    #UPLOAD_DIR = "uploads"
    # The below key must be synchronized with the implementing project
    # Used to store an array of unclaimed file_pks in the django session
    # So they can be claimed later when the anon user authenticates
    #SESSION_UNCLAIMED_FILES_KEY = KarmaSettings.SESSION_UNCLAIMED_FILES_KEY

    # When a file is uploaded anonymously, 
    # What username should we assign ownership to?
    # This is important because File.save
    # behavior will not set awarded_karma to True 
    # until an owner is assigned who has username != this
    #DEFAULT_UPLOADER_USERNAME = KarmaSettings.DEFAULT_UPLOADER_USERNAME

    def setup(self, filename):
        self._path = os.path.join(
            settings.MEDIA_ROOT, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        self._dest.write(chunk)

    def upload(self, uploaded, filename, raw_data):
        """ :raw_data: is 0/1 """
        try:
            if raw_data:
                # File was uploaded via ajax, and is streaming in.
                chunk = uploaded.read(self.BUFFER_SIZE)
                while len(chunk) > 0:
                    self.upload_chunk(chunk)
                    chunk = uploaded.read(self.BUFFER_SIZE)
            else:
                # File was uploaded via a POST, and is here.
                for chunk in uploaded.chunks():
                    self.upload_chunk(chunk)
            return True
        except Exception:
            # something went wrong during the write
            return False

    def upload_complete(self, request, filename, upload):
        path = settings.MEDIA_URL + "/" + filename
        self._dest.close()

        self._dir = settings.MEDIA_ROOT

        # Avoid File.objects.create, as this will try to make
        # another file copy at FileField's 'upload_to' dir
        print("creating note")
        note = Note()
        note.name = filename
        note.note_file = os.path.join(self._dir, filename)
        note.course_id = request.GET['course_id']
        note.draft = True  # pending approval from user
        print("saving note")
        note.save()

        # FIXME: make this get-or-create
        print("setting up session vars")
        if 'uploaded_files' in request.session:
            request.session['uploaded_files'].append(note.pk)
        else:
            request.session['uploaded_files'] = [note.pk]

        # Asynchronously process document with Google Documents API
        print "upload_complete, firing task"
        tasks.process_document.delay(note)

        return {'note_url': note.get_absolute_url()}

    def update_filename(self, request, filename):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting
        """
        self._dir = settings.MEDIA_ROOT
        unique_filename = False
        filename_suffix = 0

        # Keep bumping the suffix until no file with the candidate name exists
        if os.path.isfile(os.path.join(self._dir, filename)):
            filename_no_extension, extension = os.path.splitext(filename)
            while not unique_filename:
                if filename_suffix == 0:
                    candidate = filename
                else:
                    candidate = filename_no_extension + str(filename_suffix) + extension
                if os.path.isfile(os.path.join(self._dir, candidate)):
                    filename_suffix += 1
                else:
                    unique_filename = True

        if filename_suffix == 0:
            return filename
        else:
            return filename_no_extension + str(filename_suffix) + extension
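The same unique-filename idea can be written as a self-contained helper; a minimal sketch assuming only the standard library, with hypothetical argument names:

import os

def unique_filename(directory, filename):
    base, ext = os.path.splitext(filename)
    candidate, suffix = filename, 0
    # bump the numeric suffix until no file with the candidate name exists
    while os.path.isfile(os.path.join(directory, candidate)):
        suffix += 1
        candidate = base + str(suffix) + ext
    return candidate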
Example #54
class BinaryWriter:
    """
    Small utility class to write binary data.
    Also creates a "Memory Stream" if necessary
    """

    def __init__(self, stream=None):
        if not stream:
            stream = BytesIO()

        self.writer = BufferedWriter(stream)
        self.written_count = 0

    # region Writing

    # "All numbers are written as little endian." |> Source: https://core.telegram.org/mtproto
    def write_byte(self, value):
        """Writes a single byte value"""
        self.writer.write(pack('B', value))
        self.written_count += 1

    def write_int(self, value, signed=True):
        """Writes an integer value (4 bytes), which can or cannot be signed"""
        self.writer.write(
            int.to_bytes(
                value, length=4, byteorder='little', signed=signed))
        self.written_count += 4

    def write_long(self, value, signed=True):
        """Writes a long integer value (8 bytes), which can or cannot be signed"""
        self.writer.write(
            int.to_bytes(
                value, length=8, byteorder='little', signed=signed))
        self.written_count += 8

    def write_float(self, value):
        """Writes a floating point value (4 bytes)"""
        self.writer.write(pack('<f', value))
        self.written_count += 4

    def write_double(self, value):
        """Writes a floating point value (8 bytes)"""
        self.writer.write(pack('<d', value))
        self.written_count += 8

    def write_large_int(self, value, bits, signed=True):
        """Writes a n-bits long integer value"""
        self.writer.write(
            int.to_bytes(
                value, length=bits // 8, byteorder='little', signed=signed))
        self.written_count += bits // 8

    def write(self, data):
        """Writes the given bytes array"""
        self.writer.write(data)
        self.written_count += len(data)

    # endregion

    # region Telegram custom writing

    def tgwrite_bytes(self, data):
        """Write bytes by using Telegram guidelines"""
        if len(data) < 254:
            padding = (len(data) + 1) % 4
            if padding != 0:
                padding = 4 - padding

            self.write(bytes([len(data)]))
            self.write(data)

        else:
            padding = len(data) % 4
            if padding != 0:
                padding = 4 - padding

            self.write(bytes([254]))
            self.write(bytes([len(data) % 256]))
            self.write(bytes([(len(data) >> 8) % 256]))
            self.write(bytes([(len(data) >> 16) % 256]))
            self.write(data)

        self.write(bytes(padding))

    def tgwrite_string(self, string):
        """Write a string by using Telegram guidelines"""
        self.tgwrite_bytes(string.encode('utf-8'))

    def tgwrite_bool(self, boolean):
        """Write a boolean value by using Telegram guidelines"""
        #                     boolTrue                boolFalse
        self.write_int(0x997275b5 if boolean else 0xbc799737, signed=False)

    def tgwrite_date(self, datetime):
        """Converts a Python datetime object into Unix time (used by Telegram) and writes it"""
        value = 0 if datetime is None else int(datetime.timestamp())
        self.write_int(value)

    def tgwrite_object(self, tlobject):
        """Writes a Telegram object"""
        tlobject.on_send(self)

    def tgwrite_vector(self, vector):
        """Writes a vector of Telegram objects"""
        self.write_int(0x1cb5c415, signed=False)  # Vector's constructor ID
        self.write_int(len(vector))
        for item in vector:
            self.tgwrite_object(item)

    # endregion

    def flush(self):
        """Flush the current stream to "update" changes"""
        self.writer.flush()

    def close(self):
        """Close the current stream"""
        self.writer.close()

    def get_bytes(self, flush=True):
        """Get the current bytes array content from the buffer, optionally flushing first"""
        if flush:
            self.writer.flush()
        return self.writer.raw.getvalue()

    def get_written_bytes_count(self):
        """Gets the count of bytes written in the buffer.
           This may NOT be equal to the stream length if one was provided when initializing the writer"""
        return self.written_count

    # with block
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
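A short usage sketch for BinaryWriter, assuming BytesIO, BufferedWriter and struct.pack are imported as the class requires:

with BinaryWriter() as writer:
    writer.write_int(42)            # 4 bytes, little endian
    writer.tgwrite_string('hello')  # length byte + 5 data bytes + 2 padding bytes
    payload = writer.get_bytes()
    assert len(payload) == writer.get_written_bytes_count() == 12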
Example #55
class MyBaseUploadBackend(AbstractUploadBackend):
    def __init__(self, dirname, **kwargs):
        super(MyBaseUploadBackend, self).__init__(**kwargs)
        self.report_id = None

    def set_report_id(self, report_id):
        self.report_id = report_id
        try_number = 1
        while True:
            try:
                self.quast_session = QuastSession.objects.get(report_id=self.report_id)
                return True
            except QuastSession.DoesNotExist:
                logger.error('No quast session with report_id=%s' % self.report_id)
                return False
            except OperationalError:
                logger.error(traceback.format_exc())
                try_number += 1
                logger.error('Retrying. Try number ' + str(try_number))

    def setup(self, filename):
        dirpath = self.quast_session.get_contigs_dirpath()
        logger.info('filename is %s' % filename)
        logger.info('contigs dirpath is %s' % dirpath)

        if not os.path.exists(dirpath):
            logger.error("contigs directory doesn't exist")
            return False

        fpath = os.path.join(dirpath, filename)

        self._path = fpath
        self._dest = BufferedWriter(FileIO(self._path, 'w'))
        return True

    def upload_chunk(self, chunk):
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        self._dest.close()

        file_index = "%x" % random.getrandbits(128)
        c_fn = ContigsFile(fname=filename, file_index=file_index)
        c_fn.save()
        qc = QuastSession_ContigsFile(contigs_file=c_fn, quast_session=self.quast_session)
        qc.save()

        logger.info('%s' % filename)

        return {
            'file_index': file_index,
        }

    def update_filename(self, request, filename):
        dirpath = self.quast_session.get_contigs_dirpath()
        logger.info('contigs dirpath is %s' % dirpath)

        fpath = os.path.join(dirpath, filename)
        logger.info('file path is %s' % fpath)

        i = 2
        base_fpath = fpath
        base_filename = filename
        while os.path.isfile(fpath):
            fpath = str(base_fpath) + '__' + str(i)
            filename = str(base_filename) + '__' + str(i)
            i += 1

        return filename

    def remove(self, request):
        if 'fileIndex' not in request.GET:
            logger.error('Request.GET must contain "fileIndex"')
            return False, 'Request.GET must contain "fileIndex"'

        file_index = request.GET['fileIndex']
        try:
            contigs_file = self.quast_session.contigs_files.get(file_index=file_index)
        except ContigsFile.DoesNotExist:
            logger.error('No file with such index %s in this quast_session' % file_index)
            return False, 'No file with such index'

        success, msg = self.__remove(contigs_file)
        return success, msg

#        if contigs_file.user_session != self.user_session:
#            logger.error('This file (%s) belongs to session %s, this session is %s'
#                         % (fname, str(contigs_file.user_session ), str(self.user_session.session_key)))
#            return False, 'This file does not belong to this session'


    def __remove(self, contigs_file):
        fname = contigs_file.fname
        file_index = contigs_file.file_index  # needed for the log messages below
        contigs_fpath = os.path.join(self.quast_session.get_contigs_dirpath(), fname)

        if os.path.isfile(contigs_fpath):
            try:
                os.remove(contigs_fpath)
            except IOError as e:
                logger.error('IOError when removing "%s", fileIndex=%s: %s' % (fname, file_index, e))
                return False, 'Cannot remove file'

        try:
            contigs_file.delete()
        except DatabaseError as e:
            logger.warning('DatabaseError when removing "%s", fileIndex=%s: %s' % (fname, file_index, e))
            return False, 'Database error when removing file'
        except Exception as e:
            logger.error('Exception when removing "%s", fileIndex=%s: %s' % (fname, file_index, e))
            return False, 'Database exception when removing file'

        return True, ''

    def remove_all(self, request):
#        if 'fileNames' not in request.GET:
#            logger.error('remove_all: Request.GET must contain "fileNames"')
#            return False
#
#        file_names_one_string = request.GET['fileNames']
#        file_names = file_names_one_string.split('\n')[:-1]

#        this_user_contigs_files = ContigsFile.objects.filter(user_session=self.user_session)

        logger.info('uploader_backend.remove_all')
        for c_f in self.quast_session.contigs_files.all():
            success, msg = self.__remove(c_f)

        return True

    def get_uploads(self, request):
        contigs_files = self.quast_session.contigs_files.all()

        return [{"fileName": c_f.fname,
                 "fileIndex": c_f.file_index,
                 "file_index": c_f.file_index,
               # "fileSize": c_f.file_size if c_f.file_size else None,
                 } for c_f in contigs_files]
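Note that set_report_id above retries OperationalError without bound. A bounded variant is sketched below; MAX_TRIES is a hypothetical limit, and QuastSession, OperationalError and logger come from the surrounding module:

MAX_TRIES = 5  # hypothetical upper bound on retries

def get_session_with_retry(report_id):
    for try_number in range(1, MAX_TRIES + 1):
        try:
            return QuastSession.objects.get(report_id=report_id)
        except QuastSession.DoesNotExist:
            return None  # a missing session is not retriable
        except OperationalError:
            logger.error('Retrying. Try number %d' % try_number)
    return None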
Example #57
def write(self, b):
    ret = BufferedWriter.write(self, b)
    self.flush()
    return ret
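For context, a runnable sketch of the auto-flushing subclass this method implies; the class name and file path are hypothetical:

from io import BufferedWriter, FileIO

class AutoFlushWriter(BufferedWriter):
    def write(self, b):
        ret = BufferedWriter.write(self, b)
        self.flush()  # push every write straight through to the raw stream
        return ret

w = AutoFlushWriter(FileIO('/tmp/out.bin', 'w'))
w.write(b'data\n')  # visible on disk immediately, at the cost of throughput
w.close()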
Example #58
class AjaxUploader(object):
    BUFFER_SIZE = 10485760  # 10MB

    def __init__(self, filetype='file', upload_dir='files', size_limit=10485760):
        self._upload_dir = os.path.join(settings.MEDIA_ROOT, upload_dir, get_date_directory())
        self._filetype = filetype
        if filetype == 'image':
            self._save_format = setting('IMAGE_UPLOAD_FORMAT', 'JPEG')
        else:
            self._save_format = None
        self._size_limit = size_limit

    def max_size(self):
        """
        Returns True if the written size exceeds the limit (after removing the file).
        """
        if self._destination.tell() > self._size_limit:
            self._destination.close()
            os.remove(self._path)
            return True
        return False

    def setup(self, filename):
        ext = os.path.splitext(filename)[1]
        self._filename = md5(filename.encode('utf8')).hexdigest() + ext
        self._path = os.path.join(self._upload_dir, self._filename)
        # noinspection PyBroadException
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except OSError:
            pass  # the directory already exists
        self._destination = BufferedWriter(FileIO(self._path, "w"))

    def handle_upload(self, request):
        is_raw = True
        if request.FILES:
            is_raw = False
            if len(request.FILES) == 1:
                upload = list(request.FILES.values())[0]
            else:
                return dict(success=False, error=_("Bad upload."))
            filename = upload.name
        else:
            # the file is stored raw in the request
            upload = request
            # get file size
            try:
                filename = request.GET['qqfile']
            except KeyError as aerr:
                return dict(success=False, error=_("Can't read file name"))
        self.setup(filename)
        # noinspection PyBroadException
        try:
            if is_raw:
                # File was uploaded via ajax, and is streaming in.
                chunk = upload.read(self.BUFFER_SIZE)
                while len(chunk) > 0:
                    self._destination.write(chunk)
                    if self.max_size():
                        raise IOError
                    chunk = upload.read(self.BUFFER_SIZE)
            else:
                # File was uploaded via a POST, and is here.
                for chunk in upload.chunks():
                    self._destination.write(chunk)
                    if self.max_size():
                        raise IOError
        except Exception:
            # something went wrong during the write
            return dict(success=False, error=_("Upload error"))
        self._destination.close()
        if self._filetype == 'image':
            # noinspection PyBroadException
            try:
                i = Image.open(self._path)
            except Exception:
                os.remove(self._path)
                return dict(success=False, error=_("File is not image format"))
            f_name, f_ext = os.path.splitext(self._filename)
            f_without_ext = os.path.splitext(self._path)[0]
            new_path = ".".join([f_without_ext, self._save_format.lower()])
            if setting('IMAGE_STORE_ORIGINAL', False):
                # TODO: the extension may need to be changed
                orig_path = ".".join([f_without_ext + '_orig', self._save_format.lower()])
                shutil.copy2(self._path, orig_path)
            i.thumbnail((1200, 1200), Image.ANTIALIAS)
            # noinspection PyBroadException
            try:
                if self._path == new_path:
                    i.save(self._path, self._save_format)
                else:
                    i.save(new_path, self._save_format)
                    os.remove(self._path)
                    self._path = new_path
            except Exception:
                # noinspection PyBroadException
                try:
                    os.remove(self._path)
                    os.remove(new_path)
                except Exception:
                    pass
                return dict(success=False, error=_("Error saving image"))
            self._filename = ".".join([f_name, self._save_format.lower()])
        return dict(success=True, fullpath=self._path, path=os.path.relpath(self._path, '/' + settings.MEDIA_ROOT),
                    old_filename=filename, filename=self._filename)
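The size-guard pattern above (write a chunk, then compare tell() against a limit) also works standalone; a minimal sketch with hypothetical names, using only the standard library:

from io import BufferedWriter, FileIO

SIZE_LIMIT = 10 * 1024 * 1024  # 10 MB, mirroring BUFFER_SIZE above

def copy_with_limit(source, path, bufsize=64 * 1024):
    dest = BufferedWriter(FileIO(path, 'w'))
    try:
        chunk = source.read(bufsize)
        while chunk:
            dest.write(chunk)
            if dest.tell() > SIZE_LIMIT:  # tell() includes buffered bytes
                raise IOError('size limit exceeded')
            chunk = source.read(bufsize)
    finally:
        dest.close()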
Example #59
def write(data, wfile: io.BufferedWriter) -> None:
    wfile.write(json.dumps(data).encode() + b"\n")
    wfile.flush()
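A brief usage sketch for the JSON-line writer above; the path is hypothetical, and io.open in 'wb' mode returns the io.BufferedWriter the signature expects:

import io

with io.open('/tmp/events.jsonl', 'wb') as wfile:
    write({'event': 'start', 'id': 1}, wfile)
    write({'event': 'stop', 'id': 1}, wfile)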