Example 1
def stream_compress(instr: io.BufferedReader,
                    outstr: io.BufferedWriter,
                    chunk_size=DEFAULT_MAX_CHUNK):
    """
    A stream processor that calls compress on bytes available in instr
    and writes them into outstr.
    :param instr: buffered reader
    :param outstr: buffered writer
    :param chunk_size: the size of chunk to read at one time; if 0, attempt to read as much as possible.
    :returns: original consumed data size, compressed data size
    """
    orig_data_size: int = 0
    comp_data_size: int = 0
    inbytes: bytes = instr.read(chunk_size)
    while inbytes:

        data_comp = compress(inbytes)

        # we prepend with uncompressed data chunk size
        # to be used later for random access

        orig_data_size += len(inbytes)

        # '>H' is unsigned short format, fit on 2 bytes.
        output = struct.pack('>H', len(inbytes)) + struct.pack(
            '>H', len(data_comp)) + data_comp
        # we need to include the chunk indexes in the compressed size
        comp_data_size += len(output)

        outstr.write(output)

        # keep consuming data, in case more is available...
        inbytes = instr.read(chunk_size)

    return orig_data_size, comp_data_size
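A minimal way to exercise stream_compress end to end. The names compress and DEFAULT_MAX_CHUNK are left undefined by the example, so zlib.compress and an arbitrary 16 KiB chunk size are used as stand-ins here; this is a sketch, not the original project's setup.

import io
import zlib

compress = zlib.compress          # assumption: stand-in for the undefined compress()
DEFAULT_MAX_CHUNK = 16 * 1024     # assumption: arbitrary chunk size, must fit in '>H'

payload = b"hello buffered world " * 1000
instr = io.BufferedReader(io.BytesIO(payload))
outstr = io.BufferedWriter(io.BytesIO())
orig, comp = stream_compress(instr, outstr, DEFAULT_MAX_CHUNK)
outstr.flush()
print(orig, comp)  # bytes consumed vs. bytes written (includes the 4-byte per-chunk headers)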
Example 2
def consecutive_download(node_id: str, file: io.BufferedWriter, **kwargs):
    """Keyword args: write_callback"""
    r = BackOffRequest.get(get_content_url() + 'nodes/' + node_id + '/content', stream=True)
    if r.status_code not in OK_CODES:
        raise RequestError(r.status_code, r.text)

    write_callback = kwargs.get('write_callback', None)

    total_ln = int(r.headers.get('content-length'))
    length = kwargs.get('length', None)
    if length and total_ln != length:
        logging.info('Length mismatch: argument %d, content %d' % (length, total_ln))

    pgo = progress.Progress()
    curr_ln = 0
    try:
        for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
            if chunk:  # filter out keep-alive new chunks
                file.write(chunk)
                file.flush()
                if write_callback:
                    write_callback(chunk)
                curr_ln += len(chunk)
                pgo.print_progress(total_ln, curr_ln)
    except (ConnectionError, ReadTimeoutError) as e:
        raise RequestError(RequestError.CODE.READ_TIMEOUT, '[acd_cli] Timeout. ' + e.__str__())
    print()  # break progress line
    r.close()
    return
Example 3
class PANDAUploadBackend(AbstractUploadBackend):
    """
    Customized backend to handle AJAX uploads.
    """
    def update_filename(self, request, filename):
        """
        Verify that the filename is unique, if it isn't append and iterate
        a counter until it is.
        """
        self._original_filename = filename

        filename = self._original_filename
        root, ext = os.path.splitext(self._original_filename)
        path = os.path.join(settings.MEDIA_ROOT, filename)

        i = 1

        while os.path.exists(path):
            filename = '%s%i%s' % (root, i, ext)
            path = os.path.join(settings.MEDIA_ROOT, filename)
            i += 1

        return filename 

    def setup(self, filename):
        """
        Open the destination file for writing.
        """
        self._path = os.path.join(settings.MEDIA_ROOT, filename)

        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except:
            pass

        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        """
        Write a chunk of data to the destination.
        """
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        """
        Close the destination file and create an Upload object in the
        database recording its existence.
        """
        self._dest.close()

        root, ext = os.path.splitext(filename)
        path = os.path.join(settings.MEDIA_ROOT, filename)
        size = os.path.getsize(path)

        upload = Upload.objects.create(
            filename=filename,
            original_filename=self._original_filename,
            size=size)

        return { 'id': upload.id }
Example 4
def save_bits(file_name: str, bool_array: nparray, packed=True) -> int:
    '''
    Save bits to a file from a bool array.

    Parameters
    ----------
    file_name: string
        The name of the file to save.
    bool_array: numpy.array
        The bool array.
    packed: bool
        Whether to pack the bits into bytes.
        Defaults to True.

    Returns the number of bytes saved.
    '''
    with open(file_name, 'wb') as bit_file:
        writer = BufferedWriter(bit_file)
        count = 0

        if packed:
            for byte in pack_bools_to_bytes(bool_array):
                writer.write(byte)
                count += 1

        else:
            for byte in bools_to_bytes(bool_array):
                writer.write(byte)
                count += 1

        writer.flush()

    return count
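pack_bools_to_bytes and bools_to_bytes are not shown. Because save_bits calls writer.write() once per yielded element and counts each as one byte, those helpers presumably yield one-byte bytes objects; a hypothetical sketch of the packed variant built on numpy.packbits:

import numpy as np

def pack_bools_to_bytes(bool_array: np.ndarray):
    # Pack 8 bools per byte (MSB first) and yield one-byte objects,
    # matching how the loop in save_bits() consumes them.
    for value in np.packbits(bool_array.astype(np.uint8)):
        yield bytes([int(value)])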
Example 5
def consecutive_download(node_id: str, file: io.BufferedWriter, **kwargs):
    """Keyword args: write_callback"""
    r = BackOffRequest.get(get_content_url() + 'nodes/' + node_id + '/content',
                           stream=True)
    if r.status_code not in OK_CODES:
        raise RequestError(r.status_code, r.text)

    write_callback = kwargs.get('write_callback', None)

    total_ln = int(r.headers.get('content-length'))
    length = kwargs.get('length', None)
    if length and total_ln != length:
        logging.info('Length mismatch: argument %d, content %d' %
                     (length, total_ln))

    pgo = progress.Progress()
    curr_ln = 0
    try:
        for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
            if chunk:  # filter out keep-alive new chunks
                file.write(chunk)
                file.flush()
                if write_callback:
                    write_callback(chunk)
                curr_ln += len(chunk)
                pgo.print_progress(total_ln, curr_ln)
    except (ConnectionError, ReadTimeoutError) as e:
        raise RequestError(RequestError.CODE.READ_TIMEOUT,
                           '[acd_cli] Timeout. ' + e.__str__())
    print()  # break progress line
    r.close()
    return
Example 6
    def chunked_download(self, node_id: str, file: io.BufferedWriter,
                         **kwargs):
        """:param kwargs:
        offset (int): byte offset -- start byte for ranged request
        length (int): total file length[!], equal to end + 1
        write_callbacks (list[function])
        """
        ok_codes = [http.PARTIAL_CONTENT]

        write_callbacks = kwargs.get('write_callbacks', [])

        chunk_start = kwargs.get('offset', 0)
        length = kwargs.get('length', 100 * 1024**4)

        retries = 0
        while chunk_start < length:
            chunk_end = chunk_start + CHUNK_SIZE - 1
            if chunk_end >= length:
                chunk_end = length - 1

            if retries >= CHUNK_MAX_RETRY:
                raise RequestError(
                    RequestError.CODE.FAILED_SUBREQUEST,
                    '[acd_api] Downloading chunk failed multiple times.')
            r = self.BOReq.get(
                self.content_url + 'nodes/' + node_id + '/content',
                stream=True,
                acc_codes=ok_codes,
                headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)})

            logger.debug('Range %d-%d' % (chunk_start, chunk_end))
            # this should only happen at the end of unknown-length downloads
            if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
                r.close()
                logger.debug('Invalid byte range requested %d-%d' %
                             (chunk_start, chunk_end))
                break
            if r.status_code not in ok_codes:
                r.close()
                retries += 1
                logging.debug('Chunk [%d-%d], retry %d.' %
                              (chunk_start, chunk_end, retries))
                continue

            curr_ln = 0
            try:
                for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
                    if chunk:  # filter out keep-alive new chunks
                        file.write(chunk)
                        file.flush()
                        for wcb in write_callbacks:
                            wcb(chunk)
                        curr_ln += len(chunk)
            finally:
                r.close()

            chunk_start += CHUNK_SIZE
            retries = 0

        return
Example 7
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = ajaxuploader_settings.UPLOAD_DIRECTORY
    # TODO: allow this to be overridden per-widget/view

    def setup(self, filename):
        self._relative_path = os.path.normpath(
            os.path.join(
                force_unicode(
                    datetime.datetime.now().strftime( # allow %Y, %s, etc
                        smart_str(self.UPLOAD_DIR))),
                filename))
        self._path = os.path.join(settings.MEDIA_ROOT, self._relative_path)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        self._dest.close()
        return {"path": self._relative_path}

    def update_filename(self, request, filename):
        return ajaxuploader_settings.SANITIZE_FILENAME(filename)
Example 8
def send_packet(writer: io.BufferedWriter,
                cmd: int,
                flags: int = 0,
                data: bytes = b'') -> None:
    packet = Packet(cmd, flags, len(data), data, protocol_utils.Formats.HEADER)
    writer.write(packet.to_bytes())
    writer.flush()
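The Packet class and protocol_utils.Formats.HEADER are defined elsewhere in that project. One plausible shape, purely an assumption for illustration, is a small dataclass whose to_bytes() prefixes the payload with a packed header:

import struct
from dataclasses import dataclass

# Assumed header layout: 1-byte command, 1-byte flags, 4-byte big-endian
# payload length. The real protocol_utils.Formats.HEADER may differ.
@dataclass
class Packet:
    cmd: int
    flags: int
    length: int
    data: bytes
    header_format: str = '>BBI'

    def to_bytes(self) -> bytes:
        header = struct.pack(self.header_format, self.cmd, self.flags, self.length)
        return header + self.data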
Example 9
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = getattr(settings, "UPLOAD_DIR", "uploads")

    def setup(self, filename, *args, **kwargs):
        self._path = os.path.join(settings.MEDIA_ROOT, self.UPLOAD_DIR,
                                  filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk, *args, **kwargs):
        self._dest.write(chunk)

    def upload_complete(self, request, filename, *args, **kwargs):
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        self._dest.close()
        return {"path": path}

    def update_filename(self, request, filename, *args, **kwargs):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting
        """
        filename = os.path.basename(filename)
        self._dir = os.path.join(settings.MEDIA_ROOT, self.UPLOAD_DIR)
        unique_filename = False
        filename_suffix = 0

        # Check if file at filename exists
        if os.path.isfile(os.path.join(self._dir, filename)):
            while not unique_filename:
                try:
                    if filename_suffix == 0:
                        open(os.path.join(self._dir, filename))
                    else:
                        filename_no_extension, extension = os.path.splitext(
                            filename)
                        open(
                            os.path.join(
                                self._dir, filename_no_extension +
                                str(filename_suffix) + extension))
                    filename_suffix += 1
                except IOError:
                    unique_filename = True

        if filename_suffix == 0:
            return filename
        else:
            return filename_no_extension + str(filename_suffix) + extension

    @property
    def path(self):
        """
        Return a path of file uploaded
        """
        return self._path
Example 10
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = getattr(settings, "UPLOAD_DIR", "uploads")


    def setup(self, filename, *args, **kwargs):
        self._path = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk, *args, **kwargs):
        self._dest.write(chunk)

    def upload_complete(self, request, filename, *args, **kwargs):
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        self._dest.close()
        return {"path": path}

    def update_filename(self, request, filename, *args, **kwargs):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting
        """
        filename = os.path.basename(filename)
        self._dir = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR)
        unique_filename = False
        filename_suffix = 0

        # Check if file at filename exists
        if os.path.isfile(os.path.join(self._dir, filename)):
            while not unique_filename:
                try:
                    if filename_suffix == 0:
                        open(os.path.join(self._dir, filename))
                    else:
                        filename_no_extension, extension = os.path.splitext(filename)
                        open(os.path.join(self._dir, filename_no_extension + str(filename_suffix) + extension))
                    filename_suffix += 1
                except IOError:
                    unique_filename = True

        if filename_suffix == 0:
            return filename
        else:
            return filename_no_extension + str(filename_suffix) + extension

    @property
    def path(self):
        """
        Return a path of file uploaded
        """
        return self._path
Example 11
    def chunked_download(self, node_id: str, file: io.BufferedWriter, **kwargs):
        """:param kwargs:
        offset (int): byte offset -- start byte for ranged request
        length (int): total file length[!], equal to end + 1
        write_callbacks (list[function])
        """
        ok_codes = [http.PARTIAL_CONTENT]

        write_callbacks = kwargs.get("write_callbacks", [])

        chunk_start = kwargs.get("offset", 0)
        length = kwargs.get("length", 100 * 1024 ** 4)

        retries = 0
        while chunk_start < length:
            chunk_end = chunk_start + CHUNK_SIZE - 1
            if chunk_end >= length:
                chunk_end = length - 1

            if retries >= CHUNK_MAX_RETRY:
                raise RequestError(
                    RequestError.CODE.FAILED_SUBREQUEST, "[acd_api] Downloading chunk failed multiple times."
                )
            r = self.BOReq.get(
                self.content_url + "nodes/" + node_id + "/content",
                stream=True,
                acc_codes=ok_codes,
                headers={"Range": "bytes=%d-%d" % (chunk_start, chunk_end)},
            )

            logger.debug("Range %d-%d" % (chunk_start, chunk_end))
            # this should only happen at the end of unknown-length downloads
            if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
                logger.debug("Invalid byte range requested %d-%d" % (chunk_start, chunk_end))
                break
            if r.status_code not in ok_codes:
                r.close()
                retries += 1
                logging.debug("Chunk [%d-%d], retry %d." % (chunk_start, chunk_end, retries))
                continue

            curr_ln = 0
            try:
                for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
                    if chunk:  # filter out keep-alive new chunks
                        file.write(chunk)
                        file.flush()
                        for wcb in write_callbacks:
                            wcb(chunk)
                        curr_ln += len(chunk)
            finally:
                r.close()

            chunk_start += CHUNK_SIZE
            retries = 0

        return
Example 12
def chunked_download(node_id: str, file: io.BufferedWriter, **kwargs):
    """Keyword args:
    offset: byte offset
    length: total length, equal to end - 1
    write_callback
    """
    ok_codes = [http.PARTIAL_CONTENT]

    write_callback = kwargs.get('write_callback', None)

    length = kwargs.get('length', 100 * 1024 ** 4)

    pgo = progress.Progress()
    chunk_start = kwargs.get('offset', 0)
    retries = 0
    while chunk_start < length:
        chunk_end = chunk_start + CHUNK_SIZE - 1
        if chunk_end >= length:
            chunk_end = length - 1

        if retries >= CHUNK_MAX_RETRY:
            raise RequestError(RequestError.CODE.FAILED_SUBREQUEST,
                               '[acd_cli] Downloading chunk failed multiple times.')
        r = BackOffRequest.get(get_content_url() + 'nodes/' + node_id + '/content', stream=True,
                               acc_codes=ok_codes,
                               headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)})

        logger.debug('Range %d-%d' % (chunk_start, chunk_end))
        # this should only happen at the end of unknown-length downloads
        if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
            logger.debug('Invalid byte range requested %d-%d' % (chunk_start, chunk_end))
            break
        if r.status_code not in ok_codes:
            r.close()
            retries += 1
            logging.debug('Chunk [%d-%d], retry %d.' % (chunk_start, chunk_end, retries))
            continue

        try:
            curr_ln = 0
            for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
                if chunk:  # filter out keep-alive new chunks
                    file.write(chunk)
                    file.flush()
                    if write_callback:
                        write_callback(chunk)
                    curr_ln += len(chunk)
                    pgo.print_progress(length, curr_ln + chunk_start)
            chunk_start += CHUNK_SIZE
            retries = 0
            r.close()
        except (ConnectionError, ReadTimeoutError) as e:
            file.close()
            raise RequestError(RequestError.CODE.READ_TIMEOUT, '[acd_cli] Timeout. ' + e.__str__())

    print()  # break progress line
    return
Example 13
    def _encode(cls, file: BufferedReader, archive_file: BufferedWriter,
                file_path: str):

        encoding_dictionary = cls.get_encoding_dictionary(file)
        archive_file.write(cls._get_file_path_data(file_path))
        archive_file.write(cls._get_dictionary_data(encoding_dictionary))
        archive_file.write(cls._compose_data(cls._get_control_sum(file)))

        cls._write_encoded_file_data(file, archive_file, encoding_dictionary)
Example 14
    def chunked_download(self, node_id: str, file: io.BufferedWriter, **kwargs):
        """:param kwargs:
        offset (int): byte offset -- start byte for ranged request
        length (int): total file length[!], equal to end + 1
        write_callbacks (list[function])
        """
        ok_codes = [http.PARTIAL_CONTENT]

        write_callbacks = kwargs.get('write_callbacks', [])

        chunk_start = kwargs.get('offset', 0)
        length = kwargs.get('length', 100 * 1024 ** 4)

        dl_chunk_sz = self._conf.getint('transfer', 'dl_chunk_size')

        retries = 0
        while chunk_start < length:
            chunk_end = chunk_start + dl_chunk_sz - 1
            if chunk_end >= length:
                chunk_end = length - 1

            if retries >= self._conf.getint('transfer', 'chunk_retries'):
                raise RequestError(RequestError.CODE.FAILED_SUBREQUEST,
                                   '[acd_api] Downloading chunk failed multiple times.')
            r = self.BOReq.get(self.content_url + 'nodes/' + node_id + '/content', stream=True,
                               acc_codes=ok_codes,
                               headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)})

            logger.debug('Node "%s", range %d-%d' % (node_id, chunk_start, chunk_end))
            # this should only happen at the end of unknown-length downloads
            if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
                r.close()
                logger.debug('Invalid byte range requested %d-%d' % (chunk_start, chunk_end))
                break
            if r.status_code not in ok_codes:
                r.close()
                retries += 1
                logging.debug('Chunk [%d-%d], retry %d.' % (chunk_start, chunk_end, retries))
                continue

            curr_ln = 0
            try:
                for chunk in r.iter_content(chunk_size=self._conf.getint('transfer', 'fs_chunk_size')):
                    if chunk:  # filter out keep-alive new chunks
                        file.write(chunk)
                        file.flush()
                        for wcb in write_callbacks:
                            wcb(chunk)
                        curr_ln += len(chunk)
            finally:
                r.close()
                chunk_start = file.tell()

            retries = 0

        return
Example 15
 def _extract(self, frame:ImageFrame, dstfile:io.BufferedWriter, imgfile:io.BufferedReader, **kwargs):
     """override optional (if no encryption/compression)"""
     imgfile.seek(frame.offset)
     dstdata = imgfile.read(frame.length)
     if len(dstdata) != frame.length:
         raise ValueError('Image Frame data length does not match, expected {0.length!r} bytes, not {1!r} bytes'.format(frame, len(dstdata)))
     if dstfile is None:
         return dstdata
     else:
         dstfile.write(dstdata)
Example 16
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = "uploads"

    def setup(self, filename):
        self._path = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        self._dest.close()
        return {"path": path}

    def update_filename(self, request, filename):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting
        """
        self._dir = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR)
        unique_filename = False
        filename_suffix = 0

        print "orig filename: " + os.path.join(self._dir, filename)

        # Check if file at filename exists
        if os.path.isfile(os.path.join(self._dir, filename)):
            while not unique_filename:
                try:
                    if filename_suffix == 0:
                        open(os.path.join(self._dir, filename))
                    else:
                        filename_no_extension, extension = os.path.splitext(filename)
                        print "filename all ready exists. Trying  " + filename_no_extension + str(filename_suffix) + extension
                        open(os.path.join(self._dir, filename_no_extension + str(filename_suffix) + extension))
                    filename_suffix += 1
                except IOError:
                    unique_filename = True

        if filename_suffix == 0:
            print "using filename: " + os.path.join(self._dir, filename)
            return filename
        else:
            print "using filename: " + filename_no_extension + str(filename_suffix) + extension
            return filename_no_extension + str(filename_suffix) + extension
Example 17
 def _extract(self, entry: ArchiveEntry, dstfile: io.BufferedWriter,
              arcfile: io.BufferedReader, **kwargs):
     """override optional (if no encryption/compression)"""
     arcfile.seek(entry.offset)
     dstdata = arcfile.read(entry.length)
     if len(dstdata) != entry.length:
         raise ValueError(
             'Archive Entry data length does not match, expected {0.length!r} bytes, not {1!r} bytes'
             .format(entry, len(dstdata)))
     if dstfile is None:
         return dstdata
     else:
         dstfile.write(dstdata)
Example 18
def chunked_download(node_id: str, file: io.BufferedWriter, **kwargs):
    """Keyword args:
    offset (int): byte offset -- start byte for ranged request
    length (int): total file length[!], equal to end + 1
    write_callbacks: (list[function])
    """
    ok_codes = [http.PARTIAL_CONTENT]

    write_callbacks = kwargs.get('write_callbacks', [])

    chunk_start = kwargs.get('offset', 0)
    length = kwargs.get('length', 100 * 1024 ** 4)

    retries = 0
    while chunk_start < length:
        chunk_end = chunk_start + CHUNK_SIZE - 1
        if chunk_end >= length:
            chunk_end = length - 1

        if retries >= CHUNK_MAX_RETRY:
            raise RequestError(RequestError.CODE.FAILED_SUBREQUEST,
                               '[acd_cli] Downloading chunk failed multiple times.')
        r = BackOffRequest.get(get_content_url() + 'nodes/' + node_id + '/content', stream=True,
                               acc_codes=ok_codes,
                               headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)})

        logger.debug('Range %d-%d' % (chunk_start, chunk_end))
        # this should only happen at the end of unknown-length downloads
        if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE:
            logger.debug('Invalid byte range requested %d-%d' % (chunk_start, chunk_end))
            break
        if r.status_code not in ok_codes:
            r.close()
            retries += 1
            logging.debug('Chunk [%d-%d], retry %d.' % (chunk_start, chunk_end, retries))
            continue

        curr_ln = 0
        # connection exceptions occur here
        for chunk in r.iter_content(chunk_size=FS_RW_CHUNK_SZ):
            if chunk:  # filter out keep-alive new chunks
                file.write(chunk)
                file.flush()
                for wcb in write_callbacks:
                    wcb(chunk)
                curr_ln += len(chunk)
        chunk_start += CHUNK_SIZE
        retries = 0
        r.close()

    return
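A hypothetical call site for the function above; the node id is a placeholder, and the callback simply tallies bytes as they are flushed to the local file.

received = 0

def track(chunk: bytes) -> None:
    global received
    received += len(chunk)

with open("node.bin", "wb") as fh:
    chunked_download("example-node-id", fh, write_callbacks=[track])
print("received", received, "bytes")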
Example 19
def download(url: str, fhandle: BufferedWriter, on_progress: Callable = None):
    """
    Download a file to a specific target. Inspired by
    Patrick Massot's code in leanproject.

    :param url: HTTP(s) url to download file from (GET request)
    :param fhandle: binary file handle (BufferedWriter) to write the downloaded data to
    :param on_progress: callback(idx,count, progress)
                        to monitor download progress.
    :return: the sha1 checksum of the downloaded file
    """

    # TODO(florian): better error handling ?
    # -> ConnectionError raised by requests.get
    # -> HTTPError raised by raise_for_status

    sha1 = hashlib.sha1()

    response = requests.get(url, stream=True)
    response.raise_for_status()  # Raise HTTPError if any

    tot_len = response.headers.get("content-length", 0)

    if not tot_len:
        fhandle.write(response.content)
        sha1.update(response.content)
    else:
        dl_size = 0
        tot_len = int(tot_len)
        progress = 0
        progress_prev = 0

        for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
            dl_size += len(chunk)
            fhandle.write(chunk)
            sha1.update(chunk)

            # Compute and display progress if /10
            progress_prev = progress
            progress = (100 * (dl_size / tot_len))
            if int(progress) % 10 == 0 and int(progress) != int(progress_prev):
                log.info(_("Progress : {:03d}%").format(int(progress)))

            # Trigger progress callback
            if on_progress is not None:
                on_progress(dl_size, tot_len, progress)

    return sha1.hexdigest()
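A hypothetical invocation of download(); the URL is a placeholder and CHUNK_SIZE (used inside the function) must already be defined in the module.

import logging

def report(done: int, total: int, pct: float) -> None:
    # progress callback: bytes downloaded, total bytes, percent complete
    logging.info("downloaded %d/%d bytes (%.0f%%)", done, total, pct)

with open("archive.tar.gz", "wb") as fhandle:
    checksum = download("https://example.org/archive.tar.gz", fhandle, on_progress=report)
print("sha1:", checksum)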
Example 20
class PANDAAbstractUploadBackend(AbstractUploadBackend):
    """
    Customized backend to handle AJAX uploads.
    """
    def update_filename(self, request, filename):
        """
        Verify that the filename is unique, if it isn't append and iterate
        a counter until it is.
        """
        self._original_filename = filename

        filename = self._original_filename
        root, ext = os.path.splitext(self._original_filename)
        path = os.path.join(settings.MEDIA_ROOT, filename)

        i = 1

        while os.path.exists(path):
            filename = '%s%i%s' % (root, i, ext)
            path = os.path.join(settings.MEDIA_ROOT, filename)
            i += 1

        return filename

    def setup(self, filename):
        """
        Open the destination file for writing.
        """
        self._path = os.path.join(settings.MEDIA_ROOT, filename)

        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except:
            pass

        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        """
        Write a chunk of data to the destination.
        """
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        """
        Close the destination file.
        """
        self._dest.close()
Example 21
def stream_decompress(instr: io.BufferedReader,
                      outstr: io.BufferedWriter,
                      chunk_size=DEFAULT_MAX_CHUNK):
    """
    A stream processor that calls decompress on bytes available in instr
    and writes them into outstr.
    :param instr: buffered reader
    :param outstr: buffered writer
    :param chunk_size: the size of chunk to read at one time; if 0, attempt to read as much as possible.
    :returns: consumed (compressed) data size, decompressed data size
    """
    orig_data_size: int = 0
    decomp_data_size: int = 0
    inbytes: bytes = instr.read(chunk_size)

    # we find chunk indexes
    # Note: we don't care about next_ori_chunk_idx when decompressing everything
    # next_ori_chunk_idx = struct.unpack('>H', inbytes[0:2]) if inbytes else None
    next_comp_chunk_idx: int = struct.unpack('>H',
                                             inbytes[2:4]) if inbytes else None
    # careful : next_ori_chunk_idx is the location *after* decompression (ie. in the original uncompressed sequence)
    cur_chunk_idx = 4
    while inbytes:

        decomp_data = bytearray()
        while len(inbytes) > next_comp_chunk_idx:

            # if next chunk index is already in range, we can already uncompress this chunk
            decomp_data += decompress(
                inbytes[cur_chunk_idx:next_comp_chunk_idx])

            # find next chunk
            cur_chunk_idx = next_comp_chunk_idx
            next_comp_chunk_idx = inbytes[next_comp_chunk_idx]

        orig_data_size += len(inbytes)
        decomp_data_size += len(decomp_data)

        outstr.write(bytes(decomp_data))

        # correct the next chunk index value
        next_comp_chunk_idx = next_comp_chunk_idx - len(inbytes)
        cur_chunk_idx = 0

        # read more data in case it is now available
        inbytes = instr.read(chunk_size)

    return orig_data_size, decomp_data_size
Example 22
    def send_generic(msg: KQMLPerformative, out: BufferedWriter):
        """Basic send mechanism copied (more or less) from pykqml. Writes the
        msg as a string to the output buffer then flushes it.

        Args:
            msg (KQMLPerformative): Message to be sent
            out (BufferedWriter): The output to write to, needed for sending to
                Companions and sending on our own port.
        """
        LOGGER.debug('Sending: %s', msg)
        try:
            msg.write(out)
        except IOError:
            LOGGER.error('IOError during message sending')
        out.write(b'\n')
        out.flush()
Example 23
def pandas_write_hdf5_buffered(df: pd.DataFrame, key: str,
                               buf: io.BufferedWriter):
    """
    Write a Pandas dataframe in HDF5 format to a buffer.
    """

    ## I am getting
    ##   HDF5ExtError("Unable to open/create file '/dev/null'")
    ##   unable to truncate a file which is already open
    with write_lock:
        with pd.HDFStore("/dev/null",
                         mode="w",
                         driver="H5FD_CORE",
                         driver_core_backing_store=0) as store:
            store["results"] = df
            buf.write(store._handle.get_file_image())
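write_lock is not shown in the snippet; a module-level threading.Lock is one plausible definition. A hypothetical call, writing the in-memory HDF5 image straight to a file opened in binary mode (requires PyTables for HDFStore):

import threading
import pandas as pd

write_lock = threading.Lock()  # assumption: stand-in for the undefined write_lock

df = pd.DataFrame({"a": [1, 2, 3]})
with open("results.h5", "wb") as buf:
    pandas_write_hdf5_buffered(df, "results", buf)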
Example 24
    def assemble_script(self, writer: io.BufferedWriter) -> None:
        if not isinstance(writer, StructIO):
            writer = StructIO(writer)

        # header:
        if self.signature not in (self.SIGNATURE_ENCRYPTED,
                                  self.SIGNATURE_DECRYPTED):
            raise Exception(
                f'{self.__class__.__name__} signature must be {self.SIGNATURE_ENCRYPTED.decode("cp932")!r} or {self.SIGNATURE_DECRYPTED.decode("cp932")!r}, not {self.signature.decode("cp932")!r}'
            )
        writer.pack('<16sIII', self.signature, self.main_offset,
                    self.line_count, len(self.functions))
        is_encrypted: bool = (self.signature == self.SIGNATURE_ENCRYPTED)
        assert (is_encrypted ^ (self.signature in (self.SIGNATURE_DECRYPTED,
                                                   self.SIGNATURE_PLAIN)))

        # functions table:
        for fn in self.functions:
            writer.pack('<II', *fn)  # fn.name_hash, fn.offset

        # bytecode:
        writer.pack('<I', self.bytecode_size)

        # initialize full-length of bytecode ahead of time (is this actually efficient in Python?)
        ms: io.BytesIO = io.BytesIO(bytes(self.bytecode_size))
        self.assemble_bytecode(StructIO(ms))
        ms.flush()

        bytecode: bytes = ms.getvalue()
        if is_encrypted:
            bytecode = crypt.crypt32(bytecode)  # encrypt bytecode
        written_size = writer.write(bytecode)
        assert (written_size == self.bytecode_size)
Example 25
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = "uploads"

    def setup(self, filename):
        self._path = os.path.join(settings.MEDIA_ROOT, self.UPLOAD_DIR,
                                  filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        return {"path": path}
Example 26
    def _dump(self, data: TLM, file: BufferedWriter, *args, **kwargs):
        size_x, size_y = (0, 0)
        if data.lod_images:
            size_x, size_y = data.lod_images[0].size

        file.write(
            pack('<11I', data.unknown_00, data.created_at.year,
                 data.created_at.month, data.created_at.day,
                 data.created_at.hour, data.created_at.minute,
                 data.created_at.second, data.created_at.microsecond,
                 data.unknown_01, size_x, size_y))

        for lod_image in data.lod_images:
            lod_array = np.array(lod_image.getdata(), dtype=np.uint8)
            lod_array = lod_array.reshape(lod_image.size[0], lod_image.size[1],
                                          4)
            lod_array = self._swap_rb(lod_array)
            file.write(lod_array.tobytes())
Example 27
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = "uploads"

    def setup(self, filename):
        self._path = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "wb"))

    def upload_chunk(self, chunk):
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        return {"path": path}
Example 28
 def write_to_stream(self, writer_stream: BufferedWriter):
     if self._is_operation_result():
         with open(self._file_path, "rb", -1) as file:
             while True:
                 buffer = file.read(io.DEFAULT_BUFFER_SIZE)
                 writer_stream.write(buffer)
                 if len(buffer) < io.DEFAULT_BUFFER_SIZE:
                     break
         writer_stream.close()
         self._logger.info(
             "Writing file at {tmp_file_path} to writer stream".format(
                 tmp_file_path=self._file_path))
     else:
         self._logger.error(
             "Invalid use of write_to_stream(). Method invoked on FileRef instance which does not point to an operation "
             "result")
         raise AttributeError(
             "Method write_to_stream() only allowed on operation results")
Example 29
def main(
    image1: BufferedReader,
    image2: BufferedReader,
    url: str,
    output: BufferedWriter,
    verbose: bool,
    display: bool,
):
    """
    Thrash detector API client. 
    """
    files = {"image1": image1.read(), "image2": image2.read()}
    url_path = url.rstrip("/") + "/api/thrash/json"
    if verbose:
        click.echo(f"Querying thrash detector API on {url_path}")
        click.echo(f"Image 1: {image1.name}")
        click.echo(f"Image 2: {image2.name}")

    response = requests.post(url_path, files=files)

    if response.status_code != 200:
        click.echo(
            message=f"API request failed with code {response.status_code}.",
            err=True)
        click.echo(message=f"{response.json()['error']}", err=True)
        sys.exit(response.status_code)

    data = response.json()
    image = data["encoded_img"]["bytes"]
    bounds = data["bounds"]
    click.echo(f"Bounding box: {bounds}")

    if verbose:
        click.echo(f"Save bounding box image to {output.name}")

    image = base64.decodebytes(image.encode("ascii"))
    output.write(image)

    if display:
        res = cv2.imdecode(np.frombuffer(image, np.uint8), -1)
        cv2.imshow("bounding box", res)
        click.echo("Press any key to close the image")
        cv2.waitKey(0)
Example 30
    def download_file(self, path: str, stream: io.BufferedWriter) -> Response:
        """
        Calls the gRPC DownloadFile function

        :param path: (str) The location of the file which needs to be downloaded
        :param stream: (io.BufferedWriter) A BufferedWriter to write to
        :return: (Response) The response object containing values corresponding to the request
        """
        meta = self._make_meta()
        download_file_request = server_pb2.DownloadFileRequest(path=path,
                                                               meta=meta)
        for response in self.stub.DownloadFile(download_file_request):
            if response.status == 200:
                stream.write(response.payload)
            else:
                return Response(
                    server_pb2.Response(status=response.status,
                                        error=response.error))
        return Response(server_pb2.Response(status=200))
Example 31
    def doExport(self, file: BufferedWriter) -> None:
        cardIds = self.cardIds()
        data = []
        for id, flds, tags in self.col.db.execute("""
select guid, flds, tags from notes
where id in
(select nid from cards
where cards.id in %s)""" % ids2str(cardIds)):
            row = []
            # note id
            if self.includeID:
                row.append(str(id))
            # fields
            row.extend([self.processText(f) for f in splitFields(flds)])
            # tags
            if self.includeTags:
                row.append(tags.strip())
            data.append("\t".join(row))
        self.count = len(data)
        out = "\n".join(data)
        file.write(out.encode("utf-8"))
Example 32
def download_document(outfile):
    headers = {'user-agent': I_USERAGENT}

    conn = http.client.HTTPSConnection(I_HOST)
    try:
        conn.request('GET', I_TEX_URL, headers=headers)
        rsp = conn.getresponse()

        CHUNK_SIZE = 1024 * 10

        reader = BufferedReader(rsp)
        with open(outfile, 'wb') as outf:
            writer = BufferedWriter(outf)
            while True:
                data = reader.read(CHUNK_SIZE)
                if len(data) > 0:
                    writer.write(data)
                else:
                    break
    finally:
        conn.close()
Example 33
def _write_data(outfile: io.BufferedWriter, data):
    """ Writes the 86p data section to the outfile

        Args:
            outfile - Outfile to write to.
            data - Data buffer
    """
    checksum = 0
    # Header struct format:
    # H -> Always 12 (0x0c00)
    # H -> length of the variable data
    # B -> Data Type ID
    # B -> Length of the variable name
    # 8s -> Variable name, padding w/ space characters
    # H -> length of the variable data (again)
    vname = bytes('MiniRPG', 'ascii')
    # We add 4 to the data len to account for the program header.
    # So many headers.
    dlen = len(data) + 4
    entry_header = pack('H H B B 8s H', 12, dlen, 18, 7, vname, dlen)
    outfile.write(entry_header)
    checksum += reduce(lambda x, y: x + y, entry_header)

    # Write the ASM program into the fileformat
    prog_header = pack('H B B', len(data), 0x8e, 0x28)
    outfile.write(prog_header)
    checksum += reduce(lambda x, y: x + y, prog_header)

    outfile.write(bytes(data))
    checksum += reduce(lambda x, y: x + y, data)

    return checksum
Example 34
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = 'tmp'

    def update_filename(self, request, filename):
        name, ext = os.path.splitext(filename)
        return slughifi(name) + ext

    def setup(self, filename):
        self._path = os.path.join(self.UPLOAD_DIR, filename)
        self.path = default_storage.save(self._path, ContentFile(''))
        self._abs_path = default_storage.path(self.path)
        self._dest = BufferedWriter(FileIO(self._abs_path, "w"))

    def upload_chunk(self, chunk):
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        self._dest.close()

        context = {'thumbnail_path': self._path, 'file_name': filename, }
        thumbnail = render_to_string('ajaxupload/includes/thumbnail.html', context)
        return {"path": self._path, 'thumbnail': thumbnail}
Example 35
def add_garbage(file: BufferedWriter, total_bytes: int) -> None:
    one_mb_of_data = b"*" * 1024 * 1024
    written = 0
    file.write(b"\n<!-- ")
    while written < total_bytes:
        file.write(one_mb_of_data)
        written += len(one_mb_of_data)
    file.write(b" -->\n")
Example 36
    def _dump(self, data: THM, file: BufferedWriter, *args, **kwargs):
        size_x, size_y = (0, 0)
        if data.lod_images:
            size_x, size_y = data.lod_images[0].size

        file.write(pack(
            '<13I',
            data.unknown_01,
            data.created_at.year,
            data.created_at.month,
            data.created_at.day,
            data.created_at.hour,
            data.created_at.minute,
            data.created_at.second,
            data.created_at.microsecond,
            data.unknown_01,
            data.unknown_02,
            data.unknown_03,
            size_x,
            size_y
        ))

        for lod_image in data.lod_images:
            file.write(lod_image.tobytes())
Example 37
    def dump(self,
             writer: io.BufferedWriter,
             *args,
             strict: bool = STRICT,
             encoding: str = ENCODING,
             **kwargs) -> None:
        _TABLE, _FRAME, _FILESPEC, _END = self._TABLE, self._FRAME, self._FILESPEC, self._END

        if strict and not len(self):
            raise ValueError('Strict: Cannot write CGList with 0 tables')

        for i, table in enumerate(self):
            frameSizes = [(_FRAME.size + len(f) * _FILESPEC.size)
                          for f in table]
            tableSize = _TABLE.size + sum(frameSizes)

            tableNext = tableSize if (i + 1 < len(self)) else 0
            writer.write(_TABLE.pack(tableNext, table.number, len(table)))

            for j, frame in enumerate(table):
                # frameNext is non-zero until the final frame of the final table
                frameNext = frameSizes[j] if (tableNext
                                              or j + 1 < len(table)) else 0

                if tableNext and j + 1 == len(
                        table):  # Account for distance across table headers.
                    ii = i + 1
                    while ii < len(self) and not len(self[ii]):
                        ii += 1
                    if ii == len(self):
                        frameNext = 0  # No more frames after this, it's all empty tables from here.
                    else:
                        frameNext += _TABLE.size * (
                            ii - i
                        )  # Number of empty tables and final non-empty table.

                writer.write(
                    _FRAME.pack(frameNext, frame.name.encode(encoding),
                                len(frame)))

                for k, file in enumerate(frame):
                    writer.write(_FILESPEC.pack(file.encode(encoding)))

        # File always ends with dummy empty table header.
        # No idea why, when we use find-by-offset for tables...
        writer.write(_TABLE.pack(*_END))
Example 38
def stream_noop(instr: io.BufferedReader,
                outstr: io.BufferedWriter,
                chunk_size=None):
    orig_data_size: int = 0
    dest_data_size: int = 0
    data = instr.read(chunk_size)
    while data:
        orig_data_size += len(data)
        # no transformation
        dest_data_size += len(data)

        written = outstr.write(data)
        outstr.flush()

        # detect early if we couldnt write everything
        assert written == len(data)

        # read more data in case it is now available
        data = instr.read(chunk_size)

    return orig_data_size, dest_data_size
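Used as a plain buffered copy; the 64 KiB chunk size here is arbitrary, and passing None would make read() consume everything at once.

with open("input.bin", "rb") as src, open("copy.bin", "wb") as dst:
    read_bytes, written_bytes = stream_noop(src, dst, 64 * 1024)
assert read_bytes == written_bytes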
Example 39
def _write_header(outfile: io.BufferedWriter, datasize: int):
    """ Writes the 86p header to the outfile.

        Args:
            outfile - Outfile to write to.
            datasize - Size of the ASM program.
            checksum - ASM program checksum
    """

    # Pack the signature, **TI86** plus 3 bytes of magic number.
    out = pack('8s B B B', bytes('**TI86**', 'ascii'), 0x1a, 0x0a, 0x00)
    outfile.write(out)
    # Pack a comment (up to 42 bytes)
    out = pack('42s', bytes('Packed by pack86.py', 'ascii'))
    outfile.write(out)
    # Length of program + 16 for the data header + 4 for the program header.
    outfile.write(pack('H', datasize + 16 + 4))
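A hypothetical driver combining _write_header and _write_data from these examples into a complete .86p file, assuming the file ends with the low 16 bits of the data-section checksum packed little-endian (that final step is not shown in the snippets):

from struct import pack

def write_86p(path: str, data: bytes) -> None:
    with open(path, "wb") as outfile:
        _write_header(outfile, len(data))
        checksum = _write_data(outfile, data)
        # assumed trailing checksum: low 16 bits of the data-section sum
        outfile.write(pack('<H', checksum & 0xFFFF))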
Example 40
 def _extract(self,
              entry: ArchiveEntry,
              dstfile: io.BufferedWriter,
              arcfile: io.BufferedReader,
              *,
              buffersize: int = 0x4000,
              **kwargs):
     if self._cipher is None or entry.length == 0:
         return super()._extract(entry, dstfile, arcfile)
     #
     arcfile.seek(entry.offset)
     #
     cipher = self._cipher
     length = entry.length
     #
     tmp_len = (buffersize & ~0x7) if buffersize else 0x4000
     if tmp_len <= length:
         tmp_buf = bytearray(length)
         read_len = arcfile.readinto(tmp_buf)
         if read_len < length:
             raise ValueError(
                 'Unexpected end of entry data {0.name!r}'.format(entry))
         self._cipher.decrypt_buffer(tmp_buf, 0, length & ~0x7)
         dstfile.write(tmp_buf)
     else:
         tmp_buf = bytearray(tmp_len)
         read_len = arcfile.readinto(tmp_buf)
         while read_len == tmp_len and length >= tmp_len:
             self._cipher.decrypt_buffer(tmp_buf, 0,
                                         min(read_len, length) & ~0x7)
             dstfile.write(tmp_buf)
             length -= read_len
             read_len = arcfile.readinto(tmp_buf)
         if read_len < length:
             raise ValueError(
                 'Unexpected end of entry data {0.name!r}'.format(entry))
         if length > 0:
             self._cipher.decrypt_buffer(tmp_buf, 0, length & ~0x7)
             dstfile.write(tmp_buf[:length])
Example 41
    def _write_encoded_file_data(cls, file: BufferedReader,
                                 archive_file: BufferedWriter,
                                 encoding_dictionary):
        bit_buffer = bitarray()
        while True:
            byte = file.read(1)
            if not byte:
                break
            bit_buffer.extend(encoding_dictionary[byte[0]])
            while len(bit_buffer) >= cls.data_unit_size * cls.byte:
                archive_file.write(bytes([cls.data_unit_size]))
                archive_file.write(bit_buffer[:cls.data_unit_size *
                                              cls.byte:].tobytes())
                bit_buffer = bit_buffer[cls.data_unit_size * cls.byte::]

        if len(bit_buffer) != 0:
            empty_bits_count = cls.byte - len(bit_buffer) % cls.byte
            if empty_bits_count == cls.byte:
                empty_bits_count = 0
            byte_buffer = bytearray(bit_buffer.tobytes())
            byte_buffer.append(empty_bits_count)

            archive_file.write(cls._compose_data(bytes(byte_buffer)))
Example 42
class LocalUploadBackend(AbstractUploadBackend):
    #UPLOAD_DIR = "uploads"
    # The below key must be synchronized with the implementing project
    # Used to store an array of unclaimed file_pks in the django session
    # So they can be claimed later when the anon user authenticates
    #SESSION_UNCLAIMED_FILES_KEY = KarmaSettings.SESSION_UNCLAIMED_FILES_KEY

    # When a file is uploaded anonymously, 
    # What username should we assign ownership to?
    # This is important because File.save
    # behavior will not set awarded_karma to True 
    # until an owner is assigned who has username != this
    #DEFAULT_UPLOADER_USERNAME = KarmaSettings.DEFAULT_UPLOADER_USERNAME

    def setup(self, filename):
        self._path = os.path.join(
            settings.MEDIA_ROOT, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk):
        self._dest.write(chunk)

    def upload(self, uploaded, filename, raw_data):
        """ :raw_data: is 0/1 """
        try:
            if raw_data:
                # File was uploaded via ajax, and is streaming in.
                chunk = uploaded.read(self.BUFFER_SIZE)
                while len(chunk) > 0:
                    self.upload_chunk(chunk)
                    chunk = uploaded.read(self.BUFFER_SIZE)
            else:
                # File was uploaded via a POST, and is here.
                for chunk in uploaded.chunks():
                    self.upload_chunk(chunk)
            return True
        except:
            # things went badly.
            return False

    def upload_complete(self, request, filename, upload):
        path = settings.MEDIA_URL + "/" + filename
        self._dest.close()

        self._dir = settings.MEDIA_ROOT

        # Avoid File.objects.create, as this will try to make
        # Another file copy at FileField's 'upload_to' dir
        print "creating note"
        note = Note()
        note.name = filename
        note.note_file = os.path.join(self._dir, filename)
        note.course_id = request.GET['course_id']
        note.draft = True # Pending approval from user
        print "saving note"
        note.save()

        # FIXME: Make get or create
        print "setting up session vars"
        #import ipdb; ipdb.set_trace()
        if 'uploaded_files' in request.session:
            request.session['uploaded_files'].append(note.pk)
        else:
            request.session['uploaded_files'] = [note.pk]

        # Asynchronously process document with Google Documents API
        print "upload_complete, firing task"
        tasks.process_document.delay(note)

        return {'note_url': note.get_absolute_url()}

    def update_filename(self, request, filename):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting
        """
        self._dir = settings.MEDIA_ROOT
        unique_filename = False
        filename_suffix = 0

        # Check if file at filename exists
        if os.path.isfile(os.path.join(self._dir, filename)):
            while not unique_filename:
                try:
                    if filename_suffix == 0:
                        open(os.path.join(self._dir, filename))
                    else:
                        filename_no_extension, extension = os.path.splitext(filename)
                        #print "filename all ready exists. Trying  " + filename_no_extension + str(filename_suffix) + extension
                        open(os.path.join(self._dir, filename_no_extension + str(filename_suffix) + extension))
                    filename_suffix += 1
                except IOError:
                    unique_filename = True

        if filename_suffix == 0:
            #print "using filename: " + os.path.join(self._dir, filename)
            return filename
        else:
            #print "using filename: " + filename_no_extension + str(filename_suffix) + extension
            return filename_no_extension + str(filename_suffix) + extension
Example 43
 def write(self, b):
     ret = BufferedWriter.write(self, b)
     self.flush()
     return ret
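The method above presumably lives in a BufferedWriter subclass; a minimal sketch of such an auto-flushing writer (the class name and file name are assumptions for illustration):

from io import BufferedWriter, FileIO

class AutoFlushWriter(BufferedWriter):
    def write(self, b):
        # Delegate to BufferedWriter, then flush so data reaches the raw file immediately.
        ret = BufferedWriter.write(self, b)
        self.flush()
        return ret

writer = AutoFlushWriter(FileIO("log.bin", "w"))
writer.write(b"flushed immediately\n")
writer.close()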
Example 44
class MyBaseUploadBackend(AbstractUploadBackend):
    def __init__(self, dirname, **kwargs):
        super(MyBaseUploadBackend, self).__init__(**kwargs)
        self.report_id = None

    def set_report_id(self, report_id):
        self.report_id = report_id
        try_number = 1
        while True:
            try:
                self.quast_session = QuastSession.objects.get(report_id=self.report_id)
                return True
            except QuastSession.DoesNotExist:
                logger.error('No quast session with report_id=%s' % self.report_id)
                return False
            except OperationalError:
                logger.error(traceback.format_exc())
                try_number += 1
                logger.error('Retrying. Try number ' + str(try_number))

    def setup(self, filename):
        dirpath = self.quast_session.get_contigs_dirpath()
        logger.info('filename is %s' % filename)
        logger.info('contigs dirpath is %s' % dirpath)

        if not os.path.exists(dirpath):
            logger.error("contigs directory doesn't exist")
            return False

        fpath = os.path.join(dirpath, filename)

        self._path = fpath
        self._dest = BufferedWriter(FileIO(self._path, 'w'))
        return True

    def upload_chunk(self, chunk):
        self._dest.write(chunk)

    def upload_complete(self, request, filename):
        self._dest.close()

        file_index = "%x" % random.getrandbits(128)
        c_fn = ContigsFile(fname=filename, file_index=file_index)
        c_fn.save()
        qc = QuastSession_ContigsFile(contigs_file=c_fn, quast_session=self.quast_session)
        qc.save()

        logger.info('%s' % filename)

        return {
            'file_index': file_index,
        }

    def update_filename(self, request, filename):
        dirpath = self.quast_session.get_contigs_dirpath()
        logger.info('contigs dirpath is %s' % dirpath)

        fpath = os.path.join(dirpath, filename)
        logger.info('file path is %s' % fpath)

        i = 2
        base_fpath = fpath
        base_filename = filename
        while os.path.isfile(fpath):
            fpath = str(base_fpath) + '__' + str(i)
            filename = str(base_filename) + '__' + str(i)
            i += 1

        return filename

    def remove(self, request):
        if 'fileIndex' not in request.GET:
            logger.error('Request.GET must contain "fileIndex"')
            return False, 'Request.GET must contain "fileIndex"'

        file_index = request.GET['fileIndex']
        try:
            contigs_file = self.quast_session.contigs_files.get(file_index=file_index)
        except ContigsFile.DoesNotExist:
            logger.error('No file with such index %s in this quast_session' % file_index)
            return False, 'No file with such index'

        success, msg = self.__remove(contigs_file)
        return success, msg

#        if contigs_file.user_session != self.user_session:
#            logger.error('This file (%s) belongs to session %s, this session is %s'
#                         % (fname, str(contigs_file.user_session ), str(self.user_session.session_key)))
#            return False, 'This file does not belong to this session'


    def __remove(self, contigs_file):
        fname = contigs_file.fname
        contigs_fpath = os.path.join(self.quast_session.get_contigs_dirpath(), fname)

        if os.path.isfile(contigs_fpath):
            try:
                os.remove(contigs_fpath)
            except IOError as e:
                logger.error('IOError when removing "%s", fileIndex=%s: %s' % (fname, contigs_file.file_index, e))
                return False, 'Cannot remove file'

        try:
            contigs_file.delete()
        except DatabaseError as e:
            logger.warning('DatabaseError when removing "%s", fileIndex=%s: %s' % (fname, contigs_file.file_index, e))
            return False, 'Data base error when removing file'
        except Exception as e:
            logger.error('Exception when removing "%s", fileIndex=%s: %s' % (fname, contigs_file.file_index, e))
            return False, 'Data base exception when removing file'

        return True, ''

    def remove_all(self, request):
#        if 'fileNames' not in request.GET:
#            logger.error('remove_all: Request.GET must contain "fileNames"')
#            return False
#
#        file_names_one_string = request.GET['fileNames']
#        file_names = file_names_one_string.split('\n')[:-1]

#        this_user_contigs_files = ContigsFile.objects.filter(user_session=self.user_session)

        logger.info('uploader_backend.remove_all')
        for c_f in self.quast_session.contigs_files.all():
            success, msg = self.__remove(c_f)

        return True

    def get_uploads(self, request):
        contigs_files = self.quast_session.contigs_files.all()

        return [{"fileName": c_f.fname,
                 "fileIndex": c_f.file_index,
                 "file_index": c_f.file_index,
               # "fileSize": c_f.file_size if c_f.file_size else None,
                 } for c_f in contigs_files]
Esempio n. 45
0
class UploadStorage(object):
    BUFFER_SIZE = 10485760  # 10MB

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
        self.size = None

    def set_size(self, width, height):
        self.size = int(width), int(height)
        #logger.debug(self.size)

    def setup(self, filename, upload_to):
        """ Creates the filename on the system, along with the required folder structure. """
        self.filename = filename
        self.upload_to = upload_to
        #logger.debug('File: '+self.filename)
        self._path = self.update_filename()
        #logger.debug('Dir: '+self._dir)
        #logger.debug('Path: '+self._path)
        #logger.debug(os.path.realpath(os.path.dirname(self._path)))
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk, *args, **kwargs):
        self._dest.write(chunk)

    def upload_complete(self):
        path = settings.MEDIA_URL + "/" + self.upload_to + "/" + self.filename
        self._dest.close()
        return {"path": path}

    def update_filename(self):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting

        """
        unique_filename = False
        filename_suffix = 0

        # remove a leading './' from the upload path if given
        if self.upload_to[:2] == './':
            self.upload_to = self.upload_to[2:]
        # format upload path with date formats
        self.upload_to = time.strftime(self.upload_to)
        self._dir = os.path.join(settings.MEDIA_ROOT, self.upload_to)
        #logger.debug('Upload to: '+self._dir)

        # Check if file at filename exists
        if os.path.isfile(os.path.join(self._dir, self.filename)):
            #logger.debug('this file already exists')
            while not unique_filename:
                try:
                    if filename_suffix == 0:
                        open(os.path.join(self._dir, self.filename))
                    else:
                        filename_no_extension, extension = os.path.splitext(self.filename)
                        open(os.path.join(self._dir, "{}_{}{}".format(filename_no_extension, str(filename_suffix), extension)))
                    filename_suffix += 1
                except IOError:
                    unique_filename = True

        if filename_suffix > 0:
            self.filename = "{}_{}{}".format(filename_no_extension, str(filename_suffix), extension)
        return os.path.join(self._dir, self.filename)

    def upload(self, uploaded, raw_data):
        try:
            if raw_data:
                # File was uploaded via ajax, and is streaming in.
                chunk = uploaded.read(self.BUFFER_SIZE)
                while len(chunk) > 0:
                    self.upload_chunk(chunk)
                    chunk = uploaded.read(self.BUFFER_SIZE)
            else:
                # File was uploaded via a POST, and is here.
                for chunk in uploaded.chunks():
                    self.upload_chunk(chunk)
            # make sure the file is closed
            self._dest.close()
            # file has been uploaded
            self.filename = os.path.join(settings.MEDIA_URL, self.upload_to, self.filename)
            image = Image.open(self._path)
            #logger.debug("{} {} {}".format(image.format, image.size, image.mode))
            # resize image
            if self.size:
                #logger.debug(self.size)
                image = image.convert('RGBA')
                # the image is resized using the minimum dimension
                width, height = self.size
                if image.size[0] < image.size[1]:
                    # the height is bigger than the width
                    # we set the maximum height to the original height,
                    # so that the image fits the width
                    height = image.size[1]
                elif image.size[0] > image.size[1]:
                    # the width is bigger than the height
                    # we set the maximum width to the original width,
                    # so that the image fits the height
                    width = image.size[0]
                else:
                    # we have a square
                    pass
                image.thumbnail((width, height), Image.ANTIALIAS)
                # if the image is not a square, we crop the exceeding width/length as required,
                # to fit the square
                if image.size[0] != image.size[1]:
                    # we crop in the middle of the image
                    if self.size[0] == image.size[0]:
                        # the width fits the container, center the height
                        x0 = 0
                        y0 = (image.size[1] / 2) - (self.size[1] / 2)
                        x1 = self.size[0]
                        y1 = y0 + self.size[1]
                    else:
                        # center the width
                        x0 = (image.size[0] / 2) - (self.size[0] / 2)
                        y0 = 0
                        x1 = x0 + self.size[0]
                        y1 = self.size[1] 
                    box = (x0, y0, x1, y1)
                    region = image.crop(box)
                    background = Image.new('RGBA', size = self.size, color = (255, 255, 255, 0))
                    background.paste(region, (0, 0))
                    #logger.debug("{} {} {}".format(background.format, background.size, background.mode))
                    background.save(self._path)
                else:
                    image.save(self._path)
            return True
        except Exception as e:
            logger.error(e)
            return False
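The crop step in UploadStorage.upload centers the thumbnail inside the target square before pasting it onto a transparent background. The box arithmetic, isolated as a hedged standalone sketch (function name is hypothetical):

def center_crop_box(image_size, target_size):
    """Return an (x0, y0, x1, y1) box that centers target_size inside image_size."""
    img_w, img_h = image_size
    tgt_w, tgt_h = target_size
    if img_w == tgt_w:
        # the width already fits the container, center the height
        x0, y0 = 0, (img_h - tgt_h) // 2
    else:
        # center the width
        x0, y0 = (img_w - tgt_w) // 2, 0
    return (x0, y0, x0 + tgt_w, y0 + tgt_h)

# cropping a 300x200 thumbnail to a 200x200 square keeps the middle band
print(center_crop_box((300, 200), (200, 200)))  # (50, 0, 250, 200)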
Esempio n. 46
0
class LocalUploadBackend(AbstractUploadBackend):
    UPLOAD_DIR = "uploads"

    def setup(self, filename, *args, **kwargs):
        self._path = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR, filename)
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except:
            pass
        self._dest = BufferedWriter(FileIO(self._path, "w"))

    def upload_chunk(self, chunk, *args, **kwargs):
        self._dest.write(chunk)

    def upload_complete(self, request, filename, *args, **kwargs):
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        self._dest.close()
        return {"path": path}

    def update_filename(self, request, filename, *args, **kwargs):
        """
        Returns a new name for the file being uploaded.
        Ensure file with name doesn't exist, and if it does,
        create a unique filename to avoid overwriting
        """
        self._dir = os.path.join(
            settings.MEDIA_ROOT, self.UPLOAD_DIR)
        unique_filename = False
        filename_suffix = 0

        # Check if file at filename exists
        if os.path.isfile(os.path.join(self._dir, filename)):
            while not unique_filename:
                try:
                    if filename_suffix == 0:
                        open(os.path.join(self._dir, filename))
                    else:
                        filename_no_extension, extension = os.path.splitext(filename)
                        open(os.path.join(self._dir, filename_no_extension + str(filename_suffix) + extension))
                    filename_suffix += 1
                except IOError:
                    unique_filename = True

        if filename_suffix == 0:
            return filename
        else:
            return filename_no_extension + str(filename_suffix) + extension

    def resize_for_display(self, filename, width, height):
        upload_dir_path = os.path.join(settings.MEDIA_ROOT, self.UPLOAD_DIR) + "/"
        original_path = upload_dir_path + filename
        filename_no_extension, extension = os.path.splitext(filename)
        need_ratio = float(width) / float(height)
        im = Image.open(original_path)
        real_width, real_height = [float(x) for x in im.size]
        real_ratio = real_width / real_height

        if real_width > width or real_height > height:
            if real_ratio > need_ratio:
                displayed_width = width
                displayed_height = int(width / real_ratio)
            else:
                displayed_height = height
                displayed_width = int(height * real_ratio)

            resized_im = im.resize((displayed_width, displayed_height))
            displayed_filename = '%s_displayed%s' % (filename_no_extension, extension)
            resized_im.save(upload_dir_path + displayed_filename)
            displayed_path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + displayed_filename
        else:
            displayed_path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename

        return {'displayed_path': displayed_path, 'true_size': im.size}
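resize_for_display above only shrinks images that exceed the requested box, picking the limiting dimension from the two aspect ratios. The same arithmetic as a standalone, hedged sketch (hypothetical helper):

def fit_within(real_size, max_width, max_height):
    """Return display dimensions that fit inside (max_width, max_height), keeping aspect ratio."""
    real_width, real_height = (float(x) for x in real_size)
    if real_width <= max_width and real_height <= max_height:
        return int(real_width), int(real_height)  # already small enough, keep as is
    real_ratio = real_width / real_height
    need_ratio = float(max_width) / float(max_height)
    if real_ratio > need_ratio:
        # wider than the box: the width is the limiting dimension
        return max_width, int(max_width / real_ratio)
    # taller than the box: the height is the limiting dimension
    return int(max_height * real_ratio), max_height

print(fit_within((1600, 900), 800, 600))   # (800, 450)
print(fit_within((900, 1600), 800, 600))   # (337, 600)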
Esempio n. 47
0
class BinaryWriter:
    """
    Small utility class to write binary data.
    Also creates a "Memory Stream" if necessary
    """

    def __init__(self, stream=None):
        if not stream:
            stream = BytesIO()

        self.writer = BufferedWriter(stream)
        self.written_count = 0

    # region Writing

    # "All numbers are written as little endian." |> Source: https://core.telegram.org/mtproto
    def write_byte(self, value):
        """Writes a single byte value"""
        self.writer.write(pack('B', value))
        self.written_count += 1

    def write_int(self, value, signed=True):
        """Writes an integer value (4 bytes), which can or cannot be signed"""
        self.writer.write(
            int.to_bytes(
                value, length=4, byteorder='little', signed=signed))
        self.written_count += 4

    def write_long(self, value, signed=True):
        """Writes a long integer value (8 bytes), which can or cannot be signed"""
        self.writer.write(
            int.to_bytes(
                value, length=8, byteorder='little', signed=signed))
        self.written_count += 8

    def write_float(self, value):
        """Writes a floating point value (4 bytes)"""
        self.writer.write(pack('<f', value))
        self.written_count += 4

    def write_double(self, value):
        """Writes a floating point value (8 bytes)"""
        self.writer.write(pack('<d', value))
        self.written_count += 8

    def write_large_int(self, value, bits, signed=True):
        """Writes a n-bits long integer value"""
        self.writer.write(
            int.to_bytes(
                value, length=bits // 8, byteorder='little', signed=signed))
        self.written_count += bits // 8

    def write(self, data):
        """Writes the given bytes array"""
        self.writer.write(data)
        self.written_count += len(data)

    # endregion

    # region Telegram custom writing

    def tgwrite_bytes(self, data):
        """Write bytes by using Telegram guidelines"""
        if len(data) < 254:
            padding = (len(data) + 1) % 4
            if padding != 0:
                padding = 4 - padding

            self.write(bytes([len(data)]))
            self.write(data)

        else:
            padding = len(data) % 4
            if padding != 0:
                padding = 4 - padding

            self.write(bytes([254]))
            self.write(bytes([len(data) % 256]))
            self.write(bytes([(len(data) >> 8) % 256]))
            self.write(bytes([(len(data) >> 16) % 256]))
            self.write(data)

        self.write(bytes(padding))

    def tgwrite_string(self, string):
        """Write a string by using Telegram guidelines"""
        self.tgwrite_bytes(string.encode('utf-8'))

    def tgwrite_bool(self, boolean):
        """Write a boolean value by using Telegram guidelines"""
        #                     boolTrue                boolFalse
        self.write_int(0x997275b5 if boolean else 0xbc799737, signed=False)

    def tgwrite_date(self, datetime):
        """Converts a Python datetime object into Unix time (used by Telegram) and writes it"""
        value = 0 if datetime is None else int(datetime.timestamp())
        self.write_int(value)

    def tgwrite_object(self, tlobject):
        """Writes a Telegram object"""
        tlobject.on_send(self)

    def tgwrite_vector(self, vector):
        """Writes a vector of Telegram objects"""
        self.write_int(0x1cb5c415, signed=False)  # Vector's constructor ID
        self.write_int(len(vector))
        for item in vector:
            self.tgwrite_object(item)

    # endregion

    def flush(self):
        """Flush the current stream to "update" changes"""
        self.writer.flush()

    def close(self):
        """Close the current stream"""
        self.writer.close()

    def get_bytes(self, flush=True):
        """Get the current bytes array content from the buffer, optionally flushing first"""
        if flush:
            self.writer.flush()
        return self.writer.raw.getvalue()

    def get_written_bytes_count(self):
        """Gets the count of bytes written in the buffer.
           This may NOT be equal to the stream length if one was provided when initializing the writer"""
        return self.written_count

    # with block
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
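tgwrite_bytes above follows the TL serialization rule referenced at https://core.telegram.org/mtproto: payloads shorter than 254 bytes get a one-byte length header, longer ones a 0xFE marker plus a 3-byte little-endian length, and the whole frame is padded to a multiple of 4. A standalone, hedged sketch of the same framing (hypothetical function, not part of the class above):

def tl_frame_bytes(data: bytes) -> bytes:
    """Frame a byte string with a TL-style length header, padding the total to a multiple of 4."""
    if len(data) < 254:
        header = bytes([len(data)])
    else:
        header = bytes([254]) + len(data).to_bytes(3, 'little')
    framed = header + data
    padding = (-len(framed)) % 4  # pad the frame length up to the next multiple of 4
    return framed + bytes(padding)

# a 5-byte payload gets a 1-byte header and 2 padding bytes: 8 bytes in total
assert tl_frame_bytes(b'hello') == b'\x05hello\x00\x00'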
Esempio n. 48
0
def write(data, wfile: io.BufferedWriter) -> None:
    wfile.write(json.dumps(data).encode() + b"\n")
    wfile.flush()
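The writer above emits newline-delimited JSON. A matching reader, given as a hedged sketch (the read() helper and buffer names are illustrative, not from the original source), decodes one message per line:

import io
import json

def read(rfile: io.BufferedReader):
    """Read one newline-terminated JSON message, or return None at end of stream."""
    line = rfile.readline()
    if not line:
        return None
    return json.loads(line.decode())

# round trip through an in-memory buffer using the write() helper above
buf = io.BytesIO()
wfile = io.BufferedWriter(buf)
write({"op": "ping"}, wfile)
buf.seek(0)
print(read(io.BufferedReader(buf)))  # {'op': 'ping'}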
Esempio n. 49
0
class AjaxUploader(object):
    BUFFER_SIZE = 10485760  # 10MB

    def __init__(self, filetype='file', upload_dir='files', size_limit=10485760):
        self._upload_dir = os.path.join(settings.MEDIA_ROOT, upload_dir, get_date_directory())
        self._filetype = filetype
        if filetype == 'image':
            self._save_format = setting('IMAGE_UPLOAD_FORMAT', 'JPEG')
        else:
            self._save_format = None
        self._size_limit = size_limit

    def max_size(self):
        """
        Check whether the written data exceeds the size limit; if so, remove the partial file and return True.
        """
        if int(self._destination.tell()) > self._size_limit:
            self._destination.close()
            os.remove(self._path)
            return True

    def setup(self, filename):
        ext = os.path.splitext(filename)[1]
        self._filename = md5(filename.encode('utf8')).hexdigest() + ext
        self._path = os.path.join(self._upload_dir, self._filename)
        # noinspection PyBroadException
        try:
            os.makedirs(os.path.realpath(os.path.dirname(self._path)))
        except:
            pass
        self._destination = BufferedWriter(FileIO(self._path, "w"))

    def handle_upload(self, request):
        is_raw = True
        if request.FILES:
            is_raw = False
            if len(request.FILES) == 1:
                upload = list(request.FILES.values())[0]
            else:
                return dict(success=False, error=_("Bad upload."))
            filename = upload.name
        else:
            # the file is stored raw in the request
            upload = request
            # get the file name from the query string
            try:
                filename = request.GET['qqfile']
            except KeyError as aerr:
                return dict(success=False, error=_("Can't read file name"))
        self.setup(filename)
        # noinspection PyBroadException
        try:
            if is_raw:
                # File was uploaded via ajax, and is streaming in.
                chunk = upload.read(self.BUFFER_SIZE)
                while len(chunk) > 0:
                    self._destination.write(chunk)
                    if self.max_size():
                        raise IOError
                    chunk = upload.read(self.BUFFER_SIZE)
            else:
                # File was uploaded via a POST, and is here.
                for chunk in upload.chunks():
                    self._destination.write(chunk)
                    if self.max_size():
                        raise IOError
        except:
            # things went badly.
            return dict(success=False, error=_("Upload error"))
        self._destination.close()
        if self._filetype == 'image':
            # noinspection PyBroadException
            try:
                i = Image.open(self._path)
            except:
                os.remove(self._path)
                return dict(success=False, error=_("File is not image format"))
            f_name, f_ext = os.path.splitext(self._filename)
            f_without_ext = os.path.splitext(self._path)[0]
            new_path = ".".join([f_without_ext, self._save_format.lower()])
            if setting('IMAGE_STORE_ORIGINAL', False):
                # TODO: need to change the extension
                orig_path = ".".join([f_without_ext + '_orig', self._save_format.lower()])
                shutil.copy2(self._path, orig_path)
            i.thumbnail((1200, 1200), Image.ANTIALIAS)
            # noinspection PyBroadException
            try:
                if self._path == new_path:
                    i.save(self._path, self._save_format)
                else:
                    i.save(new_path, self._save_format)
                    os.remove(self._path)
                    self._path = new_path
            except:
                # noinspection PyBroadException
                try:
                    os.remove(self._path)
                    os.remove(new_path)
                except:
                    pass
                return dict(success=False, error=_("Error saving image"))
            self._filename = ".".join([f_name, self._save_format.lower()])
        return dict(success=True, fullpath=self._path, path=os.path.relpath(self._path, '/' + settings.MEDIA_ROOT),
                    old_filename=filename, filename=self._filename)
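handle_upload above enforces the size limit by checking the writer's position after each chunk and deleting the partial file once it grows too large. The same guard, extracted as a hedged standalone sketch (hypothetical names, not the class's API):

import io

def copy_with_limit(src, dest: io.BufferedWriter, limit, chunk_size=10485760):
    """Copy src into dest in chunks, raising IOError once more than `limit` bytes are written."""
    written = 0
    chunk = src.read(chunk_size)
    while chunk:
        dest.write(chunk)
        written += len(chunk)
        if written > limit:
            raise IOError('upload exceeds the size limit of %d bytes' % limit)
        chunk = src.read(chunk_size)
    return written

# usage sketch: copy an in-memory payload under a 1 KB limit
src = io.BytesIO(b'x' * 512)
dest = io.BufferedWriter(io.BytesIO())
print(copy_with_limit(src, dest, limit=1024))  # 512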
Esempio n. 50
0
def convert_with_google_drive(note):
    """ Upload a local note and download HTML
        using Google Drive
        :note: a File model instance # FIXME
    """
    # TODO: set the permission of the file to permissive so we can use the
    #       gdrive_url to serve files directly to users

    # Get file_type and encoding of uploaded file
    # i.e: file_type = 'text/plain', encoding = None
    (file_type, encoding) = mimetypes.guess_type(note.note_file.path)



    if file_type is not None:
        media = MediaFileUpload(note.note_file.path, mimetype=file_type,
                    chunksize=1024*1024, resumable=True)

    else:
        media = MediaFileUpload(note.note_file.path,
                    chunksize=1024*1024, resumable=True)

    auth = DriveAuth.objects.filter(email=GOOGLE_USER).all()[0]
    creds = auth.transform_to_cred()


    creds, auth = check_and_refresh(creds, auth)

    service, http = build_api_service(creds)

    # get the file extension
    filename, extension = os.path.splitext(note.note_file.path)

    file_dict = upload_to_gdrive(service, media, filename, extension)

    content_dict = download_from_gdrive(file_dict, http, extension)

    # Get a new copy of the file from the database with the new metadata from filemeta
    new_note = Note.objects.get(id=note.id)

    if extension.lower() == '.pdf':
        new_note.file_type = 'pdf'

    elif extension.lower() in ['.ppt', '.pptx']:
        new_note.file_type = 'ppt'
        now = datetime.datetime.utcnow()
        # create a folder path to store the ppt > pdf file with year and month folders
        nonce_path = '/ppt_pdf/%s/%s/' % (now.year, now.month)

        _path = filename + '.pdf'
        try:
            # If those folders don't exist, create them
            os.makedirs(os.path.realpath(os.path.dirname(_path)))
        except:
            print "we failed to create those directories"

        _writer = BufferedWriter(FileIO(_path, "w"))
        _writer.write(content_dict['pdf'])
        _writer.close()

        new_note.pdf_file = _path

    else:
        # PPT files do not have this export ability
        new_note.gdrive_url = file_dict[u'exportLinks']['application/vnd.oasis.opendocument.text']
        new_note.html = content_dict['html']

    new_note.text = content_dict['text']

    # before we save new html, sanitize a tags in note.html
    #new_note.sanitize_html(save=False)
    #FIXME: ^^^ disabled until we can get html out of an Etree html element

    # Finally, save whatever data we got back from google
    new_note.save()
Esempio n. 51
0
 def write(self, strng):
     BufferedWriter.write(self, strng.encode('utf-8'))
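The fragment above encodes text to UTF-8 before handing it to BufferedWriter.write. The standard library offers the same behaviour through io.TextIOWrapper, shown here as a minimal sketch over an in-memory buffer (the buffer is illustrative):

import io

raw = io.BytesIO()
text_stream = io.TextIOWrapper(io.BufferedWriter(raw), encoding='utf-8')
text_stream.write('città')   # accepts str and encodes it to UTF-8
text_stream.flush()
print(raw.getvalue())        # b'citt\xc3\xa0'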