Example 1
    def oracle_upload_blob(self, cursor, oid, tid, filename):
        """Upload a blob from a file.

        If tid is None, upload to the temporary table.
        """
        if tid is not None:
            if self.keep_history:
                delete_stmt = """
                DELETE FROM blob_chunk
                WHERE zoid = :1 AND tid = :2
                """
                cursor.execute(delete_stmt, (oid, tid))
            else:
                delete_stmt = "DELETE FROM blob_chunk WHERE zoid = :1"
                cursor.execute(delete_stmt, (oid,))

            use_tid = True
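            # Oracle pattern: INSERT a row holding empty_blob(), then
            # SELECT the LOB locator back and stream data into it.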
            insert_stmt = """
            INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
            VALUES (:oid, :tid, :chunk_num, empty_blob())
            """
            select_stmt = """
            SELECT chunk FROM blob_chunk
            WHERE zoid=:oid AND tid=:tid AND chunk_num=:chunk_num
            """

        else:
            use_tid = False
            delete_stmt = "DELETE FROM temp_blob_chunk WHERE zoid = :1"
            cursor.execute(delete_stmt, (oid,))

            insert_stmt = """
            INSERT INTO temp_blob_chunk (zoid, chunk_num, chunk)
            VALUES (:oid, :chunk_num, empty_blob())
            """
            select_stmt = """
            SELECT chunk FROM temp_blob_chunk
            WHERE zoid=:oid AND chunk_num=:chunk_num
            """

        f = open(filename, 'rb')
        # Current versions of cx_Oracle only support offsets up
        # to sys.maxint or 4GB, whichever comes first. We divide up our
        # upload into chunks within this limit.
        maxsize = min(sys.maxint, 1 << 32)
        try:
            chunk_num = 0
            while True:
                blob = None
                params = dict(oid=oid, chunk_num=chunk_num)
                if use_tid:
                    params['tid'] = tid
                cursor.execute(insert_stmt, params)
                cursor.execute(select_stmt, params)
                blob, = cursor.fetchone()
                blob.open()
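                # Choose a write size that is a whole multiple of the
                # LOB's native chunk size, as close to the configured
                # blob_chunk_size as possible (at least one native chunk).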
                lob_chunk_size = blob.getchunksize()
                write_chunk_size = int(
                    max(round(1.0 * self.blob_chunk_size / lob_chunk_size), 1)
                    * lob_chunk_size)
                offset = 1  # Oracle still uses 1-based indexing.
                for _i in xrange(int(maxsize / write_chunk_size)):
                    write_chunk = f.read(write_chunk_size)
                    if not blob.write(write_chunk, offset):
                        # EOF.
                        return
                    offset += len(write_chunk)
                if blob is not None and blob.isopen():
                    blob.close()
                chunk_num += 1
        finally:
            f.close()
            if blob is not None and blob.isopen():
                blob.close()
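
For contrast, here is a rough sketch of the download side: reassembling the chunk rows back into a local file. Note that the method above is Python 2 code (it relies on xrange and sys.maxint). The sketch below is an illustration written for this page, not RelStorage's actual download method; it assumes the same blob_chunk table and uses only documented cx_Oracle LOB calls (read, size, getchunksize). It is written as a standalone function, so it takes the cursor explicitly rather than self.

    def oracle_download_blob(cursor, oid, tid, filename):
        """Reassemble a blob from its chunk rows into a file (sketch)."""
        select_stmt = """
        SELECT chunk FROM blob_chunk
        WHERE zoid = :oid AND tid = :tid AND chunk_num = :chunk_num
        """
        f = open(filename, 'wb')
        try:
            chunk_num = 0
            while True:
                cursor.execute(select_stmt,
                               dict(oid=oid, tid=tid, chunk_num=chunk_num))
                row = cursor.fetchone()
                if row is None:
                    break  # no more chunk rows for this blob
                lob = row[0]
                length = lob.size()
                read_size = lob.getchunksize()
                offset = 1  # cx_Oracle LOB offsets are 1-based
                while offset <= length:
                    data = lob.read(offset, read_size)
                    f.write(data)
                    offset += len(data)
                chunk_num += 1
        finally:
            f.close()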
Example 2
    def postgresql_upload_blob(self, cursor, oid, tid, filename):
        """Upload a blob from a file.

        If tid is None, upload to the temporary table.
        """
        if tid is not None:
            if self.keep_history:
                delete_stmt = """
                DELETE FROM blob_chunk
                WHERE zoid = %s AND tid = %s
                """
                cursor.execute(delete_stmt, (oid, tid))
            else:
                delete_stmt = "DELETE FROM blob_chunk WHERE zoid = %s"
                cursor.execute(delete_stmt, (oid,))

            use_tid = True
            insert_stmt = """
            INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
            VALUES (%(oid)s, %(tid)s, %(chunk_num)s, %(loid)s)
            """

        else:
            use_tid = False
            delete_stmt = "DELETE FROM temp_blob_chunk WHERE zoid = %s"
            cursor.execute(delete_stmt, (oid,))

            insert_stmt = """
            INSERT INTO temp_blob_chunk (zoid, chunk_num, chunk)
            VALUES (%(oid)s, %(chunk_num)s, %(loid)s)
            """

        blob = None
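        # Chunk data lives in server-side large objects; each blob_chunk
        # row stores the OID (loid) of the large object it refers to.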
        # PostgreSQL only supports up to 2GB of data per BLOB.
        maxsize = 1 << 31
        filesize = os.path.getsize(filename)

        if filesize <= maxsize:
            # The file fits in a single large object; use psycopg2's
            # native file-import support to copy it in one call.
            blob = cursor.connection.lobject(0, 'wb', 0, filename)
            blob.close()
            params = dict(oid=oid, chunk_num=0, loid=blob.oid)
            if use_tid:
                params['tid'] = tid
            cursor.execute(insert_stmt, params)
            return

        # The file is too large for one large object; split the upload
        # into multiple chunks.
        f = open(filename, 'rb')
        try:
            chunk_num = 0
            while True:
                blob = cursor.connection.lobject(0, 'wb')
                params = dict(oid=oid, chunk_num=chunk_num, loid=blob.oid)
                if use_tid:
                    params['tid'] = tid
                cursor.execute(insert_stmt, params)

                write_chunk_size = self.blob_chunk_size
                for _i in xrange(int(maxsize / write_chunk_size)):
                    write_chunk = f.read(write_chunk_size)
                    if not blob.write(write_chunk):
                        # EOF.
                        return
                if not blob.closed:
                    blob.close()
                chunk_num += 1
        finally:
            f.close()
            if blob is not None and not blob.closed:
                blob.close()
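
Again for contrast, a rough sketch of the PostgreSQL download side, streaming each stored large object back into a file in chunk_num order. This too is an illustration, not RelStorage's actual method; it assumes the same blob_chunk schema and uses only psycopg2's documented lobject(oid, mode) and lobject.read(size) calls.

    def postgresql_download_blob(cursor, oid, tid, filename):
        """Stream the stored large objects back into a file (sketch)."""
        select_stmt = """
        SELECT chunk FROM blob_chunk
        WHERE zoid = %(oid)s AND tid = %(tid)s
        ORDER BY chunk_num
        """
        cursor.execute(select_stmt, dict(oid=oid, tid=tid))
        loids = [row[0] for row in cursor.fetchall()]
        f = open(filename, 'wb')
        try:
            for loid in loids:
                blob = cursor.connection.lobject(loid, 'rb')
                try:
                    while True:
                        data = blob.read(1 << 20)  # read in 1 MB slices
                        if not data:
                            break
                        f.write(data)
                finally:
                    blob.close()
        finally:
            f.close()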