Example #1
import hashlib
import logging
import math
import os

import filechunkio
from boto.exception import S3ResponseError

logger = logging.getLogger(__name__)

def upload_local_file(self, file_path, obj_path):
    hasher = hashlib.md5()
    try:
        mp = self.bucket.initiate_multipart_upload(obj_path)
        # Every part of an S3 multipart upload except the last must be
        # at least 5 MB, so use a 5 MB chunk size.
        chunk_size = 5 * 2 ** 20
        file_size = os.stat(file_path).st_size
        chunk_count = int(math.ceil(file_size / float(chunk_size)))
        for i in range(chunk_count):
            offset = chunk_size * i
            chunk_bytes = min(chunk_size, file_size - offset)
            with filechunkio.FileChunkIO(file_path,
                                         'rb',
                                         offset=offset,
                                         bytes=chunk_bytes) as fp:
                # Feed this chunk into the checksum, then rewind so the
                # same bytes are uploaded as the part body.
                hasher.update(fp.read())
                fp.seek(0)
                mp.upload_part_from_file(fp, part_num=i + 1)
        mp.complete_upload()
        self.bucket.set_acl('public-read', obj_path)
        if self.check_file_download(obj_path):
            return hasher.hexdigest()
    except S3ResponseError:
        logger.exception("Could not upload key '%s' to S3", obj_path)
    except Exception:
        logger.exception("Could not read source for key '%s'", obj_path)
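
Example #1 is a method on a wrapper object that already holds a boto bucket and a check_file_download helper, neither of which is shown. A minimal sketch of how such a wrapper might be wired up; the class name S3Uploader, the credential handling, and the existence-check body are illustrative assumptions, not part of the original:

import boto

class S3Uploader:
    def __init__(self, bucket_name):
        # boto.connect_s3() picks up credentials from the environment
        # or ~/.boto; bucket_name is whichever bucket you upload into.
        self.bucket = boto.connect_s3().get_bucket(bucket_name)

    def check_file_download(self, obj_path):
        # Illustrative stand-in: treat the upload as verified if the
        # key is now visible in the bucket.
        return self.bucket.get_key(obj_path) is not None

S3Uploader.upload_local_file = upload_local_file  # attach the method above

With that wiring, S3Uploader('my-bucket').upload_local_file('local.bin', 'remote/local.bin') would return the file's MD5 hex digest on success and None otherwise ('my-bucket' and the paths are placeholders).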
Example #2
import math

import filechunkio
from progressbar import ETA, Bar, ProgressBar

# CONFIG['CHUNKUP'] holds the per-part chunk size in bytes and is
# defined elsewhere in the source.

def put(self):
    multipartupload = self.bucket.initiate_multipart_upload(self.keyname)
    # Float division ensures ceil() yields exactly enough parts to
    # cover the file, with no zero-byte trailing part.
    chunk_count = int(math.ceil(self.filesize / float(CONFIG['CHUNKUP'])))

    widgets = [
        self.message, " ",
        Bar(marker='#', left='[', right=']'), " ",
        ETA()
    ]

    self.pbar = ProgressBar(widgets=widgets,
                            maxval=self.filesize,
                            term_width=80)

    self.pbar.start()
    for i in range(chunk_count):
        offset = CONFIG['CHUNKUP'] * i
        chunk_bytes = min(CONFIG['CHUNKUP'], self.filesize - offset)
        with filechunkio.FileChunkIO(self.filename,
                                     'rb',
                                     offset=offset,
                                     bytes=chunk_bytes) as fp:
            multipartupload.upload_part_from_file(fp,
                                                  part_num=i + 1,
                                                  cb=self._dl_progress_cb,
                                                  num_cb=100)

    self.pbar.finish()
    multipartupload.complete_upload()
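
The put() method hands boto a _dl_progress_cb callback that is not shown above. boto invokes such callbacks with the bytes transmitted so far for the current part and that part's total size; a minimal sketch, assuming a hypothetical self._part_offset attribute that the upload loop sets to the current chunk's offset before each upload_part_from_file call:

def _dl_progress_cb(self, transmitted, total):
    # transmitted/total cover the current part only, so the overall
    # position needs the part's starting offset (self._part_offset,
    # a hypothetical attribute maintained by the upload loop).
    self.pbar.update(min(self._part_offset + transmitted, self.pbar.maxval))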
Example #3

import threading

import filechunkio

# GAP, GB, MB, chunk (the per-thread chunk size in bytes) and the
# sendfile() helper are assumed to be defined at module level.

def client_handler(soc, file_name, file_size, file_type):

    with soc, open(file_name, 'rb') as f:
        # Announce name, size and type before the payload.
        soc.sendall(f"{file_name}{GAP}{file_size}{GAP}{file_type}".encode())

        if file_size <= GB:
            # Small file: stream it sequentially in 1 MB reads.
            while True:
                fsend = f.read(MB)
                if not fsend:
                    break
                soc.sendall(fsend)
        else:
            # Large file: give each chunk to its own thread, tagged with
            # a chunk_id so the receiver can put the pieces back in order.
            num = 0
            threads = []
            while True:
                offset = chunk * num
                if offset >= file_size:
                    break
                buff = int(min(chunk, file_size - offset))
                fp = filechunkio.FileChunkIO(file_name, 'rb',
                                             offset=int(offset), bytes=buff)
                chunk_id = num
                num += 1
                t = threading.Thread(target=sendfile,
                                     args=(soc, fp, buff, chunk_id))
                t.daemon = True
                threads.append(t)
                t.start()

            # Block until every sender thread has finished.
            mthread = threading.current_thread()
            for thread in threads:
                if thread is mthread:
                    continue
                thread.join()
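
The sendfile worker that each thread runs is not included in the example. A minimal sketch, assuming each chunk is framed with its id and length so the receiver can reassemble the interleaved parts, and using a hypothetical module-level send_lock so concurrent threads don't interleave bytes mid-frame on the shared socket:

import threading

send_lock = threading.Lock()  # hypothetical: serializes writes to the shared socket

def sendfile(soc, fp, buff, chunk_id):
    # Read this thread's slice of the file, then emit one framed
    # message: 8-byte chunk id, 8-byte payload length, payload.
    data = fp.read()
    fp.close()
    header = chunk_id.to_bytes(8, 'big') + buff.to_bytes(8, 'big')
    with send_lock:
        soc.sendall(header + data)

Without some framing-and-locking scheme like this, the chunk threads would write to the socket concurrently and the receiver could not tell where one chunk ends and the next begins.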