def enq_file(self, f):
    """Split file *f* into fixed-size chunks and enqueue one ChunkSum
    work item per chunk, in one shot.

    f: a stat-like entry exposing .path and .st_size.

    An empty file still produces a single (zero-length) item so it is
    represented in the output; a trailing partial chunk produces one
    extra item. The number of items enqueued is added to self.workcnt.
    """
    # Floor division keeps `chunks` an int on Python 3 as well —
    # true division (/) would return a float and break range() below.
    chunks = f.st_size // self.chunksize
    remaining = f.st_size % self.chunksize
    workcnt = 0

    if f.st_size == 0:
        # empty file: emit one zero-length chunk
        ck = ChunkSum(f.path)
        self.enq(ck)
        self.logger.debug("%s" % ck, extra=self.d)
        workcnt += 1
    else:
        for i in range(chunks):
            ck = ChunkSum(f.path)
            ck.offset = i * self.chunksize
            ck.length = self.chunksize
            self.enq(ck)
            self.logger.debug("%s" % ck, extra=self.d)
        workcnt += chunks

    if remaining > 0:
        # send remainder (final partial chunk)
        ck = ChunkSum(f.path)
        ck.offset = chunks * self.chunksize
        ck.length = remaining
        self.enq(ck)
        self.logger.debug("%s" % ck, extra=self.d)
        workcnt += 1

    # tally work cnt
    self.workcnt += workcnt
def write_bytes(self, rfd, wfd, work):
    """Copy one chunk (work.offset .. work.offset + work.length) from the
    source descriptor to the destination descriptor in self.blocksize
    pieces.

    When self.verify is set, a SHA-1 accumulator is threaded through
    read_then_write() and the finished ChunkSum record is kept in memory,
    or spilled to an on-disk DbStore in batches once the in-memory
    buffers fill up.

    rfd, wfd: open OS-level file descriptors (source, destination).
    work:     chunk descriptor with .offset, .length and .dest.
    """
    # Seek both descriptors to this chunk's offset before copying.
    os.lseek(rfd, work.offset, os.SEEK_SET)
    os.lseek(wfd, work.offset, os.SEEK_SET)

    m = None
    if self.verify:
        # presumably read_then_write() feeds this digest as it copies —
        # TODO confirm against read_then_write's implementation
        m = hashlib.sha1()

    remaining = work.length
    while remaining != 0:
        # Full-size blocks first, then one final short block.
        if remaining >= self.blocksize:
            self.read_then_write(rfd, wfd, work, self.blocksize, m)
            remaining -= self.blocksize
        else:
            self.read_then_write(rfd, wfd, work, remaining, m)
            remaining = 0

    if self.verify:
        # use src path here
        # NOTE(review): the comment above says "src path" but work.dest is
        # what gets recorded — confirm which path the checksum should carry.
        ck = ChunkSum(work.dest,
                      offset=work.offset,
                      length=work.length,
                      digest=m.hexdigest())
        if len(self.chunksums_mem) < G.memitem_threshold:
            # Fast path: keep the record in memory.
            self.chunksums_mem.append(ck)
        else:
            # Overflow path: stage records, flush to the DbStore in
            # batches of G.DB_BUFSIZE.
            self.chunksums_buf.append(ck)
            if len(self.chunksums_buf) == G.DB_BUFSIZE:
                if self.use_store == False:
                    # Lazily create the per-rank on-disk chunksum store.
                    self.workdir = os.getcwd()
                    self.chunksums_dbname = "%s/chunksums.%s" % (G.tempdir, self.circle.rank)
                    self.chunksums_db = DbStore(dbname=self.chunksums_dbname)
                    self.use_store = True
                self.chunksums_db.mput(self.chunksums_buf)
                del self.chunksums_buf[:]
def gen_chunksum(b, sig):
    """Parse one "path!@offset!@length!@digest" record into a ChunkSum.

    The path in *b* is first made relative to sig.prefix. A malformed
    record (wrong number of "!@" fields) prints a parsing error and
    exits the process with status 1.
    """
    rel = os.path.relpath(b, sig.prefix)
    fields = rel.split("!@")
    # An unpack of anything other than exactly four fields would fail,
    # so guard explicitly and bail out the same way.
    if len(fields) != 4:
        print("Parsing error: %s" % rel)
        sys.exit(1)
    fn, offset, length, digest = fields

    chunk = ChunkSum(fn)
    chunk.offset = int(offset)
    chunk.length = int(length)
    chunk.digest = digest.strip()
    return chunk