#Assumes the enclosing module imports os, common, codingutil and embrutil.
def decodeFile(setting, metadata, mode):
    '''Decode chunks into original file.'''
    if setting.healthynode == setting.totalnode:
        #All nodes healthy: join the per-node chunks back into the file.
        dest = setting.mirrordir + '/' + metadata.filename
        src = []
        for i in range(metadata.totalchunk):
            chunkname = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)
            src.append(chunkname)
        codingutil.join(src, dest, metadata.filesize)
    else:
        return False
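#Illustrative sketch (not from the original module): decodeFile above relies
#on codingutil.join to reassemble the file. A minimal stand-in, assuming join
#simply concatenates the chunk files in order and trims the result to the
#original file size; the real implementation may differ.
def join_sketch(srcPaths, destPath, filesize):
    '''Concatenate chunk files into destPath and trim to filesize bytes.'''
    with open(destPath, 'wb') as out:
        for path in srcPaths:
            with open(path, 'rb') as chunk:
                out.write(chunk.read())
        out.truncate(filesize)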
def decodeFile(setting, metadata, mode):
    '''Decode chunks into original file.'''
    n = metadata.totalnode
    k = metadata.datanode
    nativeBlockNum = embrutil.getNativeBlockNum(n, k)
    parityBlockNum = embrutil.getParityBlockNum(n, k)
    authenticNum = nativeBlockNum + parityBlockNum
    if setting.healthynode >= setting.totalnode - 1:
        #Case of at most one node failure: join the native chunks directly.
        dest = setting.mirrordir + '/' + metadata.filename
        src = []
        for i in range(nativeBlockNum):
            chunkname = setting.chunkdir + '/' + metadata.filename + '.chunk' + str(i)
            src.append(chunkname)
        if mode == 'download':
            codingutil.join(src, dest, metadata.filesize)
    elif setting.healthynode >= metadata.datanode:
        #Case of two or more node failures:
        dest = setting.mirrordir + '/' + metadata.filename
        n = int(metadata.totalchunk / 2)
        k = nativeBlockNum
        src = []
        blocknums = []
        chunklist = []
        failedchunks = []
        failedchunktype = []
        for i in range(int(metadata.totalchunk / 2)):
            chunkname = setting.chunkdir + '/' + metadata.filename + '.chunk' + str(i)
            chunktype = metadata.chunkInfo[i].chunktype
            authenticChunkAction = metadata.chunkInfo[i].action
            replicaChunkAction = metadata.chunkInfo[i + authenticNum].action
            chunklist.append(chunkname)
            if authenticChunkAction == 'sos' and replicaChunkAction == 'sos':
                #Both the authentic chunk and its replica are lost.
                failedchunktype.append(chunktype)
                failedchunks.append(i)
            elif authenticChunkAction == 'download' or replicaChunkAction == 'download':
                src.append(chunkname)
                blocknums.append(i)
        if mode == 'download' and ('native' not in failedchunktype):
            codingutil.join(src, dest, metadata.filesize)
        elif mode == 'download' and ('native' in failedchunktype):
            codingutil.ecDecodeFile(n, k, src, blocknums, dest, metadata.filesize, setting)
        elif mode == 'repair' and ('native' in failedchunktype):
            codingutil.ecDecodeChunks(n, k, src, blocknums, chunklist, setting, failed=failedchunks)
        elif mode == 'repair':
            codingutil.ecDecodeChunks(n, k, src, blocknums, chunklist, setting, failed=failedchunks, nativeFailed=False)
        else:
            return False
        return True
    else:
        return False
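#Illustrative sketch (not from the original module): the loop above decides,
#for each authentic chunk, whether it must be regenerated or can be read from
#either copy. A standalone version of that classification, assuming the same
#'sos'/'download' action labels; the function and argument names are
#hypothetical.
def classifyChunks_sketch(authenticActions, replicaActions, chunkTypes):
    '''Return (available indices, failed indices, failed chunk types).'''
    available, failed, failedTypes = [], [], []
    for i, (a, r) in enumerate(zip(authenticActions, replicaActions)):
        if a == 'sos' and r == 'sos':
            #Both copies are lost; the chunk must be regenerated by decoding.
            failed.append(i)
            failedTypes.append(chunkTypes[i])
        elif a == 'download' or r == 'download':
            available.append(i)
    return available, failed, failedTypes

#Example: chunk 1 is lost on both copies, so it is the only one to regenerate.
#classifyChunks_sketch(['download', 'sos', 'download'],
#                      ['download', 'sos', 'sos'],
#                      ['native', 'native', 'parity'])
#-> ([0, 2], [1], ['native'])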
def repairFile(settingOld, settingNew, metadata):
    '''Repair file by rebuilding failed chunks.'''
    #Decode chunks first when two or more nodes have failed:
    if settingOld.healthynode <= settingOld.totalnode - 2:
        decodeFile(settingOld, metadata, 'repair')
    n = metadata.totalnode
    k = metadata.datanode
    nativeBlockNum = embrutil.getNativeBlockNum(n, k)
    parityBlockNum = embrutil.getParityBlockNum(n, k)
    authenticNum = nativeBlockNum + parityBlockNum
    #Paths of chunk files for generating a big-chunk:
    bigChunkPaths = []
    for i in range(metadata.totalnode):
        bigChunkPaths.append([])
    #Find replicas of failed chunks:
    for i in range(len(metadata.chunkInfo)):
        if i < authenticNum:
            chunkname = metadata.filename + '.chunk' + str(i)
        else:
            chunkname = metadata.filename + '.chunk' + str(i - authenticNum)
        src = settingNew.chunkdir + '/' + chunkname
        if metadata.chunkInfo[i].action == 'sos':
            #Reassign the lost chunk to its new node and mark it for upload.
            nodeid = metadata.chunkInfo[i].nodeid
            metadata.chunkInfo[i].chunkname = chunkname
            metadata.chunkInfo[i].chunkpath = src
            metadata.chunkInfo[i].nodekey = settingNew.nodeInfo[nodeid].nodekey
            metadata.chunkInfo[i].nodetype = settingNew.nodeInfo[nodeid].nodetype
            metadata.chunkInfo[i].bucketname = settingNew.nodeInfo[nodeid].bucketname
            metadata.chunkInfo[i].action = 'upload'
            #Add chunk path to bigChunkPaths:
            bigChunkPaths[nodeid].append(src)
        else:
            metadata.chunkInfo[i].chunkname = chunkname
            metadata.chunkInfo[i].chunkpath = chunkname
    #Add support for big-chunk: rebuild the big-chunk of every failed node.
    for i in range(metadata.totalnode):
        if settingOld.nodeInfo[i].healthy == False:
            dest = settingOld.chunkdir + '/' + metadata.filename + '.node' + str(i)
            codingutil.join(bigChunkPaths[i], dest, metadata.fileNodeInfo[i].bigchunksize)
            metadata.fileNodeInfo[i].bigchunkpath = dest
            metadata.fileNodeInfo[i].bigchunkname = metadata.filename + '.node' + str(i)
            metadata.fileNodeInfo[i].action = 'upload'
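#Illustrative sketch (not from the original module): entries 0..authenticNum-1
#of chunkInfo describe the authentic chunks and entries
#authenticNum..2*authenticNum-1 describe their replicas, so both map to the
#same on-disk file name. A standalone version of that index-to-name mapping;
#the function name and example values are hypothetical.
def chunkNameForIndex_sketch(filename, index, authenticNum):
    '''Return the chunk file name shared by an authentic chunk and its replica.'''
    if index < authenticNum:
        return filename + '.chunk' + str(index)
    return filename + '.chunk' + str(index - authenticNum)

#Example with authenticNum = 6: indices 2 and 8 refer to the same chunk file.
#chunkNameForIndex_sketch('report.pdf', 2, 6)  -> 'report.pdf.chunk2'
#chunkNameForIndex_sketch('report.pdf', 8, 6)  -> 'report.pdf.chunk2'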
def decodeFile(setting, metadata, mode):
    '''Decode chunks into original file.'''
    if setting.healthynode == setting.totalnode:
        #Case of no failure:
        dest = setting.mirrordir + '/' + metadata.filename
        src = []
        for i in range(metadata.datanode):
            chunkname = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)
            src.append(chunkname)
        codingutil.join(src, dest, metadata.filesize)
    elif setting.healthynode >= metadata.datanode:
        #Case of one-node or two-node failure:
        dest = setting.mirrordir + '/' + metadata.filename
        n = metadata.totalnode
        k = metadata.datanode
        src = []
        blocknums = []
        chunklist = []
        failedchunks = []
        failedchunkname = []
        failedchunktype = []
        for i in range(metadata.totalchunk):
            nodeid = metadata.chunkInfo[i].nodeid
            chunkname = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)
            chunktype = metadata.chunkInfo[i].chunktype
            chunklist.append(chunkname)
            if metadata.chunkInfo[i].action == 'download':
                src.append(chunkname)
                blocknums.append(i)
            else:
                failedchunkname.append(chunkname)
                failedchunktype.append(chunktype)
                if metadata.chunkInfo[i].action == 'sos':
                    failedchunks.append(i)
        #Dispatch: plain join if no native chunk was lost, otherwise erasure decode.
        if mode == 'download' and ('native' not in failedchunktype):
            codingutil.join(src, dest, metadata.filesize)
        elif mode == 'download' and ('native' in failedchunktype):
            codingutil.ecDecodeFile(n, k, src, blocknums, dest, metadata.filesize, setting)
        elif mode == 'repair' and ('native' in failedchunktype):
            codingutil.ecDecodeChunks(n, k, src, blocknums, chunklist, setting, failed=failedchunks)
        elif mode == 'repair':
            codingutil.ecDecodeChunks(n, k, src, blocknums, chunklist, setting, failed=failedchunks, nativeFailed=False)
        else:
            return False
        return True
    else:
        return False
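#Illustrative sketch (not from the original module): the if/elif chain above
#picks a decode strategy from the mode and from whether a native chunk was
#lost. A standalone version of that dispatch, returning a label instead of
#calling codingutil; assumes the same mode strings used above.
def chooseDecodePath_sketch(mode, failedchunktype):
    '''Name the decode routine the mode/failure combination selects.'''
    nativeFailed = 'native' in failedchunktype
    if mode == 'download' and not nativeFailed:
        return 'join'            #all native chunks present: plain concatenation
    elif mode == 'download':
        return 'ecDecodeFile'    #rebuild the file from surviving blocks
    elif mode == 'repair':
        #repair regenerates chunks whether or not a native chunk failed
        return 'ecDecodeChunks'
    return 'unsupported'

#chooseDecodePath_sketch('download', ['parity'])  -> 'join'
#chooseDecodePath_sketch('repair', ['native'])    -> 'ecDecodeChunks'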
def encodeFile(setting, metadata):
    '''Encode file into chunks.'''
    metadata.datanode = setting.datanode
    src = setting.mirrordir + '/' + metadata.filename
    dest = []
    dataChunks = []
    parityChunks = []
    n = metadata.totalnode
    k = metadata.datanode
    nativeBlockNum = embrutil.getNativeBlockNum(n, k)
    parityBlockNum = embrutil.getParityBlockNum(n, k)
    authenticNum = nativeBlockNum + parityBlockNum
    nodeIdList = embrutil.getNodeIdList(n, k)
    for i in range(nativeBlockNum):
        chunkname = setting.chunkdir + '/' + metadata.filename + '.chunk' + str(i)
        dataChunks.append(chunkname)
        dest.append(chunkname)
    for i in range(parityBlockNum):
        parityname = setting.chunkdir + '/' + metadata.filename + '.chunk' \
            + str(authenticNum - parityBlockNum + i)
        parityChunks.append(parityname)
        dest.append(parityname)
    codingutil.ecEncode(src, dataChunks, parityChunks, setting)
    #Generate info for big-chunk:
    bigChunkPaths = []
    for i in range(metadata.totalnode):
        fileNode = common.FileNodeMetadata(i)
        fileNode.nodekey = setting.nodeInfo[i].nodekey
        fileNode.nodetype = setting.nodeInfo[i].nodetype
        fileNode.bucketname = setting.nodeInfo[i].bucketname
        fileNode.bigchunksize = 0
        fileNode.chunknum = 0
        metadata.fileNodeInfo.append(fileNode)
        bigChunkPaths.append([])
    #Generate info for small chunks (each authentic chunk plus one replica):
    for i in range(authenticNum * 2):
        chunk = common.ChunkMetadata(i)
        if i < authenticNum:
            #Case of non-replica:
            chunk.chunkname = metadata.filename + '.chunk' + str(i)
            chunk.chunksize = os.path.getsize(dest[i])
            if i < nativeBlockNum:
                chunk.chunktype = 'native'
            else:
                chunk.chunktype = 'parity'
            chunk.chunkpath = dest[i]
        else:
            #Case of replica:
            j = i - authenticNum
            chunk.chunkname = metadata.chunkInfo[j].chunkname
            chunk.chunksize = metadata.chunkInfo[j].chunksize
            chunk.chunktype = 'replica'
            chunk.chunkpath = metadata.chunkInfo[j].chunkpath
        nodeid = nodeIdList[i]
        chunk.nodeid = nodeid
        chunk.nodekey = setting.nodeInfo[nodeid].nodekey
        chunk.nodetype = setting.nodeInfo[nodeid].nodetype
        chunk.bucketname = setting.nodeInfo[nodeid].bucketname
        chunk.action = 'upload'
        #Add chunk position inside big-chunk:
        chunk.position = metadata.fileNodeInfo[nodeid].chunknum
        metadata.chunkInfo.append(chunk)
        #Add support for big-chunk:
        metadata.fileNodeInfo[nodeid].bigchunksize += chunk.chunksize
        metadata.fileNodeInfo[nodeid].chunknum += 1
        bigChunkPaths[nodeid].append(chunk.chunkpath)
    metadata.totalchunk = authenticNum * 2
    #Generate big-chunks: concatenate each node's chunks into one upload unit.
    for i in range(metadata.totalnode):
        dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)
        codingutil.join(bigChunkPaths[i], dest, metadata.fileNodeInfo[i].bigchunksize)
        metadata.fileNodeInfo[i].bigchunkpath = dest
        metadata.fileNodeInfo[i].bigchunkname = metadata.filename + '.node' + str(i)
        metadata.fileNodeInfo[i].action = 'upload'
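#Illustrative sketch (not from the original module): the loop above assigns
#each authentic or replica chunk to a node via nodeIdList and records its
#position inside that node's big-chunk. A standalone version of just that
#placement bookkeeping; the node id list and sizes are hypothetical inputs.
def planBigChunks_sketch(nodeIdList, chunkSizes):
    '''Return (per-chunk positions, per-node big-chunk sizes).'''
    positions = []
    bigchunksize = {}
    chunknum = {}
    for nodeid, size in zip(nodeIdList, chunkSizes):
        positions.append(chunknum.get(nodeid, 0))
        chunknum[nodeid] = chunknum.get(nodeid, 0) + 1
        bigchunksize[nodeid] = bigchunksize.get(nodeid, 0) + size
    return positions, bigchunksize

#Example: two chunks land on node 0, one on node 1.
#planBigChunks_sketch([0, 1, 0], [4, 4, 4])  -> ([0, 0, 1], {0: 8, 1: 4})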