Example #1
    def packing(self, src_tensor):
        src_tensor = src_tensor.to(self.device)
        self.compression_python.set()
        self.compression_specific.set()
        #torch.cuda.synchronize()
        self.compression_specific.record()
        # Keep only the sign of every element (+1 / -1, 0 for exact zeros).
        src_tensor = torch.sign(src_tensor)
        src_tensor_size = src_tensor.size()
        src_tensor = src_tensor.view(-1)
        src_len = len(src_tensor)
        # Pad with zeros so the flattened length is a multiple of 32.
        add_elm = 32 - (src_len % 32)
        if src_len % 32 == 0:
            add_elm = 0
        new_tensor = torch.zeros([add_elm],
                                 dtype=torch.float32,
                                 device=self.device)
        src_tensor = torch.cat((src_tensor, new_tensor), 0)
        # Arrange as 32 rows so each column can be packed into one int32.
        src_tensor = src_tensor.view(32, -1)
        src_tensor = src_tensor.to(dtype=torch.int32)
        #torch.cuda.synchronize()
        self.compression_python.record()
        self.compression_cuda.set()
        #torch.cuda.synchronize()
        # Pack the 32 sign values of each column into a single 32-bit integer.
        dst_tensor = bit2byte.packing(src_tensor)
        dst_tensor = dst_tensor.to(dtype=torch.int32)
        #torch.cuda.synchronize()
        self.compression_cuda.record()

        return dst_tensor.to(self.source_device), src_tensor_size
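bit2byte is a custom CUDA extension, so its exact semantics are not visible in this example. As a reading aid only, here is a minimal pure-PyTorch sketch of what bit2byte.packing is assumed to do with the (32, N) sign tensor prepared above: collapse the 32 signs of each column into the bits of a single packed word. The sign-to-bit mapping and the bit order are assumptions, not taken from the extension, and the sketch keeps the result in int64 for simplicity while the real call returns int32.

import torch

def pack_signs_reference(src_tensor):
    # src_tensor: (32, N) tensor of +1 / -1 values (plus 0 padding),
    # as produced by the view/cast steps in packing() above.
    bits = (src_tensor > 0).to(torch.int64)                # assumed mapping: +1 -> 1, else 0
    shifts = torch.arange(32, dtype=torch.int64, device=src_tensor.device)
    # Bit i of the j-th packed word holds the sign of row i, column j.
    return (bits << shifts.view(32, 1)).sum(dim=0)         # shape (N,), one word per column

Applied to the (32, -1) tensor built above, each packed word carries the signs of 32 gradient entries, which is where the roughly 32x compression comes from.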
Example #2
    def majority_vote(self, src_tensor_list):
        self.majority_vote_decompression.set()
        #torch.cuda.synchronize()
        voter_num = len(src_tensor_list)
        # Stack the packed tensors from all voters and flatten them.
        src_tensor = torch.stack(src_tensor_list)
        src_tensor = src_tensor.view(-1)
        full_size = 32 * len(src_tensor)
        new_tensor = torch.ones(full_size, device=self.device, dtype=torch.int32)
        new_tensor = new_tensor.view(32, -1)
        # Unpack each 32-bit word into 32 entries and remap them to +/-1 votes.
        new_tensor = bit2byte.unpacking(src_tensor, new_tensor)
        new_tensor = -new_tensor.add_(-1)
        #torch.cuda.synchronize()
        self.majority_vote_decompression.record()

        # Regroup so each voter's votes form one row, sum across voters,
        # then restore the (32, N) layout expected by bit2byte.packing.
        self.majority_vote_sum_calculation.set()
        #torch.cuda.synchronize()
        new_tensor = new_tensor.permute(1, 0).contiguous().view(voter_num, -1)
        new_tensor = torch.sum(new_tensor, 0)
        new_tensor = new_tensor.view(-1, 32).permute(1, 0)
        #torch.cuda.synchronize()
        self.majority_vote_sum_calculation.record()

        self.majority_vote_compression.set()
        #torch.cuda.synchronize()
        # The sign of the vote sum is the majority decision; re-pack it.
        new_tensor = torch.sign(new_tensor)
        new_tensor = bit2byte.packing(new_tensor)
        new_tensor = new_tensor.to(dtype=torch.int32)
        #torch.cuda.synchronize()
        self.majority_vote_compression.record()
        return new_tensor
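Stripped of the bit packing and the timing events, the vote itself is just an element-wise sum of +/-1 votes followed by a sign. A minimal sketch of that idea on uncompressed sign tensors, for comparison with the packed version above:

import torch

def majority_vote_reference(sign_tensor_list):
    # sign_tensor_list: one +/-1 tensor per voter, all with the same shape
    # (the unpacked counterpart of src_tensor_list above).
    votes = torch.stack(sign_tensor_list)        # shape (voter_num, ...)
    # Element-wise majority: positive sum -> +1, negative -> -1, tie -> 0.
    return torch.sign(votes.sum(dim=0))

The permute/view reshaping in the method above appears to serve the same reduction while keeping the data in the 32-row layout that bit2byte expects.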
Example #3
    def packing(self, src_tensor):
        src_tensor = torch.sign(src_tensor)
        src_tensor_size = src_tensor.size()
        src_tensor = src_tensor.view(-1)
        src_len = len(src_tensor)
        add_elm = 32 - (src_len % 32)
        if src_len % 32 == 0:
            add_elm = 0
        new_tensor = torch.zeros([add_elm], dtype=torch.float32, device=src_tensor.device)
        src_tensor = torch.cat((src_tensor, new_tensor), 0)
        src_tensor = src_tensor.view(32, -1)
        src_tensor = src_tensor.to(dtype=torch.int32)
        dst_tensor = bit2byte.packing(src_tensor)
        dst_tensor = dst_tensor.to(dtype=torch.int32)
        return dst_tensor, src_tensor_size
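The decoding side is not shown in any of these examples. Under the same assumed bit layout as the packing sketch after Example #1, a hypothetical unpacking helper that restores a +/-1 tensor of the original shape might look like this; the name, the bit order, and the sign mapping are illustrative assumptions, not part of the bit2byte API.

import math
import torch

def unpack_signs_reference(dst_tensor, src_tensor_size):
    # dst_tensor: packed words of shape (N,); src_tensor_size: the size
    # returned alongside them by packing() above.
    shifts = torch.arange(32, dtype=torch.int64, device=dst_tensor.device)
    packed = dst_tensor.to(torch.int64).view(1, -1)
    bits = (packed >> shifts.view(32, 1)) & 1               # shape (32, N), one bit per sign
    signs = bits.to(torch.float32) * 2 - 1                  # assumed mapping: 1 -> +1, 0 -> -1
    # Row-major flattening of the (32, N) layout restores the original element
    # order; drop the zero padding appended in packing().
    return signs.view(-1)[:math.prod(src_tensor_size)].view(src_tensor_size)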
Example #4
    def majority_vote(self, src_tensor_list):
        voter_num = len(src_tensor_list)
        src_tensor = torch.stack(src_tensor_list)
        src_tensor = src_tensor.view(-1)
        full_size = 32 * len(src_tensor)
        new_tensor = torch.ones(full_size, device=src_tensor.device, dtype=torch.int32)
        new_tensor = new_tensor.view(32, -1)
        new_tensor = bit2byte.unpacking(src_tensor, new_tensor)
        new_tensor = -new_tensor.add_(-1)
        # sum
        new_tensor = new_tensor.permute(1, 0).contiguous().view(voter_num, -1)
        new_tensor = torch.sum(new_tensor, 0)
        new_tensor = new_tensor.view(-1, 32).permute(1, 0)
        new_tensor = torch.sign(new_tensor)
        new_tensor = bit2byte.packing(new_tensor)
        new_tensor = new_tensor.to(dtype=torch.int32)
        return new_tensor
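Taken together, the two methods suggest a signSGD-with-majority-vote style flow: every worker packs the signs of its gradient, the packed tensors are collected into a list, and majority_vote returns the packed consensus. The sketch below is caller-side pseudocode under stated assumptions: Bit2ByteCompressor is only a placeholder name for whatever class defines these methods, and the worker gradients are random stand-ins.

import torch

# Placeholder for the class that owns packing() and majority_vote() above.
compressor = Bit2ByteCompressor()                           # assumed constructor
worker_grads = [torch.randn(1000) for _ in range(4)]        # one gradient per worker

packed_list, original_size = [], None
for grad in worker_grads:
    packed, original_size = compressor.packing(grad)        # 1-bit sign compression
    packed_list.append(packed)

# Element-wise majority vote across workers, computed on the packed form.
voted = compressor.majority_vote(packed_list)
# Turning `voted` and `original_size` back into a +/-1 tensor requires an
# unpacking step that is not part of these examples.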