The snippets below are usage examples of the HuffmanCodec API from the dahuffman library; they assume "from dahuffman import HuffmanCodec" plus each snippet's own dependencies.

Example #1
def test_eof_cut_off():
    # Using frequency table that should give this encoding
    # A   -> 0
    # B   -> 11
    # C   -> 101
    # EOF -> 100
    codec = HuffmanCodec.from_frequencies({
        'A': 5,
        'B': 4,
        'C': 2,
    })
    cases = {
        # Straightforward cases
        '': 0,
        'A': 1,
        'AB': 1,
        'ABB': 1,
        'CCC': 2,
        # Cases where EOF cut-off saves one output byte
        'ACC': 1,
        'CC': 1,
        'CCCCC': 2,
    }
    for data, expected_length in cases.items():
        encoded = codec.encode(data)
        assert len(encoded) == expected_length
        assert data == codec.decode(encoded)
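The "EOF cut-off" cases work because only as many EOF bits as are needed to pad the final byte get emitted, as the comments above suggest. A quick sketch of the arithmetic for the 'CC' case, using the code table above:

bits = '101' + '101'            # two 'C' symbols -> 6 data bits
bits += '100'[:8 - len(bits)]   # only the first 2 bits of EOF pad the byte
assert len(bits) == 8           # so 'CC' encodes to a single byte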
Example #2
    def huffmanCoding(self, model, quantized, k):
        """Compare raw vs. Huffman-compressed size of a Keras model's weights.

        quantized: bits per weight (falsy means unquantized 4-byte floats).
        k: number of leading weights to skip in each layer.
        """
        modelSize = 0
        compressedModelSize = 0

        for layer in model.layers:
            if not layer.weights:
                continue
            # flatten the layer's first weight tensor into a 1-D vector
            w_vec = layer.weights[0].numpy().flatten()

            codec = HuffmanCodec.from_data(w_vec[k:])
            encoded = codec.encode(w_vec[k:])
            compressedModelSize += len(encoded)

            if quantized:
                # quantized weights take `quantized` bits each
                modelSize += ((len(w_vec) - k) * quantized) // 8
            else:
                # unquantized float32 weights take 4 bytes each
                modelSize += (len(w_vec) - k) * 4

        self.modelSize = modelSize
        self.compressedSize = compressedModelSize
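A hypothetical call, assuming the enclosing object is named compressor and model is a Keras model (both names invented for illustration):

compressor.huffmanCoding(model, quantized=8, k=0)
print(compressor.modelSize / compressor.compressedSize)  # compression ratio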
Example #3
def test_string_data(data):
    codec = HuffmanCodec.from_data(data)
    encoded = codec.encode(data)
    assert type(encoded) == type(b'')
    assert len(encoded) < len(data)
    decoded = codec.decode(encoded)
    assert decoded == data
Example #4
def huffman_encode_block(zigzagged_block):
    # build the codec from the block's symbol frequencies
    frequencies = collections.Counter(zigzagged_block)
    huffman_codec = HuffmanCodec.from_frequencies(frequencies)
    # return the frequency table alongside the payload so the decoder
    # can rebuild the same codec
    return huffman_codec.encode(zigzagged_block), dict(frequencies)
Example #5
    def get_codec(self):
        """Generate the codec for encoding and decoding from a fixed pangram.

        The sentence contains every lowercase letter, so each one receives a code.

        Returns: the codec
        """
        codec = HuffmanCodec.from_data(
            "the quick brown fox jumps over the lazy dog")
        return codec
Example #6
def test_print_code_table2():
    codec = HuffmanCodec.from_data("aaaaa")
    out = io.StringIO()
    codec.print_code_table(out=out)
    actual = out.getvalue().split('\n')
    expected = "Bits Code Value Symbol\n   1 0        0 _EOF\n   1 1        1 'a'\n".split(
        '\n')
    assert actual[0] == expected[0]
    assert set(actual[1:]) == set(expected[1:])
Example #7
def encode(frame, huffman=False):
    dct_frame = dct(frame)
    if not huffman:
        # plain run-length encoding of the DCT coefficients
        return rle(dct_frame.flatten())
    # Huffman-code the coefficients; the codec is returned for decoding
    codec = HuffmanCodec.from_data(dct_frame.flatten())
    encoded_frame = codec.encode(dct_frame.flatten())
    return encoded_frame, codec
Example #8
def test_print_code_table():
    codec = HuffmanCodec.from_frequencies({'a': 2, 'b': 4, 'c': 8})
    out = StringIO()
    codec.print_code_table(out=out)
    dump = out.getvalue()
    assert re.search(r"1\s+1\s+.*'c'", dump)
    assert re.search(r"2\s+01\s+.*'b'", dump)
    assert re.search(r"3\s+001\s+.*'a'", dump)
    assert re.search(r"3\s+000\s+.*_EOF", dump)
Example #9
def test_save(tmp_path: Path):
    codec1 = HuffmanCodec.from_data('aabcbcdbabdbcbd')
    path = str(tmp_path / 'foo' / 'bar.huff')
    codec1.save(path)
    output1 = codec1.encode('abcdabcd')
    codec2 = PrefixCodec.load(path)
    output2 = codec2.encode('abcdabcd')
    assert output1 == output2
    assert codec1.decode(output1) == codec2.decode(output2)
Example #10
def test_trailing_zero_handling():
    """
    Just two symbols ('a' and 'b'): without end-of-file handling, each would only take 1 bit (e.g. a=0 and b=1)
    so 'abba' would be 4 bits '0110', trailed with zeros to fill a byte: '01100000', which is indiscernible
    from result of input 'abbaaaaa'. With proper end-of-file handling, trailing bits are ignored properly.
    """
    codec = HuffmanCodec.from_frequencies({'a': 1, 'b': 1})
    decoded = codec.decode(codec.encode('abba'))
    assert decoded == 'abba'
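A minimal illustration of the ambiguity the docstring describes, assuming the codes a=0 and b=1:

bits = '0110'                  # 'abba'
padded = bits.ljust(8, '0')    # zero-padded to one byte: '01100000'
# reading back bit-by-bit without an EOF symbol:
assert ''.join('a' if b == '0' else 'b' for b in padded) == 'abbaaaaa'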
Example #11
def test_custom_eof_in_frequencies():
    codec = HuffmanCodec.from_frequencies(
        {'A': 5, 'B': 3, 'C': 2, 'Z': 8},
        eof="Z",
    )
    encoded = codec.encode("ABCACBZABAB")
    assert codec.decode(encoded) == "ABCACB"
Example #12
def huffman_coding_decoding(RLE_total):

    RLE_total_new = str(RLE_total)

    codec = HuffmanCodec.from_data(RLE_total_new)
    print("Huffman Code Table: \n")

    codec.print_code_table()
    coded_string = codec.encode(RLE_total_new)
    decoded_string = codec.decode(coded_string)
    return codec, coded_string, decoded_string
Example #13
    def generate_table(self, inputfilename):
        self.generate_data_list(inputfilename)
        self.codec = HuffmanCodec.from_data(self.__data_ns)
        self.table = self.codec.get_code_table()

        self.__strings_table = {}
        for symbol in self.table.keys():
            # the only non-int symbol in the code table is the EOF sentinel
            if not isinstance(symbol, int):
                self.eof = symbol
            bitsize, value = self.table[symbol]
            self.__strings_table[symbol] = bin(value)[2:].rjust(bitsize, '0')
Example #14
    def encodedStringDict(self, listOfStrings=None):
        listOfStrings = listOfStrings or []
        codec = HuffmanCodec.from_data(listOfStrings)
        encodeResults = codec.encode(listOfStrings)

        def decodeListOfString(encode):
            return codec.decode(encode)

        decodedList = decodeListOfString(encodeResults)

        # Note: encodeResults is bytes, so zip pairs each string with a
        # single byte value, not with that string's full encoding.
        encodedDict = dict(zip(listOfStrings, encodeResults))
        return encodedDict, decodedList
Example #15
def Huffman_Encoder(binary_map, bits_group_num=64):
    # float2bin is an external helper (assumed to return a 1-D numpy bool array)
    binary_map = float2bin(binary_map)
    map_length = len(binary_map)
    if map_length % bits_group_num:
        # split into full groups of bits_group_num; keep the remainder as-is
        bools_list = list(binary_map[:-(map_length % bits_group_num)].reshape(
            -1, bits_group_num))
        bools_list.append(binary_map[-(map_length % bits_group_num):])
    else:
        bools_list = list(binary_map.reshape(-1, bits_group_num))
    # use the packed byte string of each group as a Huffman symbol
    bits_string = [b.tobytes() for b in bools_list]
    codec = HuffmanCodec.from_data(bits_string)
    output = codec.encode(bits_string)
    return output, codec
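Since the byte groups themselves are the symbols, decoding recovers them directly; a sketch, relying on dahuffman returning a list for non-string symbols (see Example #29 below):

output, codec = Huffman_Encoder(binary_map)  # binary_map as above
groups = codec.decode(output)                # the original list of packed byte groups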
Example #16
    def save_compressed(self, som, name):
        winners = som.winners()
        file = open(output_path + name, "w")
        diff = Dataset.differential_coding(winners.flatten(),
                                           self.nb_pictures[1])

        codeNormal = HuffmanCodec.from_data(winners).encode(winners)
        codeDiff = HuffmanCodec.from_data(diff).encode(diff)
        hd = np.concatenate(som.get_som_as_list(), 0) * 255
        hd = np.array(hd, 'uint8')
        header = HuffmanCodec.from_data(hd).encode(hd)
        file.write(str(header))
        file.write(str(codeDiff))
        file.close()

        print("Compression ratio of differential coding:",
              len(codeNormal) / len(codeDiff))
        print(
            "Total compression ratio:",
            len(self.data) * len(self.data[0]) / (len(header) + len(codeDiff)))
Example #17
    def decode_huffman_encoded_string(self, encoded_item):
        """Decode a given string previously Huffman encoded and latin1 decoded.

        Parameters:
            encoded_item (dictionary): a dictionary with a string key and a 
            Huffman encoded and latin1 decoded value.

        Returns:
            A Huffman decoded string as JSON.
        """
        encoded_item = json.loads(encoded_item)
        key = next(iter(encoded_item))
        item_codec = HuffmanCodec.from_data(key)
        return json.dumps(item_codec.decode(
            encoded_item[key].encode('latin1')))
Example #18
    def compress_pic(self, quant_blocks):
        """
        compress the matrix data
        :param quant_blocks: written quantified matrix
        :return:huffman tree and encoded data
        """

        sequence = list(
            chain(*[
                list(chain(*[self.mat2sequence(mat) for mat in line]))
                for line in quant_blocks
            ]))
        codec = HuffmanCodec.from_data(sequence)
        encoded = codec.encode(sequence)
        return codec, encoded
Example #19
def encode(data, fps=10):
    assert len(data.shape) == 4
    num_frames = data.shape[0]
    # collapse the tensor to a ±1 sign map; zeros are mapped to +1
    flattened = torch.flatten(data).sign()
    flattened[flattened == 0] = 1
    data = flattened.int().tolist()

    freqs = {}
    freqs[1] = torch.sum(flattened == 1).item()
    freqs[-1] = torch.sum(flattened == -1).item()
    assert freqs[1] + freqs[-1] == len(data)

    codec = HuffmanCodec.from_frequencies(freqs)
    encoded = codec.encode(data)
    added_bitrate = (len(encoded) + (len(freqs) * (32 * 2 * 8))) / (num_frames * (2 ** 20)) * fps
    return freqs, encoded, added_bitrate
Example #20
    def huffman_encode_strings(self, items):
        """Build a dictionary of strings - the key being the original string, 
        and the value being a (Huffman) encoded version of that string.

        Parameters:
            items (list): a list of strings.

        Returns:
            A dictionary of Huffman encoded strings with original strings as 
            keys and encoded strings as latin1 decoded values as JSON.
        """
        encoded_items = {}
        for item in items:
            item_codec = HuffmanCodec.from_data(item)
            encoded_item = item_codec.encode(item)
            encoded_items.update({item: encoded_item.decode('latin1')})
        return json.dumps(encoded_items)
Example #21
def encode(model, out_file='out.pkl'):
    print('Start encoding...')
    quanti_dic = OrderedDict()
    for k, v in model.state_dict().items():
        print(k)
        if 'running' in k or 'batches' in k:
            print("Ignoring {}".format(k))
            quanti_dic[k] = v
            continue
        else:
            layer_w = v.data.cpu().numpy().flatten()
            codec = HuffmanCodec.from_data(layer_w)
            encoded = codec.encode(layer_w)
            quanti_dic[k] = [encoded, codec]
    with open(out_file, 'wb') as outfile:
        pickle.dump(quanti_dic, outfile)
    print('Done. Saved to {}'.format(out_file))
Example #22
def main():
    logging.basicConfig(level=logging.INFO)

    # XML data sets from https://www.data.gov/
    urls = [
        "https://data.cityofnewyork.us/api/views/kku6-nxdu/rows.xml",
        "https://data.cdc.gov/api/views/bi63-dtpu/rows.xml",
        "https://data.cdc.gov/api/views/cjae-szjv/rows.xml",
        "https://data.cityofnewyork.us/api/views/25th-nujf/rows.xml",
        "https://data.ct.gov/api/views/kbxi-4ia7/rows.xml",
        "https://data.cityofchicago.org/api/views/pfsx-4n4m/rows.xml",
        "https://data.cdc.gov/api/views/6vp6-wxuq/rows.xml",
        "https://www.sba.gov/sites/default/files/data.xml",
        "https://data.cdc.gov/api/views/e6fc-ccez/rows.xml",
        "https://data.cityofnewyork.us/api/views/jb7j-dtam/rows.xml",
        "https://data.cityofnewyork.us/api/views/zt9s-n5aj/rows.xml",
        "https://gisdata.nd.gov/Metadata/ISO/xml/metadata_Roads_MileMarkers.xml",
        "https://data.cityofchicago.org/api/views/kn9c-c2s2/rows.xml",
        "https://data.cityofnewyork.us/api/views/5t4n-d72c/rows.xml",
        "https://data.cdc.gov/api/views/6rkc-nb2q/rows.xml",
        "https://gisdata.nd.gov/Metadata/ISO/xml/metadata_Airports.xml",
        "https://data.sfgov.org/api/views/j4sj-j2nf/rows.xml",
        "https://data.kingcounty.gov/api/views/gmen-63jm/rows.xml",
        "https://data.mo.gov/api/views/vpge-tj3s/rows.xml",
    ]

    _log.info('Building frequency tables')
    frequencies = Counter()
    for url in urls:
        path = download(
            url, 'xml-data/' + hashlib.md5(url.encode('utf-8')).hexdigest() +
            '.xml')
        with path.open('r') as f:
            # Only take first N bytes.
            # Large files probably have a lot of structural repetition, which skews the frequencies
            raw = f.read(100000)
        frequencies.update(raw)

    # TODO add more metadata
    _log.info(f'Frequencies raw {len(frequencies)}: {frequencies}')
    codec = HuffmanCodec.from_frequencies(frequencies)
    codec.save(CODECS / "xml.pickle", metadata={"frequencies": frequencies})
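The saved codec can later be restored without rebuilding the frequency tables; a sketch using PrefixCodec.load, the counterpart shown in Example #9 (CODECS is the same path constant as above):

codec = PrefixCodec.load(CODECS / "xml.pickle")
encoded = codec.encode("<rows></rows>")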
Example #23
def decodeAudio(audioFileEncoded):
    encfile = audioFileEncoded
    print("Encoded file =", encfile)

    N = 1024  # number of MDCT subbands
    nfilts = 64  # number of subbands in the bark domain
    # Sine window:
    fb = np.sin(np.pi/(2*N)*(np.arange(int(1.5*N))+0.5))
    # open the binary pickle file:
    # strip the extension from the file name
    name, ext = os.path.splitext(encfile)
    # new name and extension for the decoded file
    decfile = name + 'decodeFile.wav'
    print("Decoded file:", decfile)

    with open(encfile, 'rb') as codedfile:
        fs = pickle.load(codedfile)
        channels = pickle.load(codedfile)
        print("Fs =", fs, "Channels =", channels)

        for chan in range(channels):  # loop over channels:
            print("Channel", chan)
            tablemTbarkquant = pickle.load(codedfile)  # Huffman table for scale factors
            tableyq = pickle.load(codedfile)  # Huffman table for subband samples
            mTbarkquantc = pickle.load(codedfile)  # Huffman coded scale factors
            yqc = pickle.load(codedfile)  # Huffman coded subband samples

            # Huffman decoder for the scale factors
            codecmTbarkquant = HuffmanCodec(code_table=tablemTbarkquant, check=False)
            # Huffman decode the scale factors
            mTbarkquantflattened = codecmTbarkquant.decode(mTbarkquantc)
            # reshape back to a matrix with columns of length nfilts
            mTbarkquant = np.reshape(mTbarkquantflattened, (nfilts, -1), order='F')

            # Huffman decoder for the subband samples
            codecyq = HuffmanCodec(code_table=tableyq, check=False)
            # Huffman decode the subband samples
            yqflattened = codecyq.decode(yqc)
            # reshape back to a matrix with columns of length N
            yq = np.reshape(yqflattened, (N, -1), order='F')
            # dequantize and compute the MDCT synthesis
            xrek, mT, ydeq = MDCTsyn_dequant_dec(yq, mTbarkquant, fs, fb, N, nfilts)
            if chan == 0:
                x = xrek
            else:
                x = np.vstack((x, xrek))
    x = np.clip(x.T, -2**15, 2**15 - 1)
    # write the decoded signal to a wav file
    wav.write(decfile, fs, np.int16(x))
Example #24
def decode(data):
    # decode binary data carrying a special header for the decompression algorithm
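    # Assumed header layout (POS_NUM_OF_CHARS and POS_ALPHABET are module-level
    # constants defined elsewhere; the layout mirrors the slicing below):
    #   data[0:POS_NUM_OF_CHARS]                   -> real_size, little-endian
    #   data[POS_NUM_OF_CHARS:POS_NUM_OF_CHARS+2]  -> num_of_chars, big-endian
    #   4 bytes per symbol: 1 byte symbol + 3 bytes big-endian frequency
    #   data[POS_ALPHABET + num_of_chars*4:]       -> Huffman payload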
    real_size = int.from_bytes(data[:POS_NUM_OF_CHARS], byteorder='little')
    num_of_chars = int.from_bytes(data[POS_NUM_OF_CHARS:POS_NUM_OF_CHARS + 2],
                                  byteorder='big')

    dummy_list = []

    print("num_of_chars ", num_of_chars)
    for i in range(num_of_chars):
        c_ch = data[POS_ALPHABET + i * 4]

        c_ch_freq = int.from_bytes(data[POS_ALPHABET + 1 + i * 4:POS_ALPHABET +
                                        1 + i * 4 + 3],
                                   byteorder='big')

        dummy_list.append((c_ch, c_ch_freq))

    alphabet_frequencies = dict(dummy_list)
    print("alphabet_frequencies", alphabet_frequencies)
    codec = HuffmanCodec.from_frequencies(alphabet_frequencies)
    codec.print_code_table()
    return codec.decode(data[POS_ALPHABET + num_of_chars * 4:])
Example #25
    def __init__(self, method):
        try:
            self.freqs = frequencies[method]
            self.codec = HuffmanCodec.from_frequencies(self.freqs)
        except KeyError:
            raise KeyError('Invalid Huffman dictionary.')
Example #26
    def decodeStr(self, strLis, decStr):
        codec = HuffmanCodec.from_data(strLis)
        decoded = codec.decode(decStr)
        # assumes decode() yields bytes here; a str result (e.g. when strLis
        # is a str) would make this .decode("utf-8") call fail
        decoded2 = decoded.decode("utf-8")
        return decoded2
Example #27
    def encode(self, index):
        self.codebook = HuffmanCodec.from_data(index)
        self.encoded_index = self.codebook.encode(index)
        return self.encoded_index
Example #28
def main():
    # Create a TCP/IP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    # Bind the socket to the port
    server_address = ('localhost', 10000)
    print('starting up on %s port %s' % server_address, file=sys.stderr)
    sock.bind(server_address)

    # Listen for incoming connections
    sock.listen(1)
    connect = True
    failCount = 0
    passCount = 0

    INCEPT = torchvision.models.inception_v3(pretrained=True)
    PREVIOUS_ARRAY = None
    USE_DELTA = True
    LAST_EDGE_LAYER = FLAGS.layer_index
    NUM_BINS = FLAGS.num_bins
    DELTA_VALUE = FLAGS.delta_value
    save_results = FLAGS.save_results
    analyse_fc_results = FLAGS.compare_fc

    print('Number of bins: ', NUM_BINS)
    print('DELTA VALUE: ', DELTA_VALUE)
    print('Last edge index: ', LAST_EDGE_LAYER)
    print('Saving Results: ', save_results)

    if (LAST_EDGE_LAYER == 7):
        RESHAPE_ARRAY_DIMENSIONS = [192, 35, 35]
    elif (LAST_EDGE_LAYER == 11):
        RESHAPE_ARRAY_DIMENSIONS = [768, 17, 17]
    elif (LAST_EDGE_LAYER == 6):
        RESHAPE_ARRAY_DIMENSIONS = [192, 71, 71]
    else:
        RESHAPE_ARRAY_DIMENSIONS = None
        print("Reshape dimensions not defined for layer being partitioned")

    codec_path = 'huffman_encoding_config/' + 'layer' + str(LAST_EDGE_LAYER) + '/' + 'num_bins_' + str(NUM_BINS)
    delta_hist = load_huff_dictionary(codec_path + '/delta_hist')
    delta_codec = HuffmanCodec.from_frequencies(delta_hist)
    frame_one_hist = load_huff_dictionary(codec_path + '/frame_one_hist')
    frame_one_codec = HuffmanCodec.from_frequencies(frame_one_hist)

    sizes = []
    videos = [
        "videos/n01443537/goldfish_1.mp4",
        "videos/n01443537/goldfish_2.mp4",
        "videos/n01443537/goldfish_3.mp4",

        "videos/n01882714/koala_1.mp4",
        "videos/n01882714/koala_2.mp4",
        "videos/n01882714/koala_3.mp4",

        "videos/n02085620/dog_1.mp4",

        "videos/n02099601/golden_retriever_1.mp4",

        "videos/n02099712/golden_retriever_1.mp4",

        "videos/n02110958/pug_1.mp4",
        "videos/n02110958/pug_3.mp4",
        "videos/n02110958/pug_4.mp4",

        "videos/n02206856/bee_1.mp4",

        "videos/n02391049/zebra_1.mp4",
        "videos/n02391049/zebra_2.mp4",
        "videos/n02391049/zebra_3.mp4",

        "videos/n02510455/panda_1.mp4",
        "videos/n02510455/panda_2.mp4",
        "videos/n02510455/panda_3.mp4",
        "videos/n02510455/panda_4.mp4",
        "videos/n02510455/panda_5.mp4",

        "videos/n02676566/guitar_1.mp4",
        "videos/n02676566/guitar_2.mp4",
        "videos/n02676566/guitar_3.mp4",
        "videos/n02676566/guitar_4.mp4",
        "videos/n02676566/guitar_6.mp4",

        "videos/n02787622/banjo_1.mp4",
        "videos/n02787622/banjo_2.mp4",
        "videos/n02787622/banjo_3.mp4",
        "videos/n02787622/banjo_5.mp4",

        "videos/n03452741/piano_1.mp4",
        "videos/n03452741/piano_2.mp4",

        "videos/n03495258/harp_1.mp4",
        "videos/n03495258/harp_2.mp4",
        "videos/n03495258/harp_3.mp4",

        "videos/n03584254/ipod_1.mp4",
        "videos/n03584254/ipod_2.mp4",

        "videos/n03967562/plough_1.mp4",

        "videos/n04536866/violin_3.mp4",
        "videos/n04536866/violin_4.mp4",

        "videos/n06596364/comic_1.mp4",

        "videos/n01910747/jelly_fish_1.mp4",
        "videos/n01910747/jelly_fish_2.mp4",

        "videos/n02134084/polar_bear_1.mp4",
        "videos/n02134084/polar_bear_3.mp4",

        "videos/n02342885/hamster_1.mp4",
        "videos/n02342885/hamster_2.mp4",
        "videos/n02342885/hamster_4.mp4",
        "videos/n02342885/hamster_5.mp4",

        "videos/n02364673/guinea_pig_1.mp4",
        "videos/n02364673/guinea_pig_2.mp4"
    ]

    # for analysing fc output
    if analyse_fc_results is True:
        test_videos = [
            "videos/n01882714/koala_1.mp4",
            "videos/n02510455/panda_1.mp4",
            "videos/n02676566/guitar_2.mp4",
            "videos/n02133161/bear_1.mp4",
            "videos/n02110958/pug_3.mp4"
        ]
        videos = test_videos

    vid_num = 0
    frame_number = 0

    cats = json.load(open('config/categories.json'))
    class_id = videos[vid_num].split('/')[1]
    for j in range(len(cats)):
        if cats[j]['id'] == class_id:
            index = cats[j]['index']

    while True:
        # Wait for a connection
        received = ''
        arr = bytearray()
        byte_size = 0

        print('waiting for a connection', file=sys.stderr)
        connection, client_address = sock.accept()

        try:
            print('connection from', client_address, file=sys.stderr)

            # Receive the data in small chunks and retransmit it
            while True:
                data = connection.recv(1024)
                byte_size = byte_size + len(data)

                arr.extend(data)
                # print(sys.stderr, 'received "%s"' % data)
                if data:
                    # print(sys.stderr, 'sending data back to the client')
                    connection.sendall(data)
                else:
                    print('no more data from', client_address, file=sys.stderr)
                    connect = False
                    break

        finally:
            # Clean up the connection
            connection.close()
            print('Size of data received: ', byte_size)

            # code for receiving reset to frame one
            if byte_size == 1:
                print('Received reset')
                avg_byte_size = sum(sizes) / len(sizes)
                passRate = (passCount / (failCount + passCount)) * 100
                print('percentage of passed: ', passRate)
                result = 'file: ' + videos[vid_num] + ', %Passed: ' + str(passRate) + ', avg_byte_size: ' \
                         + str(avg_byte_size) + ', layer: ' + str(LAST_EDGE_LAYER) + ', num_bins_used: ' + str(
                    NUM_BINS) + \
                         ', Delta Value: ' + str(DELTA_VALUE) + '\n'
                results_path = "Results" + '/layer' + str(LAST_EDGE_LAYER) + '/num_bins_' + str(
                    NUM_BINS) + '/delta_value' \
                               + str(DELTA_VALUE)

                if save_results is True:
                    if not os.path.isdir(results_path):
                        try:
                            os.makedirs(results_path)
                        except OSError as e:
                            if e.errno != errno.EEXIST:
                                raise
                    with open(results_path + "/results.txt", "a") as myfile:
                        myfile.write(result)

                # Resetting variables
                PREVIOUS_ARRAY = None
                sizes = []
                passCount = 0
                failCount = 0
                vid_num += 1
                frame_number = 0

                cats = json.load(open('config/categories.json'))
                class_id = videos[vid_num].split('/')[1]
                for j in range(len(cats)):
                    if cats[j]['id'] == class_id:
                        index = cats[j]['index']

            elif PREVIOUS_ARRAY is not None and USE_DELTA is True:
                decoded = delta_codec.decode(arr)
                arr = np.reshape(decoded, RESHAPE_ARRAY_DIMENSIONS)

                decoded_arr = decode(arr, NUM_BINS, max_num=8, min_num=-8)
                delta_decoded_arr = decode_delta(PREVIOUS_ARRAY, decoded_arr)
                PREVIOUS_ARRAY = delta_decoded_arr
                fc_out = server_run(torch.Tensor(delta_decoded_arr), LAST_EDGE_LAYER, INCEPT)
                result = classify_server_run(fc_out, class_label=index)
            else:
                decoded = frame_one_codec.decode(arr)
                arr = np.reshape(decoded, RESHAPE_ARRAY_DIMENSIONS)

                decoded_arr = decode(arr, NUM_BINS, max_num=8, min_num=-8)
                PREVIOUS_ARRAY = decoded_arr
                fc_out = server_run(torch.Tensor(decoded_arr), LAST_EDGE_LAYER, INCEPT)
                result = classify_server_run(fc_out, class_label=index)

            # 0 for false 1 for true, str so can be written to file and easily calculate total
            top_five_is_the_same = '0'
            top_one_is_the_same = '0'
            if analyse_fc_results is True:
                video_file = videos[vid_num].split('/')[2]
                video = video_file.split('.')[0]
                saved_fc_dir = 'Results/fc_results/' + class_id
                path_to_saved_fc = saved_fc_dir + '/' + video + '_' + str(frame_number) + '.npy'
                print('looking for: ', path_to_saved_fc)
                if os.path.isdir(saved_fc_dir):
                    print('path exists')
                    unencoded_fc = np.load(path_to_saved_fc)
                    unencoded_top_five = unencoded_fc.argsort()[0][-1:-6:-1]  # Get top 5 classifications.
                    encoded_top_five = fc_out.data.numpy().argsort()[0][-1:-6:-1]
                    unencoded_top = unencoded_fc.argsort()[0][-1]
                    encoded_top = fc_out.data.numpy().argsort()[0][-1]
                    if (np.array_equal(unencoded_top_five, encoded_top_five)):
                        top_five_is_the_same = '1'
                    if (np.array_equal(unencoded_top, encoded_top)):
                        top_one_is_the_same = '1'

                    path_to_results = 'Results/fc_results/comparison_results/layer_' + str(LAST_EDGE_LAYER) + \
                                      '/num_bins_' + str(NUM_BINS) + \
                                      '/delta_value_' + str(DELTA_VALUE)
                    fc_analysis_result = videos[vid_num] + ' ,same top five predictions ,' + top_five_is_the_same + \
                                         ' ,same top one prediction ,' + top_one_is_the_same  + '\n'
                    if not (os.path.isdir(path_to_results)):
                        os.makedirs(path_to_results)
                    with open(path_to_results + '/fc_results.txt', 'a') as myfile:
                        myfile.write(fc_analysis_result)

            frame_number += 1
            if result:
                passCount += 1
            else:
                failCount += 1
            sizes.append(byte_size)
            print('Total checked: ', passCount + failCount)
            print('Number of correct classifications: ', passCount)
            print('Number Failed: ', failCount)
Example #29
def test_non_string_symbols(data):
    codec = HuffmanCodec.from_data(data)
    encoded = codec.encode(data)
    assert type(encoded) == type(b'')
    decoded = codec.decode(encoded)
    assert decoded == data
Example #30
def test_decode_concat():
    codec = HuffmanCodec.from_data([1, 2, 3])
    encoded = codec.encode([1, 2, 1, 2, 3, 2, 1])
    decoded = codec.decode(encoded, concat=sum)
    assert decoded == 12