Example #1
def _decode(fout_p, symbols_shape_padded, ctx_shape, first_sym, get_freqs,
            printer):
    # Idea:
    # - start with a matrix `symbols_decoded` that is initially all zeros
    # - write first_sym into symbols_decoded
    # - use a normal ctx_itr to retrieve the current context from symbols_decoded
    # - use sym_idxs_itr to get the index of the next decoded symbol
    # - write the decoded symbol into symbols_decoded, then advance the ctx_itr to get the next context
    with open(fout_p, 'rb') as fin:
        bitin = ac.BitInputStream(fin)
        dec = ac.ArithmeticDecoder(bitin)

        symbols_decoded = np.zeros(symbols_shape_padded, dtype=np.int32)
        ctx_itr = _new_ctx_itr(symbols_decoded, ctx_shape)
        ctx_size = probclass.context_size_from_context_shape(ctx_shape)
        sym_idxs_itr = _new_sym_idxs_itr(symbols_shape_padded,
                                         ctx_size=ctx_size)

        next(ctx_itr)  # skip first ctx
        symbols_decoded[next(sym_idxs_itr)] = first_sym  # write first_sym
        num_ctxs = _get_num_ctxs(symbols_shape_padded, ctx_shape)
        for i, (current_ctx,
                next_decoded_sym_idx) in enumerate(zip(ctx_itr, sym_idxs_itr)):
            freqs = get_freqs(current_ctx)
            symbol = dec.read(freqs)
            symbols_decoded[next_decoded_sym_idx] = symbol
            if i % 1000 == 0:
                printer('\rFeeding context for symbol #{}/{}...'.format(
                    i, num_ctxs),
                        end='',
                        flush=True)
        printer('\r\033[K', end='')  # clear line
        return symbols_decoded
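A minimal call sketch for _decode, assuming it sits in the same module as the ac import and the helper iterators above. The file name, shapes, and the flat-frequency get_freqs stand-in are hypothetical (the real get_freqs would wrap the learned context model), and ac.SimpleFrequencyTable is assumed to be available as in Example #4.

def _uniform_get_freqs(ctx, num_symbols=25):
    # Hypothetical stand-in: ignores the context and returns a flat frequency table.
    return ac.SimpleFrequencyTable([1] * num_symbols)

symbols = _decode(
    'symbols.ac',                         # hypothetical bitstream path
    symbols_shape_padded=(1, 8, 36, 36),  # hypothetical padded symbol volume
    ctx_shape=(4, 5, 5),                  # hypothetical context window
    first_sym=0,
    get_freqs=_uniform_get_freqs,
    printer=print)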
Example #2
def voxelDNN_decoding(args):
    ply_path, model_path, outputfile, metadata, flags_pkl = args
    start = time.time()
    # reading metadata
    with gzip.open(metadata, "rb") as f:
        decoded_binstr, pc_level, departition_level = load_compressed_file(f)
    # getting encoding input data
    boxes, binstr, no_oc_voxels = occupancy_map_explore(ply_path, pc_level, departition_level)

    with open(flags_pkl, 'rb') as f:
        flags = pickle.load(f)  # per-block flags saved by the encoder, kept for bookkeeping

    bbox_max = 2 ** (pc_level - departition_level)
    voxelDNN = VoxelDNN(bbox_max, bbox_max, bbox_max)
    voxel_DNN = voxelDNN.restore_voxelDNN(model_path)
    with open(outputfile, "rb") as inp:
        bitin = arithmetic_coding.BitInputStream(inp)
        dec = arithmetic_coding.ArithmeticDecoder(32, bitin)
        decoded_boxes = np.zeros_like(boxes)
        for i in range(len(boxes)):
#         for i in range(1):
            decoded_boxes[i] = decompress_from_adaptive_freqs(decoded_boxes[i], np.asarray(boxes[i]), flags[i][1], dec, voxel_DNN, bbox_max)
    decoded_boxes = decoded_boxes.astype(int)
    end = time.time()
    print('Decoding time: ', end - start)
    
    boxes = boxes.astype(int)
    compare = decoded_boxes == boxes
    print('Check 1: decoded pc level: ', pc_level)
    print('Check 2: decoded block level: ', departition_level)
    print('Check 3: decoded binstr: ', binstr == decoded_binstr)
    print('Check 4: decoded boxes: ', np.count_nonzero(compare) / np.prod(compare.shape),
          np.count_nonzero(compare), '/', np.prod(compare.shape), compare.all())
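voxelDNN_decoding takes its five inputs packed in a single tuple. A hedged call sketch with hypothetical file names (the real files come from the matching encoder run):

args = ('data/longdress_vox10.ply',       # input point cloud
        'models/voxeldnn/',               # trained VoxelDNN checkpoint
        'outputs/longdress.bin',          # arithmetic-coded bitstream
        'outputs/longdress.metadata.gz',  # gzip'd (binstr, pc_level, departition_level)
        'outputs/longdress.flags.pkl')    # per-block flags pickled by the encoder
voxelDNN_decoding(args)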
Example #3
def main(args):
    # Handle command line arguments
    if len(args) != 2:
        sys.exit("Usage: python adaptive-arithmetic-decompress.py InputFile OutputFile")
    inputfile, outputfile = args

    # Perform file decompression
    with open(inputfile, "rb") as inp, open(outputfile, "wb") as out:
        bitin = arithmetic_coding.BitInputStream(inp)
        decompress(bitin, out)
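The decompress function called above is not shown in this example. A sketch of what it might look like, following the adaptive byte-wise model of the reference arithmetic coding library, and assuming the arithmetic_coding module provides FlatFrequencyTable, SimpleFrequencyTable, and a 32-bit ArithmeticDecoder:

def decompress(bitin, out):
    # Start from a flat model over 256 byte values plus one EOF symbol,
    # and adapt it after every decoded symbol (mirroring the compressor).
    initfreqs = arithmetic_coding.FlatFrequencyTable(257)
    freqs = arithmetic_coding.SimpleFrequencyTable(initfreqs)
    dec = arithmetic_coding.ArithmeticDecoder(32, bitin)
    while True:
        symbol = dec.read(freqs)
        if symbol == 256:  # EOF symbol terminates the stream
            break
        out.write(bytes((symbol,)))
        freqs.increment(symbol)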
Example #4
def decompress(comp_model, args):
    os.makedirs("outputs/reconstruction/", exist_ok=True)

    if os.path.isdir(args.binary_path):
        paths = glob(os.path.join(args.binary_path, '*'))
    else:
        paths = [args.binary_path]

    for path in paths:
        fileobj = open(path, mode='rb')
        # Header: image width and height, stored as two uint16 values (4 bytes).
        buf = fileobj.read(4)
        arr = np.frombuffer(buf, dtype=np.uint16)
        W, H = int(arr[0]), int(arr[1])
        # Padding added before encoding, stored as two uint8 values (2 bytes).
        buf = fileobj.read(2)
        arr = np.frombuffer(buf, dtype=np.uint8)
        pad_w, pad_h = int(arr[0]), int(arr[1])
        # Symbol offset: latents in [-min_val, min_val] were coded as symbols in [0, 2 * min_val].
        buf = fileobj.read(1)
        arr = np.frombuffer(buf, dtype=np.uint8)
        min_val = int(arr[0])

        bitin = ac.BitInputStream(fileobj)
        dec = ac.ArithmeticDecoder(bitin)
        y_hat = torch.zeros(1,
                            args.bottleneck,
                            H // 16,
                            W // 16,
                            dtype=torch.float32)
        _, yC, yH, yW = y_hat.size()

        pad = nn.ZeroPad2d((2, 2, 2, 0))
        y_hat_pad = pad(y_hat)

        print(
            '========================================================================'
        )
        print('image', os.path.basename(path))

        with torch.no_grad():
            with tqdm(product(range(yH), range(yW)), ncols=80,
                      total=yH * yW) as qbar:
                samples = np.arange(0, min_val * 2 + 1).reshape(-1, 1)
                for h, w in qbar:
                    p = comp_model.contextmodel.parameter_estimate(
                        y_hat_pad[:, :, h:h + 3, w:w + 5])
                    p = p.numpy()
                    p = np.reshape(p,
                                   (1, args.gmm_K, args.bottleneck * 3, 3, 5))
                    y_mu = p[:, :, :args.bottleneck, 2, 2] + min_val
                    y_std = np.abs(p[:, :, args.bottleneck:2 * args.bottleneck,
                                     2, 2])
                    y_w = p[:, :, 2 * args.bottleneck:, 2, 2]
                    y_w = np.exp(y_w) / np.sum(np.exp(y_w), axis=1)  # softmax over the mixture weights

                    for ch in range(yC):
                        weight = y_w[:, :, ch]
                        mean = y_mu[:, :, ch]
                        std = y_std[:, :, ch]

                        high = weight * 0.5 * (1 + erf(
                            (samples + 0.5 - mean) / ((std + TINY) * 2**0.5)))
                        low = weight * 0.5 * (1 + erf(
                            (samples - 0.5 - mean) / ((std + TINY) * 2**0.5)))
                        pmf = np.sum(high - low, axis=1)
                        pmf_clip = np.clip(pmf, 1.0 / MAX_N, 1.0)
                        pmf_clip = np.round(pmf_clip / np.sum(pmf_clip) *
                                            MAX_N).astype(np.uint32)

                        freq = ac.SimpleFrequencyTable(pmf_clip)
                        symbol = dec.read(freq)
                        y_hat_pad[0, ch, h + 2, w + 2] = symbol - min_val

            y_hat = y_hat_pad[:, :, 2:, 2:yW + 2]
            fake_images = comp_model.decoder(y_hat)
            fake_images = fake_images[:, :, :H - pad_h, :W - pad_w]

            fakepath = "outputs/reconstruction/{}.jpg".format(
                os.path.basename(path).split('.')[0])
            cv2.imwrite(fakepath, img_torch2np(fake_images)[..., ::-1])

        print(
            '========================================================================\n'
        )
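The inner loop above turns a per-channel Gaussian mixture into integer frequencies for the arithmetic decoder: each symbol gets the mixture CDF mass on a unit-wide bin, and the result is clipped and quantized. A self-contained sketch of that step, with TINY and MAX_N as assumed stand-ins for the constants used in decompress:

import numpy as np
from scipy.special import erf

TINY = 1e-10    # assumed small constant to avoid division by zero
MAX_N = 65536   # assumed total frequency budget for the coder

def gmm_pmf(samples, weight, mean, std):
    # samples: (S, 1) column of candidate symbol values
    # weight, mean, std: (1, K) mixture parameters for one latent channel
    # Probability of symbol s is the mixture CDF mass on [s - 0.5, s + 0.5].
    high = weight * 0.5 * (1 + erf((samples + 0.5 - mean) / ((std + TINY) * 2**0.5)))
    low = weight * 0.5 * (1 + erf((samples - 0.5 - mean) / ((std + TINY) * 2**0.5)))
    pmf = np.sum(high - low, axis=1)
    pmf = np.clip(pmf, 1.0 / MAX_N, 1.0)  # keep every symbol decodable
    return np.round(pmf / np.sum(pmf) * MAX_N).astype(np.uint32)

# Example: min_val = 8 gives 17 symbols; a 2-component mixture.
samples = np.arange(0, 17).reshape(-1, 1)
freqs = gmm_pmf(samples,
                weight=np.array([[0.7, 0.3]]),
                mean=np.array([[8.0, 10.0]]),
                std=np.array([[1.0, 2.5]]))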