def quantize_net_with_dict(net, layers, codebook, use_stochastic=False, timing=False):
    """Quantize the weights of `layers` in place and build index lookups.

    For each layer, the flattened weights are mapped to codebook entries
    (nearest centroid via scipy vq, or stochastically when
    `use_stochastic`), the weight blob is overwritten with the quantized
    values, and two lookup structures are built.

    Args:
        net: network whose `params[layer][0].data` holds the weight blob
            (Caffe-style; only attribute access is used here).
        layers: iterable of layer names to quantize.
        codebook: dict mapping layer name -> 1-D array of centroid values.
        use_stochastic: use `stochasitc_quantize2` (defined elsewhere in
            this file) instead of nearest-centroid assignment.
        timing: print elapsed wall-clock time when done.

    Returns:
        (codeDict, maskCode):
        codeDict[layer] maps code index -> list of flat weight positions
        assigned that code; maskCode[layer] is an array of code indices
        with the same shape as the layer's weights.
    """
    start_time = time.time()
    codeDict = {}
    maskCode = {}
    for layer in layers:
        print("Quantize layer: %s" % layer)
        W = net.params[layer][0].data
        if use_stochastic:
            codes = stochasitc_quantize2(W.flatten(), codebook[layer])
        else:
            codes, _ = scv.vq(W.flatten(), codebook[layer])
        # Overwrite the blob in place so the net itself is quantized.
        W_q = np.reshape(codebook[layer][codes], W.shape)
        net.params[layer][0].data[...] = W_q
        maskCode[layer] = np.reshape(codes, W.shape)
        # Invert the assignment: code index -> flat positions using it.
        # (The original built an xrange lookup table b with b[i] == i and
        # kept an unused codeBookSize local; both removed.)
        codeDict[layer] = {}
        for pos, code in enumerate(maskCode[layer].flatten()):
            codeDict[layer].setdefault(code, []).append(pos)
    if timing:
        # NOTE(review): label says "update codebook" but this measures the
        # whole quantization pass.
        print("Update codebook time:%f" % (time.time() - start_time))
    return codeDict, maskCode
def quantize_net(net, codebook):
    """Quantize every layer listed in `codebook` to its nearest centroid.

    Overwrites each layer's weight blob in place with the centroid values.

    NOTE(review): this definition is shadowed by a later
    `quantize_net(net, codebook, use_stochastic=False)` in this file, so
    only the later one is live after import.

    Args:
        net: network whose `params[layer][0].data` holds the weight blob.
        codebook: dict mapping layer name -> 1-D array of centroid values.
    """
    layers = codebook.keys()
    print("================Perform quantization==============")
    for layer in layers:
        print("Quantize layer: %s" % layer)
        W = net.params[layer][0].data
        codes, _ = scv.vq(W.flatten(), codebook[layer])
        W_q = np.reshape(codebook[layer][codes], W.shape)
        np.copyto(net.params[layer][0].data, W_q)
def quantize_net(net, codebook, use_stochastic=False):
    """Quantize every layer listed in `codebook`, in place.

    This redefinition shadows the earlier 2-arg `quantize_net` in this
    file and is the live one; it adds an optional stochastic assignment.

    Args:
        net: network whose `params[layer][0].data` holds the weight blob.
        codebook: dict mapping layer name -> 1-D array of centroid values.
        use_stochastic: use `stochasitc_quantize2` (defined elsewhere in
            this file) instead of nearest-centroid vq.
    """
    layers = codebook.keys()
    print("================Perform quantization==============")
    for layer in layers:
        print("Quantize layer: %s" % layer)
        W = net.params[layer][0].data
        if use_stochastic:
            codes = stochasitc_quantize2(W.flatten(), codebook[layer])
        else:
            codes, _ = scv.vq(W.flatten(), codebook[layer])
        W_q = np.reshape(codebook[layer][codes], W.shape)
        np.copyto(net.params[layer][0].data, W_q)
def get_codes(net, codebook):
    """Quantize weights AND biases in place and return their code indices.

    NOTE(review): this definition is shadowed by a later weights-only
    `get_codes` in this file, so only the later one is live after import.
    Also note the biases are quantized with the same per-layer codebook
    as the weights — confirm that is intended.

    Args:
        net: network with `params[layer][0].data` (weights) and
            `params[layer][1].data` (biases).
        codebook: dict mapping layer name -> 1-D array of centroid values.

    Returns:
        (codes_W, codes_b): dicts mapping layer name -> uint32 arrays of
        code indices, shaped like the weights / biases respectively.
    """
    layers = codebook.keys()
    codes_W = {}
    codes_b = {}
    print("================Perform quantization==============")
    for layer in layers:
        print("Quantize layer: %s" % layer)
        W = net.params[layer][0].data
        b = net.params[layer][1].data
        # Weights: assign nearest centroid, record codes, overwrite blob.
        codes, _ = scv.vq(W.flatten(), codebook[layer])
        codes = np.reshape(codes, W.shape)
        codes_W[layer] = np.array(codes, dtype=np.uint32)
        W_q = np.reshape(codebook[layer][codes], W.shape)
        np.copyto(net.params[layer][0].data, W_q)
        # Biases: same procedure, reusing the weight codebook.
        codes, _ = scv.vq(b.flatten(), codebook[layer])
        codes = np.reshape(codes, b.shape)
        codes_b[layer] = np.array(codes, dtype=np.uint32)
        b_q = np.reshape(codebook[layer][codes], b.shape)
        np.copyto(net.params[layer][1].data, b_q)
    return codes_W, codes_b
def get_codes(net, codebook):
    """Quantize each layer's weights in place and return the code indices.

    This redefinition shadows the earlier weights-and-biases `get_codes`
    in this file and is the live one (it leaves biases untouched, even
    though it still reads them).

    Args:
        net: network whose `params[layer][0].data` holds the weight blob.
        codebook: dict mapping layer name -> 1-D array of centroid values.

    Returns:
        codes_W: dict mapping layer name -> uint32 array of code indices
        shaped like the layer's weights.
    """
    layers = codebook.keys()
    codes_W = {}
    codes_b = {}  # kept for interface parity with the earlier version; unused
    print("================Perform quantization==============")
    for layer in layers:
        print("Quantize layer: %s" % layer)
        W = net.params[layer][0].data
        b = net.params[layer][1].data
        codes, _ = scv.vq(W.flatten(), codebook[layer])
        codes = np.reshape(codes, W.shape)
        codes_W[layer] = np.array(codes, dtype=np.uint32)
        W_q = np.reshape(codebook[layer][codes], W.shape)
        np.copyto(net.params[layer][0].data, W_q)
    return codes_W
def recover_all(net, dir_t, idx=0):
    """Reload a saved model + codebook and rebuild the quantization maps.

    Loads weights from `dir_t + 'caffemodel<idx>'` via `net.copy_from`,
    unpickles the matching codebook, and re-derives the code mask and the
    code -> flat-position lookup for every layer in `net.params`.

    Args:
        net: network exposing `copy_from(path)` and `params`.
        dir_t: directory prefix the checkpoint files live under
            (expected to end with a path separator, given the plain
            string concatenation below).
        idx: checkpoint index appended to both filenames.

    Returns:
        (codebook, maskCode, codeDict) — see `quantize_net_with_dict` for
        the maskCode/codeDict layout.
    """
    layers = net.params.keys()
    net.copy_from(dir_t + 'caffemodel%d' % idx)
    # Fix: open in binary mode (pickle data is bytes) and close the
    # handle; the original `pickle.load(open(...))` leaked it.
    with open(dir_t + 'codebook%d' % idx, 'rb') as f:
        codebook = pickle.load(f)
    maskCode = {}
    codeDict = {}
    for layer in layers:
        W = net.params[layer][0].data
        codes, _ = scv.vq(W.flatten(), codebook[layer])
        maskCode[layer] = np.reshape(codes, W.shape)
        # Invert the assignment: code index -> flat positions using it.
        codeDict[layer] = {}
        for pos, code in enumerate(maskCode[layer].flatten()):
            codeDict[layer].setdefault(code, []).append(pos)
    return codebook, maskCode, codeDict
return ptr, spm, ind W = np.load('W.npy').astype('f') act = np.zeros(W.shape[1], dtype='int16') act[0] = 1 act[1] = 2 act[5] = 1 groundtruth = np.dot(W, act) groundtruth = (groundtruth * 32).astype('int16') codebook = np.arange(16).astype('f') / 32 W_codes, _ = scv.vq(W.flatten(), codebook) W_codes = np.reshape(W_codes, W.shape) ptr, spm, ind = get_csc_single_nobias(W_codes, bank_num=2, max_jump=16) data_dir = 'test_data' os.system("rm -rf " + data_dir) os.system("mkdir " + data_dir) os.system("mkdir " + data_dir + '/ptr') os.system("mkdir " + data_dir + '/spm') for idx in range(2): with open("%s/ptr/ptr%d.dat" % (data_dir, idx), 'wb') as f: f.write('%d\n' % len(ptr[idx])) for number in ptr[idx]: f.write('{:016b} '.format(number))