def save_model_bin(self, binFilePath, modelPath=None):
    """Serialize every layer weight as a 16-bit fixed-point (Q8.8) bit string.

    Each scalar weight is quantized with fxpmath (signed, 16-bit word,
    8 fractional bits) and its '0'/'1' bit string is written to
    *binFilePath* as ASCII bytes.  The largest absolute quantization
    error seen across all weights is printed at the end.

    Args:
        binFilePath: Destination file, opened in binary mode.
        modelPath: Optional path to a saved Keras model; when given, the
            model is (re)loaded before serialization.
    """
    from fxpmath import Fxp

    if modelPath is not None:
        # compile must be the boolean False; the original passed the
        # string "False", which is truthy and silently meant compile=True.
        self.model = load_model(modelPath, compile=False)

    largest_inaccuracy = 0
    with open(binFilePath, 'wb') as binFile:
        for layer in self.model.layers:
            print(layer.get_config())
            # Expected weight shapes for this model, for reference:
            #   embedding: 1 * 68 * 8
            #   simple rnn: 8 * 8, 8 * 8, 8 * 1
            #   dropout: none
            #   dense: 8 * 1, 1 * 1
            #   activation: sigmoid
            for weights in layer.get_weights():
                weights = np.array(weights)
                for _, x in np.ndenumerate(weights):
                    print(x)
                    h_fxp = Fxp(x, signed=True, n_word=16, n_frac=8)
                    difference = abs(h_fxp.get_val() - x)
                    largest_inaccuracy = max(largest_inaccuracy, difference)
                    print(h_fxp.bin())
                    # Fxp.bin() yields only '0'/'1', so a plain ASCII
                    # encode cannot fail.  The original passed
                    # errors="unknown char", which is not a registered
                    # codec error handler and would raise LookupError if
                    # an encode error ever occurred.
                    binFile.write(h_fxp.bin().encode("ascii"))
    print("largest difference")
    print(str(largest_inaccuracy))
def save_model_txt_binary(self, txtPath, modelPath=None):
    """Dump layer configs plus fixed-point (Q8.8) weight bit strings as text.

    For every layer the JSON config is written first, then one line per
    weight element of the form ``row:Rcol:Cval:BITS`` (2-D tensors) or
    ``row:Rval:BITS`` (1-D tensors).  Dropout layers' weights are skipped.

    Args:
        txtPath: Destination text file path.
        modelPath: Optional path to a saved Keras model; when given, the
            model is (re)loaded before dumping.
    """
    from fxpmath import Fxp
    import json

    if modelPath is not None:
        # compile must be the boolean False; the string "False" is
        # truthy and would silently have meant compile=True.
        self.model = load_model(modelPath, compile=False)

    with open(txtPath, 'w') as txtFile:
        for layer in self.model.layers:
            g = layer.get_config()
            txtFile.write(json.dumps(g))
            txtFile.write("\n")
            # The dropout test is invariant for a given layer; hoist it
            # out of the per-element loop (the original re-checked it for
            # every weight scalar).
            if g["name"] == "dropout":
                continue
            for weights in layer.get_weights():
                weights = np.array(weights)
                for index, x in np.ndenumerate(weights):
                    if len(index) > 1:
                        txtFile.write("row:" + str(index[0]) +
                                      "col:" + str(index[1]))
                    else:
                        txtFile.write("row:" + str(index[0]))
                    h_fxp = Fxp(x, signed=True, n_word=16, n_frac=8)
                    txtFile.write("val:" + h_fxp.bin())
                    txtFile.write("\n")
# Quantize each CSV weight file to signed Q4.11 fixed point, collect the
# per-element relative quantization error, and append a min/max summary
# for every file to report.txt.
word_bits = 16   # total word length
frac_bits = 11   # fractional bits

error = []
for f_ in files:
    with open(f_, 'r') as handle:
        data = np.genfromtxt(handle, delimiter=',')
    fxp_val = np.zeros_like(data)
    fxp_bin = np.ndarray(data.shape, dtype='U16')

    # Bias files hold 1-D vectors; every other file is a 2-D matrix.
    # The filename test is invariant per file, so hoist it out of the
    # row loop instead of re-evaluating it on every iteration.
    is_bias = "bias" in f_

    for line in range(len(data)):
        if is_bias:
            fxp_sample = Fxp(data[line], True, word_bits, frac_bits)
            fxp_val[line] = fxp_sample.get_val()
            fxp_bin[line] = fxp_sample.bin()
            # nan_to_num turns the 0/0 case (weight exactly zero) into 0.
            error.append(
                np.nan_to_num((data[line] - fxp_val[line]) / data[line]))
        else:
            for column in range(len(data[line])):
                fxp_sample = Fxp(data[line][column], True,
                                 word_bits, frac_bits)
                fxp_val[line][column] = fxp_sample.get_val()
                fxp_bin[line][column] = fxp_sample.bin()
                error.append(
                    np.nan_to_num(
                        (data[line][column] - fxp_val[line][column])
                        / data[line][column]))

    with open('report.txt', 'a') as r:
        r.write(f_ + ' min: ' + str(np.min(fxp_val)) +
                ' - max: ' + str(np.max(fxp_val)) + '\n')
import sys
sys.path.insert(1, '/Users/jingyuan/Desktop/dga/dga_detection_rnn')
from fxpmath import Fxp
import numpy as np

# Generate an evenly spaced tanh lookup table over [X_MIN, X_MAX] and
# write each (x, tanh(x)) pair as signed Q3.12 fixed-point bit strings
# (ASCII '0'/'1' characters) to the binary table file.
TABLE_SIZE = 10
X_MIN = -4.0
X_MAX = 4.0
tanh_table_path = "conf/tanh_table.bin"

with open(tanh_table_path, 'wb') as binFile:
    for entry in range(TABLE_SIZE):
        # Same sample points as the original -4 + entry * 8 / 9, but the
        # endpoints and size are named constants instead of magic numbers.
        xVal = X_MIN + entry * (X_MAX - X_MIN) / (TABLE_SIZE - 1)
        print(xVal)
        x_fxp = Fxp(xVal, signed=True, n_word=16, n_frac=12)
        print(x_fxp.bin())
        xTanh = np.tanh(xVal)
        print(xTanh)
        xTanh_fxp = Fxp(xTanh, signed=True, n_word=16, n_frac=12)
        print(xTanh_fxp.bin())
        # Fxp.bin() yields only '0'/'1', so plain ASCII encoding cannot
        # fail.  The original passed errors="unknown char", which is not
        # a registered codec error handler and would raise LookupError
        # if an encode error ever occurred.
        binFile.write(x_fxp.bin().encode("ascii"))
        binFile.write(xTanh_fxp.bin().encode("ascii"))