Example #1
def save_model_bin(self, binFilePath, modelPath=None):
    # Assumes numpy is imported as np and Keras' load_model is in scope.
    from fxpmath import Fxp
    if modelPath is not None:
        self.model = load_model(modelPath, compile=False)
    with open(binFilePath, 'wb') as binFile:
        largest_inaccuracy = 0
        for layer in self.model.layers:
            g = layer.get_config()
            h = layer.get_weights()
            print(g)
            #binFile.write(json.dumps(g).encode(encoding="ascii"))
            # embedding:  1 * 68 * 8
            # simple rnn: 8 * 8, 8 * 8, 8 * 1
            # dropout:    none
            # dense:      8 * 1, 1 * 1
            # activation: sigmoid
            for i in h:
                i = np.array(i)
                for index, x in np.ndenumerate(i):
                    print(x)
                    # Quantize each weight to signed Q8.8 (16-bit word,
                    # 8 fractional bits).
                    h_fxp = Fxp(x, signed=True, n_word=16, n_frac=8)
                    # Track the worst quantization error across all weights.
                    difference = abs(h_fxp.get_val() - x)
                    if difference > largest_inaccuracy:
                        largest_inaccuracy = difference
                    print(h_fxp.bin())
                    # bin() yields only '0'/'1' characters, so plain ASCII
                    # encoding is safe.
                    binFile.write(h_fxp.bin().encode(encoding="ascii"))
        print("largest difference")
        print(str(largest_inaccuracy))
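For reference, a self-contained sketch of the same Q8.8 round trip on a toy weight matrix; the array values and the weights.bin path are invented for illustration:

from fxpmath import Fxp
import numpy as np

# Stand-in for one layer's weights.
weights = np.array([[0.123, -0.456], [0.789, -0.012]])

largest_inaccuracy = 0.0
with open("weights.bin", "wb") as binFile:
    for _, x in np.ndenumerate(weights):
        # Same signed Q8.8 format as save_model_bin above.
        x_fxp = Fxp(x, signed=True, n_word=16, n_frac=8)
        largest_inaccuracy = max(largest_inaccuracy, abs(x_fxp.get_val() - x))
        # Each weight becomes a 16-character '0'/'1' string.
        binFile.write(x_fxp.bin().encode("ascii"))

print("largest difference:", largest_inaccuracy)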
Example #2
from fxpmath import Fxp
import torch


def param_convert(x, a):
    # Quantize a NumPy array x to signed fixed point with `a` fractional
    # bits (word length a + 2: one sign bit plus one integer bit),
    # saturating on overflow and rounding to nearest.
    y = Fxp(x,
            signed=True,
            n_word=a + 2,
            n_frac=a,
            overflow='saturate',
            rounding='around')
    y = y.get_val()
    if a == 100:
        # a == 100 acts as a sentinel: return the input unquantized.
        y = torch.from_numpy(x)
    else:
        y = torch.from_numpy(y)
    return y.type(torch.FloatTensor)
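A quick usage sketch (the sample array is invented for illustration): quantize to 8 fractional bits, or pass the a == 100 sentinel to keep full precision:

import numpy as np

w = np.array([0.1, -1.5, 0.333])
print(param_convert(w, 8))    # values snapped to multiples of 2**-8
print(param_convert(w, 100))  # sentinel: returned unquantized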
Example #3
from os import listdir

import numpy as np
from fxpmath import Fxp

# Collect every CSV of exported parameters in the working directory.
files = [f for f in listdir('./') if f.endswith(".csv")]

# Target format: signed 16-bit words with 11 fractional bits.
word_bits = 16
frac_bits = 11

error = []

for f_ in files:
    with open(f_, 'r') as handle:
        data = np.genfromtxt(handle, delimiter=',')
        fxp_val = np.zeros_like(data)
        # One 16-character bit string per element, hence dtype 'U16'.
        fxp_bin = np.empty(data.shape, dtype='U16')
        for line in range(len(data)):
            if "bias" in f_:
                # Bias files are 1-D: one value per line.
                fxp_sample = Fxp(data[line], True, word_bits, frac_bits)
                fxp_val[line] = fxp_sample.get_val()
                fxp_bin[line] = fxp_sample.bin()
                # Relative error; nan_to_num guards division by zero.
                error.append(
                    np.nan_to_num((data[line] - fxp_val[line]) / data[line]))
            else:
                # Weight files are 2-D: quantize element by element.
                for column in range(len(data[line])):
                    fxp_sample = Fxp(data[line][column], True, word_bits,
                                     frac_bits)
                    fxp_val[line][column] = fxp_sample.get_val()
                    fxp_bin[line][column] = fxp_sample.bin()
                    error.append(
                        np.nan_to_num(
                            (data[line][column] - fxp_val[line][column]) /
                            data[line][column]))
    with open('report.txt', 'a') as r:
        r.write(f_ + ' min: ' + str(np.min(fxp_val)) + ' - max: ' +
                str(np.max(fxp_val)) + '\n')
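The loop collects per-element relative errors in error but never reports them; a hypothetical follow-up could summarize them:

# Follow-up (not in the original snippet): summarize the relative errors.
err = np.abs(np.asarray(error, dtype=float))
print("worst relative error:", err.max())
print("mean relative error:", err.mean())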
Example #4
from fxpmath import Fxp
import numpy as np

x = np.array([[1 / 3, 1 / 3]])
y = np.array([[1 / 3], [1 / 3]])

print("X original = ", x)
print("Y original = ", y)

print("-----------")

# Quantize both operands to unsigned fixed point: 20-bit words with
# 15 fractional bits, so the resolution is 2**-15.
x_fxp = Fxp(x, signed=False, n_word=20, n_frac=15)
y_fxp = Fxp(y, signed=False, n_word=20, n_frac=15)

print("X fxp =", x_fxp.get_val())
print("Y fxp =", y_fxp.get_val())

print("-----------")

print(x_fxp.info(verbose=3))

print("-----------")
print("Dot product without scaling = ", x_fxp.get_val().dot(y_fxp.get_val()))
print("Dot prod scaled = ",
      Fxp(x_fxp.get_val().dot(y_fxp.get_val()), n_word=20, n_frac=15))
print("Dot prod without specifications = ",
      Fxp(x_fxp.get_val().dot(y_fxp.get_val())))

# Preallocate a signed container (n_word=20, n_frac=15) and let equal()
# quantize the dot product into that format in place.
dot_fxp = Fxp(None, signed=True, n_word=20, n_frac=15)
dot_fxp.equal(x_fxp().dot(y_fxp()))
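A short follow-up sketch (not part of the original) makes the quantization error concrete: with n_frac=15 each stored element can differ from 1/3 by at most one resolution step:

step = 2.0 ** -15
print("resolution:", step)
print("error on x[0, 0]:", abs(x_fxp.get_val()[0, 0] - 1 / 3))
print("Dot prod via equal() =", dot_fxp.get_val())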