def calculate_storage_capacity(self, output_params=None):
    """
    Calculates the storage capacity of the BiNAM, given the expected output
    data and the number of ones in the output. Returns the information, the
    output matrix and the error counts.
    """
    # Use None as default to avoid sharing a mutable dict between calls
    if output_params is None:
        output_params = {}
    mat_out_res = self.calculate_output_matrix(output_params)
    N, n = mat_out_res.shape
    errs = entropy.calculate_errs(mat_out_res, self["mat_out"])
    I = entropy.entropy_hetero(errs, n, self["data_params"]["n_ones_out"])
    return I, mat_out_res, errs
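# A minimal usage sketch (an assumption, not part of the original module):
# given an instance `experiment` of this class whose "mat_in", "mat_out" and
# "data_params" entries have been filled in, the stored information in bits
# could be queried as follows:
#
#     I, mat_out_res, errs = experiment.calculate_storage_capacity()
#     print("Information: {:.2f} bits".format(I))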
def calculate_max_storage_capacity(self):
    """
    Calculates the maximum theoretical storage capacity for this network.
    """
    # Derive the matrix dimensions from the data itself where possible,
    # otherwise fall back to the values given in the data parameters
    if hasattr(self["mat_out"], "shape") and hasattr(self["mat_in"], "shape"):
        _, m = self["mat_in"].shape
        _, n = self["mat_out"].shape
        mem = binam.BiNAM(m, n)
    else:
        mem = binam.BiNAM(
            self["data_params"]["n_bits_in"],
            self["data_params"]["n_bits_out"])
    mem.train_matrix(self["mat_in"], self["mat_out"])
    mat_out_ref = mem.evaluate_matrix(self["mat_in"])
    N, n = mat_out_ref.shape
    errs_ref = entropy.calculate_errs(mat_out_ref, self["mat_out"])
    I_ref = entropy.entropy_hetero(
        errs_ref, n, self["data_params"]["n_ones_out"])
    return I_ref, mat_out_ref, errs_ref
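# Hedged usage sketch (the `experiment` name is assumed for illustration):
# comparing the measured capacity against the theoretical optimum gives the
# relative performance of the network under the chosen parameters:
#
#     I, _, _ = experiment.calculate_storage_capacity()
#     I_ref, _, _ = experiment.calculate_max_storage_capacity()
#     print("Relative capacity: {:.1f}%".format(100.0 * I / I_ref))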
i = 0
for p in xs:
    print("Iteration: ", p)

    # Introduce some errors: with probability p, clear a set input bit (p0)
    # or set a cleared input bit (p1)
    X_part_p0 = np.minimum(X, (np.random.random((n_samples, n_bits)) >= p))
    X_part_p1 = np.maximum(X, (np.random.random((n_samples, n_bits)) < p))

    # Evaluate with the adaptive and with a fixed threshold
    Y_part_out_p0_adap = M.evaluate_matrix(X_part_p0)
    Y_part_out_p1_adap = M.evaluate_matrix(X_part_p1)
    Y_part_out_p0_fix = M.evaluate_matrix(X_part_p0, threshold=n_ones)
    Y_part_out_p1_fix = M.evaluate_matrix(X_part_p1, threshold=n_ones)

    # Calculate the errors and the entropy; use list comprehensions instead
    # of map(), which returns an iterator in Python 3
    errs = entropy.calculate_errs(Y_part_out_p0_adap, Y)
    info_p0_adap[i] = entropy.entropy_hetero(
        errs, n_bits_out=n_bits, n_ones_out=n_ones)
    fps_p0_adap[i] = np.array([x["fp"] for x in errs])
    fns_p0_adap[i] = np.array([x["fn"] for x in errs])

    errs = entropy.calculate_errs(Y_part_out_p1_adap, Y)
    info_p1_adap[i] = entropy.entropy_hetero(
        errs, n_bits_out=n_bits, n_ones_out=n_ones)
    fps_p1_adap[i] = np.array([x["fp"] for x in errs])
    fns_p1_adap[i] = np.array([x["fn"] for x in errs])

    errs = entropy.calculate_errs(Y_part_out_p0_fix, Y)
    info_p0_fix[i] = entropy.entropy_hetero(
        errs, n_bits_out=n_bits, n_ones_out=n_ones)
    fps_p0_fix[i] = np.array([x["fp"] for x in errs])
    fns_p0_fix[i] = np.array([x["fn"] for x in errs])

    errs = entropy.calculate_errs(Y_part_out_p1_fix, Y)
    info_p1_fix[i] = entropy.entropy_hetero(
        errs, n_bits_out=n_bits, n_ones_out=n_ones)
    fps_p1_fix[i] = np.array([x["fp"] for x in errs])
    fns_p1_fix[i] = np.array([x["fn"] for x in errs])

    i = i + 1
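# The four blocks above repeat the same extraction pattern. A small helper
# along these lines (a sketch, not part of the original script) would
# condense them; `errs` is the list of dicts returned by
# entropy.calculate_errs, each carrying "fp" and "fn" counts:
#
#     def collect_counts(errs, key):
#         """Gathers the per-sample error counts for `key` ("fp"/"fn")."""
#         return np.array([x[key] for x in errs])
#
#     fps_p0_adap[i] = collect_counts(errs, "fp")
#     fns_p0_adap[i] = collect_counts(errs, "fn")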
i = 0
s_old = 0
for s in xs:
    print("Iteration: ", s)

    # Train the samples added since the last iteration; use a separate loop
    # variable to avoid shadowing the outer `s` (also, range replaces the
    # Python-2-only xrange)
    for j in range(s_old, s):
        M.train(X[j], Y[j])
    s_old = s

    # Evaluate the samples stored so far
    X_part = X[0:(s + 1)]
    Y_part = Y[0:(s + 1)]
    Y_part_out = M.evaluate_matrix(X_part)

    # Calculate the errors and the entropy; materialise the per-sample error
    # counts once instead of re-running map() (an iterator in Python 3) for
    # every statistic
    errs = entropy.calculate_errs(Y_part_out, Y_part)
    info[i] = entropy.entropy_hetero(
        errs, n_bits_out=n_bits, n_ones_out=n_ones)
    fps = np.array([x["fp"] for x in errs])
    fns = np.array([x["fn"] for x in errs])
    n_false_positives[i] = np.sum(fps)
    n_false_negatives[i] = np.sum(fns)
    n_false_positives_mean[i] = np.mean(fps)
    n_false_negatives_mean[i] = np.mean(fns)
    n_false_positives_min[i] = np.min(fps)
    n_false_negatives_min[i] = np.min(fns)
    n_false_positives_max[i] = np.max(fps)
    n_false_negatives_max[i] = np.max(fns)
    i = i + 1

figsize = (cm2inch(11.8), cm2inch(6))
print("Plotting information...")
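# `cm2inch` is assumed to be defined elsewhere in the script; a minimal
# version consistent with its use here (centimetres to inches for matplotlib
# figure sizes) would be:
#
#     def cm2inch(value):
#         return value / 2.54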