def test_induced_trees(self):
    """Sampling missing values from a two-branch mixture should recover each
    branch's (near point-mass) Gaussian mean, as selected by the Categorical
    evidence in column 1.
    """
    add_parametric_inference_support()
    add_parametric_sampling_support()

    # Two mixture components: effectively deterministic Gaussians at 10 and
    # 50, each paired with a Categorical that pins the component to one
    # value of variable 1.
    spn = 0.5 * (Gaussian(mean=10, stdev=0.000000001, scope=0) *
                 Categorical(p=[1.0, 0], scope=1)) + \
          0.5 * (Gaussian(mean=50, stdev=0.000000001, scope=0) *
                 Categorical(p=[0, 1.0], scope=1))

    rng = np.random.RandomState(17)

    # Row 0 carries evidence [nan, 0], row 1 carries [nan, 1];
    # NaN in column 0 marks the entry to be sampled.
    evidence = np.zeros((2, 2))
    evidence[1, 1] = 1
    evidence[:, 0] = np.nan

    sample_instances(spn, evidence, rng)

    # Each sampled value should match the mean of the branch picked
    # by the categorical evidence.
    self.assertAlmostEqual(evidence[0, 0], 10)
    self.assertAlmostEqual(evidence[1, 0], 50)
def setUp(self):
    """Register parametric-leaf inference handlers before each test."""
    add_parametric_inference_support()
# NOTE(review): the statements below are a fragment of an enclosing function
# whose `def` line is outside this chunk (they read `params`, `node_id`, `LL`,
# `instance_pos` from that scope); indentation reconstructed — confirm against
# the full file.
    # Decode the sum node's two children and their weights from the packed
    # parameter matrix: rows 0/1 are child ids, rows 2/3 the mixture weights.
    left_id = int(params[0, node_id])
    right_id = int(params[1, node_id])
    left_w = params[2, node_id]
    right_w = params[3, node_id]

    # log-sum-exp trick: factor out the max term so neither exp() underflows
    # when the two weighted child log-likelihoods differ greatly.
    xleft = LL[instance_pos, left_id] + math.log(left_w)
    xright = LL[instance_pos, right_id] + math.log(right_w)
    xstar = max(xleft, xright)
    LL[instance_pos, node_id] = xstar + math.log(math.exp(xleft - xstar) + math.exp(xright - xstar))


if __name__ == '__main__':
    # Benchmark script: build a random region graph over a 28x28 pixel grid
    # (MNIST-sized) and time SPN construction from it.
    add_parametric_inference_support()

    start = time.perf_counter()
    rg = RegionGraph(range(28 * 28))
    for _ in range(0, 2):
        # for _ in range(0, 20):
        rg.random_split(2, 2)
    rg_layers = rg.make_layers()
    print("random graph built in ", (time.perf_counter() - start))

    start = time.perf_counter()
    # Fixed seed so repeated benchmark runs build the identical structure.
    vector_list, root = Make_SPN_from_RegionGraph(rg_layers, np.random.RandomState(100),
                                                  num_classes=1, num_gauss=20, num_sums=20)
    print("Make_SPN_from_RegionGraph in ", (time.perf_counter() - start))
def train_spn(window_size=3, min_instances_slice=10000, features=None, number_of_classes=3):
    """Train (or load a cached) class-conditional SPN on windowed feature data
    and evaluate classification accuracy of the window's center pixel.

    Parameters
    ----------
    window_size : int
        Side length of the square pixel window (default 3 -> 9 pixels).
    min_instances_slice : int
        Minimum slice size passed to structure learning.
    features : list[int] | None
        Feature-band identifiers; defaults to [20, 120].
    number_of_classes : int
        Number of class labels to score.

    Returns
    -------
    (spn, accuracy) : the trained SPN and test-set accuracy in [0, 1].
    """
    if features is None:
        features = [20, 120]

    add_parametric_inference_support()
    add_parametric_text_support()

    data = get_data_in_window(window_size=window_size, features=features,
                              three_classes=number_of_classes == 3)

    # Stratify on the center-pixel label column: it sits right after the
    # window_size^2 * n_features real-valued columns, at center offset
    # int(window_size^2 / 2).
    sss = sk.model_selection.StratifiedShuffleSplit(test_size=0.2, train_size=0.8,
                                                    random_state=42)
    for train_index, test_index in sss.split(
            data[:, 0:window_size * window_size * len(features)],
            data[:, (window_size * window_size * len(features)) + (int(window_size * window_size / 2))]):
        # NOTE(review): X_train/X_test are overwritten each iteration, so only
        # the last split is used downstream — confirm this is intended
        # (n_splits defaults to 10 for StratifiedShuffleSplit).
        X_train, X_test = data[train_index], data[test_index]

    # Build the learning context: real-valued Gaussian leaves for all feature
    # columns, then categorical leaves for the window_size^2 label columns.
    context_list = list()
    parametric_list = list()
    number_of_features = len(features)
    for _ in range(number_of_features * window_size * window_size):
        context_list.append(MetaType.REAL)
        parametric_list.append(Gaussian)
    for _ in range(window_size * window_size):
        context_list.append(MetaType.DISCRETE)
        parametric_list.append(Categorical)

    ds_context = Context(meta_types=context_list)
    ds_context.add_domains(data)
    ds_context.parametric_types = parametric_list

    # Reuse a previously trained model if one was saved for these settings.
    spn = load_spn(window_size, features, min_instances_slice, number_of_classes)
    if spn is None:
        # One sum-node child per (label-column, label-value) pair, weighted by
        # the number of training rows carrying that value.
        spn = Sum()
        for class_pixel in tqdm(range(-window_size * window_size, 0)):
            for label, count in zip(
                    *np.unique(data[:, class_pixel], return_counts=True)):
                train_data = X_train[X_train[:, class_pixel] == label, :]
                branch = learn_parametric(
                    train_data, ds_context, min_instances_slice=min_instances_slice)
                spn.children.append(branch)
                spn.weights.append(train_data.shape[0])
                spn.scope.extend(branch.scope)
        spn.weights = (np.array(spn.weights) / sum(spn.weights)).tolist()
        assign_ids(spn)
        save_spn(spn, window_size, features, min_instances_slice, number_of_classes)

    # Score each candidate class by clamping the center-pixel label column and
    # taking the SPN log-likelihood.
    res = np.ndarray((X_test.shape[0], number_of_classes))
    for i in tqdm(range(number_of_classes)):
        tmp = X_test.copy()
        # NOTE(review): for window_size=3 this writes column -4, but the
        # ground-truth label below is read from column -5 (the true center of
        # the last 9 label columns is -(9 - 4) = -5). Possible off-by-one in
        # which column is clamped — verify against get_data_in_window's layout.
        tmp[:, -int((window_size**2) / 2)] = i
        res[:, i] = log_likelihood(spn, tmp)[:, 0]

    predicted_classes = np.argmax(res,
                                  axis=1).reshape((X_test.shape[0], 1))

    # NOTE(review): hard-coded -5 is only the center-label column when
    # window_size == 3 — TODO generalize/confirm.
    correct_predicted = 0
    for x, y in zip(X_test[:, -5], predicted_classes):
        if x == y[0]:
            correct_predicted += 1
    accuracy = correct_predicted / X_test.shape[0]
    return spn, accuracy
def setUp(self):
    """Reset the per-test bookkeeping set and register parametric-leaf
    inference handlers."""
    # Track which cases a test has already covered; starts empty each test.
    self.tested = set()
    add_parametric_inference_support()
def setUp(self):
    """Register parametric- and histogram-leaf inference handlers before
    each test."""
    add_parametric_inference_support()
    add_histogram_inference_support()
def setUp(self):
    """Seed NumPy for reproducibility and register inference plus
    expectation handlers for parametric, histogram and piecewise leaves."""
    # Fixed seed so any randomness in the tests is deterministic.
    np.random.seed(17)
    add_parametric_inference_support()
    add_parametric_expectation_support()
    add_histogram_expectation_support()
    add_piecewise_expectation_support()