def random_walk_error(sampling_args):
    """Sample the graph via random-walk sampling and return the NMSE.

    Args:
        sampling_args (dict): arguments for ``sampling.main``; must contain
            the key ``'graph'`` (a NetworkX graph whose nodes carry a
            ``'value'`` attribute).

    Returns:
        The normalized mean squared error between the true node values and
        the sparse-label-propagation reconstruction from the sampled nodes.
    """
    graph = sampling_args['graph']
    sample_set = sampling.main(sampling_args)['sampling_set']
    estimate = sparse_label_propagation(graph, list(sample_set))
    # True signal, ordered by node index 0..N-1 (assumes integer node ids).
    signal = [graph.node[node_id]['value']
              for node_id in range(graph.number_of_nodes())]
    return normalized_mean_squared_error(signal, estimate)
def get_current_nmse(self):
    """Return the NMSE of reconstructing the graph signal from the
    current sampling set via sparse label propagation.

    Reads ``self.graph`` and ``self.sampling_set``.
    """
    # True signal in sorted-node order (node ids need not be 0..N-1 here).
    signal = [self.graph.node[node_id]['value']
              for node_id in sorted(self.graph.nodes_iter())]
    reconstruction = sparse_label_propagation(
        self.graph, list(self.sampling_set))
    return nmse(signal, reconstruction)
def _reward(self):
    """Reward: the relative improvement of the current reconstruction
    error over the worst-case SLP error (``self.slp_maximum_error``).

    Returns:
        float in (-inf, 1]; equals 1 when the error is zero and 0 when
        the error equals the maximum error.
    """
    # True signal, ordered by node index 0..N-1 (assumes integer node ids).
    signal = [self.graph.node[node_id]['value']
              for node_id in range(self.graph.number_of_nodes())]
    estimate = sparse_label_propagation(self.graph, list(self.sampling_set))
    current_error = nmse(signal, estimate)
    max_error = self.slp_maximum_error
    return (max_error - current_error) / max_error
def get_current_nmse(self):
    """Return the NMSE between the true graph signal and its sparse
    label propagation reconstruction from ``self.sampling_set``.
    """
    num_nodes = self.graph.number_of_nodes()
    # True signal, ordered by node index 0..N-1 (assumes integer node ids).
    signal = [self.graph.node[node_id]['value']
              for node_id in range(num_nodes)]
    reconstruction = sparse_label_propagation(
        self.graph, list(self.sampling_set))
    return nmse(signal, reconstruction)
def slp_minimum_error(graph, sampling_set_size):
    """Find the minimum SLP reconstruction error over every possible
    sampling set of the given size.

    Exhaustive search over all C(N, k) candidate sets — very slow for
    large graphs; intended for small benchmark instances only.

    Args:
        graph: NetworkX graph whose nodes carry a ``'value'`` attribute.
        sampling_set_size (int): number of nodes per candidate set.

    Returns:
        The smallest normalized mean squared error found.
    """
    # True signal, ordered by node index 0..N-1 (assumes integer node ids).
    signal = [graph.node[node_id]['value']
              for node_id in range(graph.number_of_nodes())]
    return min(
        normalized_mean_squared_error(
            signal, sparse_label_propagation(graph, candidate))
        for candidate in combinations(graph.nodes(), sampling_set_size)
    )
def _reward(self):
    """Compute the reward for the current sampling set.

    Side effect: stores the current reconstruction NMSE in ``self.error``.

    Returns:
        float: ``1.0 - error``, i.e. 1 for a perfect reconstruction and
        lower values as the NMSE grows.
    """
    x_hat = sparse_label_propagation(self.graph, list(self.sampling_set))
    x = [
        self.graph.node[idx]['value']
        for idx in range(self.graph.number_of_nodes())
    ]
    error = nmse(x, x_hat)
    # FIX: removed dead computation `tv = total_variation(self.graph.edges(), x)`
    # — the result was never used, so it was pure wasted O(|E|) work on every
    # reward evaluation (likely a leftover from a reward-shaping experiment).
    self.error = error
    # this + all nodes in action seems best
    reward = 1.0 - error
    return reward