def test_make_negative_edges_check_neg_nodes(self):
    unique_node_ids = list(np.unique(self.nodes.id))
    neg_nodes = list(
        np.unique(np.concatenate((self.ne.subject, self.ne.object))))
    self.assertTrue(
        set(neg_nodes) <= set(unique_node_ids),
        "Some nodes from negative edges are not in the nodes tsv file")
import numpy as np


def dice(img1, img2, labels=None, nargout=1):
    '''
    Dice [1] volume overlap metric

    The default is to *not* return a measure for the background layer
    (label = 0)

    [1] Dice, Lee R. "Measures of the amount of ecologic association
    between species." Ecology 26.3 (1945): 297-302.

    Parameters
    ----------
    img1 : nd array. The first volume (e.g. predicted volume)
    img2 : nd array. The second volume (e.g. "true" volume)
    labels : optional vector of labels on which to compute Dice.
        If this is not provided, Dice is computed on all non-background
        (non-0) labels
    nargout : optional control of output arguments. if 1, output Dice
        measure(s). if 2, output tuple of (Dice, labels)

    Output
    ------
    if nargout == 1 : dice : vector of dice measures for each label
    if nargout == 2 : (dice, labels) : where labels is a vector of the
        labels on which dice was computed
    '''
    if labels is None:
        labels = np.unique(np.concatenate((img1, img2)))  # unique labels as a flat 1-D array
        labels = np.delete(labels, np.where(labels == 0))  # remove background

    dicem = np.zeros(len(labels))
    for idx, lab in enumerate(labels):
        top = 2 * np.sum(np.logical_and(img1 == lab, img2 == lab))
        bottom = np.sum(img1 == lab) + np.sum(img2 == lab)
        bottom = np.maximum(bottom, np.finfo(float).eps)  # add epsilon (smallest positive float) to avoid division by zero
        dicem[idx] = top / bottom

    if nargout == 1:
        return dicem
    else:
        return (dicem, labels)
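# Hypothetical usage sketch (not part of the original): two tiny label maps
# are made up here purely to illustrate calling dice() with nargout=2.
pred = np.array([[0, 1, 1], [0, 2, 2], [0, 0, 2]])
true = np.array([[0, 1, 1], [0, 1, 2], [0, 2, 2]])
scores, labs = dice(pred, true, nargout=2)
for lab, score in zip(labs, scores):
    print("label {}: Dice = {:.3f}".format(lab, score))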
agent = []  # assumed initialisation: the original fragment appends to agent without defining it
classifier = []
result = []
for el in v:  # select only with the
    agent.append(el[0])
    classifier.append(el[1])
    result.append(el[2])

# need to order per agent
classifier_array = np.array(classifier)
agent_array = np.array(agent)
min_cla = np.amin(classifier_array)
max_cla = np.amax(classifier_array)
unique_element_classifier = np.unique(classifier_array)
# print(unique_element_classifier)
# print(len(unique_element_classifier))
unique_element_agent = np.unique(agent_array)
# print(unique_element_agent)
# print(len(unique_element_agent))
dif_cla = max_cla - min_cla
real_classifier = np.zeros(len(unique_element_classifier))
# rescale: (maxEnd - minEnd) * ((value - minStart) / (maxStart - minStart)) + minEnd;
value_agent = agent[0]
clax = []
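# Hypothetical helper (not in the original fragment) spelling out the
# rescaling formula quoted in the comment above: it maps a value from the
# range [min_start, max_start] onto [min_end, max_end].
def rescale(value, min_start, max_start, min_end, max_end):
    return (max_end - min_end) * ((value - min_start) / (max_start - min_start)) + min_end


# e.g. rescale(7.5, 5, 10, 0, 1) -> 0.5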
from sklearn.datasets import load_iris
from sklearn.datasets import make_blobs
from mglearn.datasets import load_extended_boston
from sklearn.model_selection import train_test_split
import pandas as pd
import mglearn
from IPython.display import display
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
import numpy as np
import matplotlib.pyplot as plt

X = np.array([[0, 1, 0, 1],
              [1, 0, 1, 1],
              [0, 0, 0, 1],
              [1, 0, 1, 0]])
y = np.array([0, 1, 0, 1])

counts = {}
print(X.shape)
for label in np.unique(y):
    # iterate over each class
    # count the entries equal to 1 for each feature
    print("y = {}".format(y))
    print("label={}".format(label))
    print("y == label -> {}".format(y == label))
    print("X = {}".format(X[y == label]))
    print("sum = {}".format(X[y == label].sum(axis=0)))
    counts[label] = X[y == label].sum(axis=0)
print("Feature frequencies:\n{}".format(counts))
import numpy as np
from sklearn.metrics import pairwise_distances


def compute_inertia(a, X):
    # Mean pairwise distance within each cluster, averaged over clusters.
    W = [np.mean(pairwise_distances(X[a == c, :])) for c in np.unique(a)]
    return np.mean(W)
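# Hypothetical usage sketch (not part of the original): feeding compute_inertia
# cluster assignments from KMeans on synthetic blobs, as one would when
# building an elbow or gap-statistic curve.
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

data, _ = make_blobs(n_samples=100, centers=3, random_state=0)
assignments = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(data)
print("mean within-cluster dispersion:", compute_inertia(assignments, data))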