from numbers import Number

import numpy as np


def rel_error(x, y):
    """Returns the relative error between x and y (scalars or arrays)."""
    if isinstance(x, (int, float, Number)):
        x = float(x)
        y = float(y)
        return abs(x - y) / max(1e-8, abs(x) + abs(y))
    else:
        return np.max(np.abs(x - y) / np.maximum(1e-8, np.abs(x) + np.abs(y)))
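A minimal sketch of how `rel_error` is typically used: comparing an analytic gradient against a centered finite-difference estimate. The function `f`, its gradient, and the threshold mentioned in the comment are illustrative assumptions, not part of the original code.

```python
import numpy as np

# Hypothetical usage: gradient checking with rel_error.
f = lambda x: np.sum(x ** 2)      # made-up objective
grad_analytic = lambda x: 2 * x   # its analytic gradient

x = np.random.randn(5)
h = 1e-5
grad_numeric = np.zeros_like(x)
for i in range(x.size):
    xp, xm = x.copy(), x.copy()
    xp[i] += h
    xm[i] -= h
    grad_numeric[i] = (f(xp) - f(xm)) / (2 * h)

# Small values (e.g. below ~1e-6) suggest the analytic gradient is correct.
print(rel_error(grad_analytic(x), grad_numeric))
```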
print(flag)  # progress indicator
img = cv2.imread(dirTrain + img_name_train, 0)  # load as grayscale
ret, thresh = cv2.threshold(img, 50, 255, 0)
# thresh = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
# Note: the 3-value unpacking matches OpenCV 3.x; OpenCV 4.x returns only (contours, hierarchy).
_, contours, hierarchy = cv2.findContours(thresh, 1, 2)
cnt = contours[0]
M = cv2.moments(cnt)
# Seven normalized central moments of the first contour.
f = [M['nu20'], M['nu11'], M['nu02'], M['nu30'], M['nu21'], M['nu12'], M['nu03']]
for ijk, val in enumerate(f):
    features[flag, ijk] = val  # was features[flag:ijk], a slice assignment that clobbered rows
if np.sum(np.abs(features[flag])) > 0.2:
    image_train_list.write(img_name_train + '\n')
    # Filenames encode the label as "x_y_z.jpg"; rsplit drops the extension.
    # (str.strip('.jpg') removes characters, not the suffix, so it was fragile.)
    xyz_train = img_name_train.rsplit('.', 1)[0].split('_')
    x_train = float(xyz_train[0])
    y_train = float(xyz_train[1])
    z_train = float(xyz_train[2])
    labels[flag, 0] = x_train
    labels[flag, 1] = y_train
    labels[flag, 2] = z_train
    labels_x[flag] = x_train
    labels_y[flag] = y_train
    labels_z[flag] = z_train
    flag = flag + 1
img_name_train = image_train_f.readline().strip()  # drop the trailing newline
# Requires: import collections, math, random; import numpy as np
def MTNE(self):
    # Shared dictionary matrix.
    D = np.random.rand(self.p, self.m)
    lambda_ff_list = []
    Aprime_list = []  # per-timestamp sparse author embeddings
    W_list = []       # per-timestamp weight matrices
    F_list = []       # per-timestamp dense embeddings
    X_list = []       # per-timestamp local similarity matrices
    X_mask_list = []
    # All sparse embeddings across all timestamps.
    # F_big = np.zeros((self.q, self.k))
    X_big = np.zeros((self.q, self.q))
    X_mask_big = np.zeros((self.q, self.q))
    S = np.random.rand(self.q, self.q)
    indexDict_local2global = collections.OrderedDict()
    indexDict_global2local = dict()
    globalIndex = 0
    for key in self.edgeDict:
        A = self.edgeDict[key]
        X = self.initX(A, self.theta)
        X = X / (np.amax(X) - np.amin(X))  # range-normalize the similarities
        X_list.append(X)
        X_mask = np.zeros((self.q, self.q))
        n = A.shape[0]  # number of nodes at the current timestamp
        Aprime = np.random.rand(n, self.p)
        Aprime_list.append(Aprime)
        # Map local node indices to rows of the global matrices.
        indexDict = dict()
        for i in range(n):
            indexDict[i] = globalIndex + i
            indexDict_global2local[globalIndex + i] = (key, i)
        indexDict_local2global[key] = indexDict
        for i in range(n):
            i_big = indexDict[i]
            for j in range(n):
                j_big = indexDict[j]
                X_big[i_big, j_big] = X[i, j]
                X_mask[i_big, j_big] = 1.
                X_mask_big[i_big, j_big] = 1.
        X_mask_list.append(X_mask)
        globalIndex += n
        W = np.random.rand(n, self.p)
        W_list.append(W)
        F = np.random.rand(n, self.m)
        F_list.append(F)
        lambda_ff_list.append(random.random())
    F_big = self.concatenateMatrixInList(F_list, self.m, 0)

    loss_t1 = 1e9  # sentinel so the first iteration always runs
    print(loss_t1)
    loss_t = self.epsilon + loss_t1 + 1
    while abs(loss_t - loss_t1) >= self.epsilon:
        # Optimize each element in a randomized sequence.
        nita = 1. / math.sqrt(self.t)  # decaying step size
        self.t = self.t + 1
        loss_t = loss_t1
        loss_t1 = 0.0
        counter = 0
        for key in self.edgeDict:
            X = X_list[counter]
            W = W_list[counter]
            F = F_list[counter]
            Aprime = Aprime_list[counter]
            indexDict = indexDict_local2global[key]
            lambda_ff = lambda_ff_list[counter]
            X_mask = X_mask_list[counter]
            P = self.getP(X_mask, F_big, X_big)
            n = X.shape[0]
            for i in range(n):
                # Update Aprime[i]: gradient step followed by soft-thresholding
                # (the proximal step for the l1 penalty).
                z = Aprime[i] - nita * (
                    np.dot(np.dot(Aprime[i], W.T) - X[i], W) +
                    self.beta * np.dot(np.dot(Aprime[i], D) - F[i], D.T))
                update = np.maximum(np.zeros(np.shape(z)),
                                    np.abs(z) - nita * self.lamda_pgd)
                Aprime[i] = np.sign(z) * update
                # Update F[i]; parenthesized so beta weights the whole residual,
                # matching the beta term in the Aprime gradient above.
                lf_part1 = self.beta * (F[i] - np.dot(Aprime[i], D))
                lf_part2 = np.zeros(self.m)
                i_big_index = indexDict[i]
                for j in range(self.q):
                    lf_part2 += self.rho * (F[i] - F_big[j]) * S[i_big_index, j]
                val1 = np.dot(F[i], F_big.T)
                val2 = val1 - np.ones(self.q)
                val3 = np.dot(val2, F_big)
                lf_part3 = 0.01 * val3
                F[i] = F[i] - nita * (lf_part1 + lf_part2 + lf_part3)
                F_big[i_big_index] = F[i]
                # vec = np.dot(F[i], F_big.T) - np.ones(self.q)
                # lambda_ff = lambda_ff - nita * np.linalg.norm(vec)
                # Update the similarity row S[i].
                ls = (S[i_big_index] - P[i_big_index]) \
                    - self.epsilon * np.ones(self.q) \
                    - self.alpha * S[i_big_index]
                S[i_big_index] = S[i_big_index] - nita * ls
            Aprime = self.chechnegtive(Aprime, None, None)  # sic: helper name as defined elsewhere
            LW = np.dot((np.dot(W, Aprime.T) - X), Aprime) + self.lamda * W
            W = W - nita * LW
            W = self.chechnegtive(W, None, None)
            p1 = np.dot(Aprime, D) - F
            p2 = self.beta * np.dot(Aprime.T, p1)
            LD = p2 + self.gamma * D
            D = D - nita * LD
            W_list[counter] = W
            F_list[counter] = F
            Aprime_list[counter] = Aprime
            lambda_ff_list[counter] = lambda_ff
            loss_t1_part = self.lossfuction(X, W, Aprime, F, D)  # sic
            loss_t1 += loss_t1_part
            counter += 1
        # loss_last = self.laplacianLoss(simM, F)
        simMD, simML = self.getLaplacian(S)
        trval = np.trace(np.dot(np.dot(F_big.T, simML), F_big))
        gapval = self.norm(X_mask_big * (S - X_big))
        loss_t1 += self.rho * trval + self.eta * gapval
        if loss_t < loss_t1 and loss_t != 0:
            break  # stop if the loss increased
        print(loss_t1)
    return [Aprime_list, F_list, S]
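The per-row `Aprime` update above is an ISTA-style proximal gradient step: a plain gradient step followed by soft-thresholding, which is what enforces sparsity under the l1 penalty. A minimal standalone sketch of that operator, with made-up inputs (`z` and the threshold value are illustrative):

```python
import numpy as np

def soft_threshold(z, thresh):
    """Proximal operator of the l1 norm: shrinks each entry of z toward zero."""
    return np.sign(z) * np.maximum(np.abs(z) - thresh, 0.0)

# Illustrative values; in MTNE, z is the post-gradient row of Aprime and
# the threshold is nita * self.lamda_pgd.
z = np.array([0.8, -0.05, 0.3, -1.2])
print(soft_threshold(z, 0.1))  # approximately [ 0.7  -0.   0.2  -1.1]
```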
# Match each contour's moment vector against the template f_mean.
f = np.full([l, moments_num], np.inf)  # np.empty left garbage in rows skipped below
for i in range(l):
    cnt = contours[i]
    area = cv2.contourArea(cnt)
    if area > min_area:
        M = cv2.moments(cnt)
        feature = [M['nu20'], M['nu11'], M['nu02'], M['nu30'], M['nu21'], M['nu12'], M['nu03']]
        for iii, val in enumerate(feature):
            f[i, iii] = val
# Pick the contour whose moments are closest (in L1 distance) to the template.
f = np.abs(f - f_mean)
s = np.sum(f, axis=1)
index = np.argmin(s)
print(str(index))
cnt = contours[index]
x, y, w, h = cv2.boundingRect(cnt)
# Pad the bounding box by 3 px on each side, clamping at the image border.
x = max(x - 3, 0)
y = max(y - 3, 0)
w = w + 6
h = h + 6
result = img[y:y + h, x:x + w]
cv2.imwrite('temp1.jpg', result)
# Segment the detected target that was cropped from the X-ray image and saved above.
img = cv2.imread('temp1.jpg')
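For reference, a hedged sketch of how the template vector `f_mean` used above might be produced: averaging the same seven normalized central moments over the training crops collected in the training loop earlier. The `features` and `flag` names follow that loop; the persistence step is an assumption.

```python
import numpy as np

# Assumption: `features` is the (num_train, 7) moment matrix filled in the
# training loop, with the first `flag` rows actually populated.
f_mean = np.mean(features[:flag], axis=0)
np.save('f_mean.npy', f_mean)  # hypothetical persistence step
```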
import numpy as np

from facility import *
from solver_primitives import *

HIDDEN_LAYERS = 4
shapes = (1024,) * HIDDEN_LAYERS + (10,)
storage = {}
activation = builder.ReLU

# Build an MLP that exports each affine layer's pre-activation into `storage`.
mlp = builder.Sequential()
for i, shape in enumerate(shapes[:-1]):
    mlp.append(builder.Affine(shape))
    mlp.append(builder.Export('affine%d' % i, storage))
    mlp.append(activation())
mlp.append(builder.Affine(shapes[-1]))
mlp.append(builder.Export('affine%d' % (len(shapes) - 1), storage))

model = builder.Model(mlp, 'softmax', (3072,))
initialize(model)

X = np.random.normal(0, 1, (64, 3072))
output = model.forward(X, 'train')
print('origin')
for key, value in storage.items():
    print(key, np.std(value))

# Rescale the weights, then re-measure the per-layer statistics and the
# change in the network's output.
rescale(mlp, X, model.params)
rescaled_output = model.forward(X, 'train')
print('rescaled')
for key, value in storage.items():
    print(key, np.std(value))
print(np.mean(np.abs(output - rescaled_output)))