def kullback_leibler_divergence(mu1, sigma1, mu2, sigma2):
    # KL divergence between two multivariate Gaussians N(mu1, sigma1) and N(mu2, sigma2).
    term1 = np.linalg.slogdet(sigma2)[1] - np.linalg.slogdet(sigma1)[1]
    sigma_inv = np.linalg.inv(sigma2)
    term2 = np.trace(np.dot(sigma_inv, sigma1))
    tmp = mu2 - mu1
    term3 = np.dot(np.transpose(tmp), np.dot(sigma_inv, tmp))
    result = 0.5 * (term1 - tmp.shape[0] + term2 + term3)
    return result
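# A minimal usage sketch for kullback_leibler_divergence, assuming numpy is
# imported as np, the means are 1-D arrays and the covariances are matching
# square matrices; the example values are illustrative only.
import numpy as np

mu1 = np.array([0.0, 0.0])
sigma1 = np.eye(2)
mu2 = np.array([1.0, 0.0])
sigma2 = np.array([[2.0, 0.3],
                   [0.3, 1.0]])
print(kullback_leibler_divergence(mu1, sigma1, mu2, sigma2))  # KL(N1 || N2)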
def _setPortfolioInfo(self):
    # Annual portfolio variance, volatility and simple return from the weight
    # vector and the annualised covariance matrix (252 trading days).
    port_annual_var: float = round(
        np.dot(self._weights.T, np.dot(self._dataSimpleCovarianceAnnual, self._weights)), 5)
    port_annual_volatility: float = round(np.sqrt(port_annual_var), 5)
    port_annual_simple_ret: float = round(
        np.sum(self._stats.SimpleReturnsNan.mean() * self._weights) * 252, 5)
    print('Port Ann Ret', str(port_annual_simple_ret * 100) + '%')
    print('Port Ann Volatility/ Risk', str(port_annual_volatility * 100) + '%')
    print('Port Ann Variance', str(port_annual_var * 100) + '%')
def predict(self, X, Y, X_train, Y_train):
    # Predict labels for X using the learned coefficients self._alphas and the
    # weight vector self._w, then return the accuracy against Y.
    y_pred = np.zeros(len(Y))
    for j in range(len(X)):
        pr = 0
        for i in range(len(X_train)):
            pr += self._alphas[i] * np.dot(X[j], X_train[i]) * Y_train[i] - (
                np.dot(self._w, X_train[i]) - Y_train[i])
        y_pred[j] = np.sign(pr)
    accuracy = accuracy_score(Y, y_pred, normalize=True)
    return accuracy
def calNNY(x, vh, wh, gamaH, thetaJ):
    # Forward pass: hidden-layer outputs bh, then the single sigmoid output y.
    bh = calBh(x, vh, gamaH)
    beita = np.dot(bh, wh)
    y = sigmoid(beita[0] + thetaJ)
    return y
def calDertaGd(x, y, dataYi, vh, wh, gamaH, thetaJ, yita):
    # One backprop step: output-layer gradient term gi and hidden-layer term eh,
    # scaled by the learning rate yita, giving the updates for wh, thetaJ,
    # vh (vih) and gamaH.
    gi = calGi(y, dataYi)
    bh = calBh(x, vh, gamaH)
    eh = calEh(wh, gi, bh)
    dertaWh = yita * gi * bh
    dertaThetaj = -yita * gi
    dertaVih = yita * np.dot(np.array([x]).T, np.array([eh]))
    dertaGameH = -yita * eh
    return np.array([dertaWh]).T, dertaThetaj, dertaVih, np.array([dertaGameH])
def tfidf_similarity(s1, s2):
    def add_space(s):
        # Insert spaces between characters so each character becomes a token
        # (useful for Chinese text)
        return ' '.join(list(s))

    s1, s2 = add_space(s1), add_space(s2)
    # Build the TF-IDF matrix for the two strings
    cv = TfidfVectorizer(tokenizer=lambda s: s.split())
    corpus = [s1, s2]
    vectors = cv.fit_transform(corpus).toarray()
    # Cosine similarity of the two TF-IDF vectors
    return np.dot(vectors[0], vectors[1]) / (norm(vectors[0]) * norm(vectors[1]))
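# A minimal usage sketch for tfidf_similarity, assuming the missing imports are
# TfidfVectorizer from scikit-learn and norm from numpy.linalg; the sample
# strings are illustrative only.
import numpy as np
from numpy.linalg import norm
from sklearn.feature_extraction.text import TfidfVectorizer

print(tfidf_similarity('你好吗', '你好呀'))                      # character-level similarity
print(tfidf_similarity('natural language', 'natural languages'))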
def _setMatrices(self, portfolio_data: DataFrame, log_ret: DataFrame, cov_mat: DataFrame):
    for i in range(self._threshold):
        weight_arr: np.ndarray = np.random.uniform(size=len(portfolio_data.columns))
        weight_arr = weight_arr / np.sum(weight_arr)
        # saving weights in the array
        self._weight_matrix[i, :] = weight_arr
        # Portfolio Returns
        annual_weighted_log_ret: float = ((np.sum(log_ret.mean() * weight_arr)) + 1)**252 - 1
        # Saving Portfolio returns
        self._annual_weighted_log_return_matrix[i] = annual_weighted_log_ret
        # Saving Portfolio Risk
        portfolio_sd: float = np.sqrt(np.dot(weight_arr.T, np.dot(cov_mat, weight_arr)))
        self._risk_matrix[i] = portfolio_sd
        # Portfolio Sharpe Ratio
        # Assuming 0% Risk Free Rate
        sr: float = annual_weighted_log_ret / portfolio_sd
        self._sharpe_ratio_matrix[i] = sr
def LFM_grad_desc(self, max_iter, alpha=0.001, lamda=0.002):
    self.M = len(self.R)
    self.N = len(self.R[0])
    # Random initial values for the latent factor matrices P (M x K) and Q (K x N)
    self.P = np.random.rand(self.M, self.K)
    self.Q = np.random.rand(self.N, self.K)
    self.Q = self.Q.T
    # Start iterating
    for step in range(max_iter):
        # Loop over every user u and item i and take a gradient-descent step
        # on the corresponding latent vectors Pu and Qi
        for u in range(self.M):
            for i in range(self.N):
                # For every positive rating, compute the prediction error
                if self.R[u][i] > 0:
                    eui = np.dot(self.P[u, :], self.Q[:, i]) - self.R[u][i]
                    # Update the current Pu and Qi with the gradient-descent rule
                    for k in range(self.K):
                        self.P[u][k] = self.P[u][k] - alpha * (2 * eui * self.Q[k][i] + 2 * lamda * self.P[u][k])
                        self.Q[k][i] = self.Q[k][i] - alpha * (2 * eui * self.P[u][k] + 2 * lamda * self.Q[k][i])
        # All latent vectors updated for this pass; P and Q now give the
        # predicted rating matrix. Accumulate the loss function.
        self.cost = 0
        for u in range(self.M):
            for i in range(self.N):
                if self.R[u][i] > 0:
                    self.cost += (np.dot(self.P[u, :], self.Q[:, i]) - self.R[u][i]) ** 2
                    # Regularisation term
                    for k in range(self.K):
                        self.cost += lamda * (self.P[u][k] ** 2 + self.Q[k][i] ** 2)
        # Stop early once the loss is small enough
        if self.cost < 0.0001:
            break
    self.predR = self.P.dot(self.Q)  # predicted rating matrix (ndarray)
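# A minimal usage sketch for LFM_grad_desc, assuming a hypothetical wrapper
# class that only stores the rating matrix R and the latent dimension K before
# the method runs; the sample ratings and hyperparameters are chosen for the
# demo only.
import numpy as np

class LFM:
    def __init__(self, R, K):
        self.R = R
        self.K = K

LFM.LFM_grad_desc = LFM_grad_desc  # attach the function above as a method

R = np.array([[5, 3, 0, 1],
              [4, 0, 0, 1],
              [1, 1, 0, 5],
              [0, 0, 5, 4]])
model = LFM(R, K=2)
model.LFM_grad_desc(max_iter=500, alpha=0.002, lamda=0.02)
print(np.round(model.predR, 2))  # reconstructed rating matrix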
def calBh(x, vh, gamaH):
    # Hidden-layer outputs: sigmoid of the weighted inputs ah shifted by the
    # hidden-unit terms in gamaH.
    ah = np.dot(x, vh)
    bh = list()
    for i in range(len(ah)):
        bh.append(sigmoid(ah[i] + gamaH[0, i]))
    return np.array(bh)
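# calNNY, calDertaGd and calBh above rely on sigmoid, calGi and calEh, which
# are not included in this collection. A minimal sketch, assuming the standard
# single-sigmoid-output backprop terms (g = y_hat*(1 - y_hat)*(y - y_hat) and
# e_h = b_h*(1 - b_h)*w_h*g); the signatures are inferred from how the helpers
# are called above and are assumptions, not the original implementations.
import numpy as np

def sigmoid(z):
    # Logistic activation used by calBh and calNNY.
    return 1.0 / (1.0 + np.exp(-z))

def calGi(y_hat, y_true):
    # Output-layer gradient term for a single sigmoid output.
    return y_hat * (1.0 - y_hat) * (y_true - y_hat)

def calEh(wh, gi, bh):
    # Hidden-layer gradient term; wh holds the hidden-to-output weights.
    return bh * (1.0 - bh) * (np.asarray(wh).ravel() * gi)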
def linear(x, y):
    # Linear kernel: plain dot product.
    a = np.dot(x, y)
    return a
def pol(x, y, gamma=1, k0=1, degree=3):
    # Polynomial kernel: (gamma * <x, y> + k0) ** degree.
    return np.power(gamma * np.dot(x, y) + k0, degree)
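# Quick sanity check for the two kernels above on small vectors, assuming
# numpy is imported as np; gamma, k0 and degree keep their defaults.
import numpy as np

u = np.array([1.0, 2.0, 3.0])
v = np.array([0.5, -1.0, 2.0])
print(linear(u, v))   # 4.5
print(pol(u, v))      # (1 * 4.5 + 1) ** 3 = 166.375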
def feedForwardNetwork(inputs, weights, bias):
    # Single-layer forward pass with a ReLU activation; returns the index of
    # the largest activation for the last sample in the batch.
    relu_v = np.vectorize(relu)
    output = relu_v(np.dot(inputs, weights[0]) + bias[0])
    return int(np.argmax(output[-1]))
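# feedForwardNetwork assumes a scalar relu helper that np.vectorize can map
# over the activations; a minimal sketch of that helper plus a usage example
# with a single weight matrix (n_features x n_classes). The data shapes and
# names here are assumptions for the demo only.
import numpy as np

def relu(z):
    # Rectified linear unit for a single activation value.
    return max(0.0, z)

rng = np.random.default_rng(0)
inputs = rng.normal(size=(5, 4))      # batch of 5 samples, 4 features
weights = [rng.normal(size=(4, 3))]   # one layer mapping to 3 classes
bias = [np.zeros(3)]
print(feedForwardNetwork(inputs, weights, bias))  # predicted class index for the last sample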