def get_u(self):
    """Return the principal axis of the solid centers: the dominant
    eigenvector of their covariance matrix, found by power iteration."""
    points = npmat.hstack([s.center_position for s in self.solids])
    center = npmat.asmatrix(npmat.average(points, axis=1)).T
    centered_points = points - center
    correlation_matrix = (centered_points * centered_points.T) / self.nb_solids
    u = iterate(correlation_matrix, self.NB_ITERATIONS)
    return u
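# `iterate` is defined elsewhere in this module. A minimal sketch of what
# it presumably does -- power iteration converging to the dominant
# eigenvector of a symmetric matrix -- is given below; the name
# `_iterate_sketch` and the uniform starting vector are illustrative
# assumptions, not the project's actual implementation.
def _iterate_sketch(matrix, nb_iterations):
    u = npmat.ones((matrix.shape[0], 1))
    for _ in range(nb_iterations):
        u = matrix * u           # repeatedly apply the matrix ...
        u = u / npl.norm(u)      # ... and renormalize each step
    return u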
def calc_phi_points(points, laserpos, lasertheta):
    """Given an array of triples points that should be in the plane
    generated by a laser at laserpos with theta lasertheta, calculate
    the inclination of the laser plane's normal vector."""
    plane_line = ddd.coord(-np.sin(lasertheta), np.cos(lasertheta), 0)
    normals = np.cross(np.array(plane_line.T)[0],
                       points - np.array(laserpos.T)[0])
    unit_normals = (normals.T / npl.norm(normals, axis=1)).T
    return calc_phi_norm(np.average(unit_normals, axis=0), lasertheta)
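# For intuition, a standalone recreation of the normal computation with
# plain numpy (no ddd), illustrative only: with lasertheta = 0 the
# in-plane line direction is (0, 1, 0), and for points lying in the y-z
# plane through the origin every cross product comes out along the
# x-axis, so the averaged unit normal recovers the plane normal.
def _demo_plane_normals():
    line = np.array([0.0, 1.0, 0.0])
    laserpos = np.array([0.0, 0.0, 0.0])
    points = np.array([[0.0, 1.0, 1.0],
                       [0.0, 2.0, 3.0]])
    normals = np.cross(line, points - laserpos)
    print(normals)  # [[1. 0. 0.] [3. 0. 0.]] -- all parallel to +x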
def colliding_boxes(self):
    """Broad-phase collision detection: sweep and prune along the
    principal axis of the solid centers, then confirm candidate pairs
    with a full AABB test."""
    nb_solids = self.nb_solids
    u = self.get_u()  # principal axis (see get_u above)
    if self.projected_bounds is None:
        self.projected_bounds = []
        for i in range(nb_solids):
            self.projected_bounds.append((i, 0, 0.))
            self.projected_bounds.append((i, 1, 0.))
    # Project each solid's AABB corners onto u to get its interval.
    min_max_projections = npmat.empty((nb_solids, 2))
    for solid_id in range(nb_solids):
        solid_AABB_corners = self.solids[solid_id].AABB_corners()
        corners_projections = u.T * solid_AABB_corners
        min_max_projections[solid_id, 0] = np.min(corners_projections)
        min_max_projections[solid_id, 1] = np.max(corners_projections)
    for i in range(2 * nb_solids):
        solid_id, begin_or_end_id, value = self.projected_bounds[i]
        new_value = min_max_projections[solid_id, begin_or_end_id]
        self.projected_bounds[i] = (solid_id, begin_or_end_id, new_value)
    # TODO: linear sorting (the bounds stay nearly sorted between calls,
    # so an insertion sort would be closer to O(n))
    self.projected_bounds.sort(key=lambda x: x[2])
    # Sweep: a pair is a candidate iff their intervals overlap on u.
    output = []
    active_solids = []
    for i in range(2 * nb_solids):
        solid_id, begin_or_end_id, value = self.projected_bounds[i]
        if begin_or_end_id == 0:  # interval begins
            for active_solid_id in active_solids:
                if self.solids[active_solid_id].AABB_intersect_with(self.solids[solid_id]):
                    output.append((active_solid_id, solid_id))
            active_solids.append(solid_id)
        else:  # interval ends
            active_solids.remove(solid_id)
    return output
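# A standalone illustration of the sweep above on plain intervals (names
# and numbers are illustrative, independent of the solids machinery):
# only the pair whose intervals overlap on the axis is reported.
def _demo_sweep_and_prune():
    intervals = {0: (0.0, 2.0), 1: (1.5, 3.0), 2: (4.0, 5.0)}
    bounds = []
    for sid, (lo, hi) in intervals.items():
        bounds.append((sid, 0, lo))   # 0 = interval begins
        bounds.append((sid, 1, hi))   # 1 = interval ends
    bounds.sort(key=lambda b: b[2])
    active, pairs = [], []
    for sid, kind, _ in bounds:
        if kind == 0:
            pairs.extend((a, sid) for a in active)
            active.append(sid)
        else:
            active.remove(sid)
    print(pairs)  # [(0, 1)] -- intervals 0 and 1 overlap, 2 is disjoint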
def calc_phi(xys, ref_half_plane, view, cameraposor, laserpos, lasertheta):
    """Given an array of pixel pairs xys from a camera with view and
    cameraposor and laser with laserpos and lasertheta, calculate the
    laser inclination based on a known half-plane ref_half_plane.
    Raises a NoReferenceException if no pixels are in the reference
    half-plane."""
    cref_pos = ddd.unrotate(ref_half_plane.pos - cameraposor.pos, cameraposor)
    cref_side = ddd.unrotate(ref_half_plane.side, cameraposor)
    cref_line = np.cross(cref_side,
                         ddd.unrotate(ref_half_plane.normal, cameraposor),
                         axis=0)
    # TODO less copy-pasta
    cpos = np.array([cref_pos[1, 0], -cref_pos[2, 0]]) / cref_pos[0, 0] \
        * ddd.view_number(view) + np.array([view.centerx, view.centery])
    cline_ = cref_pos / cref_pos[0, 0] - cref_line / cref_line[0, 0]
    cside_ = np.array([cref_side[1, 0], -cref_side[2, 0]])
    cside = np.array([cline_[2, 0], cline_[1, 0]])
    if np.dot(cside, cside_) < 0:
        cside = -cside
    # Keep only the pixels on the reference side of the projected
    # half-plane boundary.
    dxys = xys - cpos
    dot_products = dxys @ cside  # np.mat is deprecated; plain matmul
    good_xys = xys[dot_products >= 0]
    if len(good_xys) == 0:
        raise NoReferenceException()
    threepoints = ddd.threedize_plane(good_xys, view, cameraposor, ref_half_plane)
    return calc_phi_points(threepoints, laserpos, lasertheta)
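# The pixel filter above in miniature (standalone, illustrative values):
# keep the pixels whose offset from cpos has a non-negative dot product
# with cside.
def _demo_halfplane_filter():
    xys = np.array([[10.0, 5.0], [3.0, 8.0], [7.0, 2.0]])
    cpos = np.array([5.0, 5.0])
    cside = np.array([1.0, 0.0])   # "to the right of cpos"
    good = xys[(xys - cpos) @ cside >= 0]
    print(good)  # [[10. 5.] [ 7. 2.]] -- the pixels with x >= 5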
def signal(prices, index):
    """
    Signals to buy/sell stocks.

    Returns a list [X1, X2, ..., Xn], where n is the number of stocks
    and Xi is one of 'sell', 'buy', None.

    :param prices: list of price lists of stocks, in time-ascending order
    :param index: list of k(!) price lists of benchmark values for the k
        last periods, in time-ascending order, with the same time frame
        as the stocks
    """

    def betas(prices_m):
        """
        Compute beta parameters for all stocks described by the
        'prices_m' matrix, according to the 'index' benchmark.

        :param prices_m: matrix of prices. Each column represents a
            stock; each row a price at a successive time stamp
        :return: matrix of betas. Each column represents a stock; each
            row a beta for a successive benchmark period
        """
        returns_m = ml.divide(ml.subtract(prices_m[1:], prices_m[:-1]),
                              prices_m[:-1])
        index_m = ml.matrix(index).T
        index_returns_m = ml.divide(ml.subtract(index_m[1:], index_m[:-1]),
                                    index_m[:-1])
        result = ml.empty((k, prices_m.shape[1]))
        for i in range(k):
            for j in range(stock_amount):
                x = returns_m[:, j]
                y = index_returns_m[:, i]
                result[i, j] = np.cov(x, y, rowvar=0)[0][1] / np.var(y)
        return result

    def regime(reduced_returns_m):
        """
        Make a regime switch based on the acceleration of the PCA
        cross-sectional standard deviation.

        :param reduced_returns_m: matrix of PCA-projected returns. Each
            column represents a component; each row a successive time stamp
        :return: 'momentum' if the trend is expected to continue its
            movement, 'mean_reversion' otherwise
        """
        cross_sect_vol = np.std(reduced_returns_m, axis=1)
        changes = cross_sect_vol[1:] - cross_sect_vol[:-1]
        squared_changes = np.square(changes)
        distance_times = reduced_returns_m.shape[0] - 1  # there are only
        # T - 1 changes
        distance = np.zeros(distance_times)
        for t in range(distance_times):
            sum_amount = min(t + 1, H)
            for i in range(sum_amount):
                distance[t] += squared_changes[t - i, 0]
            distance[t] = np.sqrt(distance[t])
        distance_changes = distance[1:] - distance[:-1]
        if np.max(distance_changes) > 0:
            return 'momentum'
        else:
            return 'mean_reversion'

    prices_m = ml.matrix(prices).T

    # Prepare the main matrices for further computations
    try:
        log_returns_m = np.log(ml.divide(prices_m[1:], prices_m[:-1]))
    except TypeError:
        raise WrongPricesError(prices_m)
    time_period, stock_amount = log_returns_m.shape
    mean_log_returns_m = ml.average(log_returns_m, axis=0)
    demeaned_log_returns_m = log_returns_m - mean_log_returns_m
    covariation_m = demeaned_log_returns_m.T * demeaned_log_returns_m

    # Compute the eigenvectors of the covariation matrix and compose the
    # PCA matrix from the k largest ones
    e_values, e_vectors = eig(covariation_m)
    abs_e_values = np.absolute(e_values)
    # TODO: np.absolute(e_vectors) or something like that
    indexed_abs_e_values = [(i, v) for i, v in enumerate(abs_e_values)]
    # Sort descending: PCA keeps the components with the largest
    # eigenvalues (the original ascending sort picked the smallest).
    w = sorted(indexed_abs_e_values, reverse=True, key=lambda x: x[1])
    e_vectors_m = ml.empty((stock_amount, k))
    for j in range(k):
        e_vectors_m[:, j] = e_vectors[:, w[j][0]]

    # Main part: project the returns onto the PCA universe
    reduced_returns_m = (e_vectors_m.T * demeaned_log_returns_m.T).T

    # Compute beta parameters with respect to the given benchmark index
    betas_m = betas(prices_m)

    time = time_period - time_shift
    if time < H:
        raise WrongParameterException(
            "time_period - time_shift must be at least H")

    # Accumulate the last H reduced returns into one vector
    accumulated_reduced_returns = ml.zeros((1, k))
    for i in range(H):
        accumulated_reduced_returns += reduced_returns_m[time - 1 - i]

    # Make a prediction about further returns behaviour
    estimation = accumulated_reduced_returns * betas_m + mean_log_returns_m

    if regime_switcher:
        current_regime = regime(reduced_returns_m)
    else:
        current_regime = 'mean_reversion'

    # Finally, decide for each stock whether to sell it as overvalued or
    # buy it as undervalued; the other way around under momentum
    max_recent_log_returns = log_returns_m[-H:].max(0)
    result = []
    for i in range(stock_amount):
        if max_recent_log_returns[0, i] > estimation[0, i] + epsilon:
            result.append('sell' if current_regime == 'mean_reversion' else 'buy')
        elif max_recent_log_returns[0, i] < estimation[0, i] - epsilon:
            result.append('buy' if current_regime == 'mean_reversion' else 'sell')
        else:
            result.append(None)
    return result
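# `signal` reads several module-level parameters (k, H, time_shift,
# epsilon, regime_switcher) that are defined elsewhere in this file. A
# hypothetical driver -- all names and numbers below are illustrative
# assumptions, not real configuration or data:
def _demo_signal():
    global k, H, time_shift, epsilon, regime_switcher
    k = 2                   # principal components kept
    H = 5                   # look-back window length
    time_shift = 0          # offset from the latest observation
    epsilon = 0.01          # dead band around the estimated return
    regime_switcher = True  # enable the PCA-volatility regime switch
    prices = [[10.0, 10.2, 10.1, 10.4, 10.3, 10.6, 10.5, 10.8],
              [20.0, 19.8, 20.1, 19.9, 20.2, 20.0, 20.3, 20.1]]
    index = [[100.0, 101.0, 100.5, 102.0, 101.5, 103.0, 102.5, 104.0],
             [50.0, 50.2, 50.1, 50.4, 50.3, 50.6, 50.5, 50.8]]
    # One 'sell'/'buy'/None entry per stock:
    print(signal(prices, index))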
with open('uk_players_positions.pkl', 'rb') as p:
    pos = pickle.load(p)
with open('uk_players_avg_ratings.pkl', 'rb') as p:
    ratings = pickle.load(p)

# TRANSFORM LABELS 'POSITIONS' INTO NUMERIC
le = preprocessing.LabelEncoder()
y = le.fit_transform(pos)
print("LABEL DECODING")
for i in range(16):
    print(str(i) + '\t' + str(le.inverse_transform([i])))

# NOTE: 'players' (a list of per-player feature dicts) is assumed to be
# loaded earlier in the script.
v = DictVectorizer(sparse=False)
X = v.fit_transform(players)

# CLASSIFY FOR POSITION
classifiers = [MultinomialNB(), LinearSVC(), LogisticRegression()]
for clf in classifiers:
    clf.fit(X, y)  # fit on the full data (cross_val_predict refits per fold)
    predicted = cross_val_predict(clf, X, y, cv=10)
    print(metrics.classification_report(y, predicted))

# REGRESSION FOR MATCH RATING
y = ratings
regressors = [LinearRegression(), Lasso()]
for reg in regressors:
    score = cross_val_score(reg, X, y, scoring='neg_mean_squared_error', cv=10)
    print(score.mean())  # negated MSE: closer to 0 is better
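# 'neg_mean_squared_error' is scikit-learn's sign-flipped MSE (negated so
# that greater is always better across scorers). A hypothetical helper,
# name illustrative, to report a conventional positive RMSE instead:
def _cv_rmse(reg, X, y):
    scores = cross_val_score(reg, X, y,
                             scoring='neg_mean_squared_error', cv=10)
    return (-scores.mean()) ** 0.5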