def __compute_vx_inputs_and_costs(self, R, delta):
    """
    Computes the list of optimal inputs and costs at the vertices of simplex R,
    using the commutation delta.

    Parameters
    ----------
    R : list
        Simplex in vertex representation at whose vertices to compute the
        vertex inputs and costs.
    delta : np.array
        Commutation to use for the calculation.

    Returns
    -------
    vx_inputs_and_costs : list
        The list of optimal inputs and costs at the vertices of simplex R.
    """
    Nvx = len(R)
    vx_inputs_and_costs = [None for _ in range(Nvx)]
    for i in range(Nvx):
        vertex = R[i]
        vx_inputs_and_costs[i] = self.P_theta_delta(theta=vertex, delta=delta)
        status = self.nlp.status
        if status != cvx.OPTIMAL and status != cvx.OPTIMAL_INACCURATE:
            raise cvx.SolverError('problem infeasible')
    return vx_inputs_and_costs
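# Usage sketch (added; not part of the original class): the status check used in
# __compute_vx_inputs_and_costs, written as a standalone helper. Only OPTIMAL and
# OPTIMAL_INACCURATE are accepted; anything else raises SolverError. The helper
# name is hypothetical.
import cvxpy as cvx


def solve_or_raise(problem: cvx.Problem) -> float:
    problem.solve()
    if problem.status not in (cvx.OPTIMAL, cvx.OPTIMAL_INACCURATE):
        raise cvx.SolverError('problem infeasible')
    return problem.value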
def solve_problem_cp_backups(cp_kws_backups=None, *args, **kwargs):
    """
    Same as solve_problem_cp(), but allows for backup solvers in case the first
    one you tried fails.

    Parameters
    ----------
    cp_kws_backups: list of dicts
        List of solver keyword arguments. Will try each solver in this list
        until one does not fail.

    *args, **kwargs:
        Arguments to solve_problem_cp that remain the same for every solver.
    """
    if cp_kws_backups is None:
        cp_kws_backups = [None]

    for cp_kws in cp_kws_backups:
        try:
            return solve_problem_cp(cp_kws=cp_kws, *args, **kwargs)
        except cp.SolverError as e:
            print(e)

    raise cp.SolverError('None of the solvers worked')
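# Usage sketch (added; not part of the original module): try OSQP first and fall
# back to SCS via the backup list. The problem data below is made up for
# illustration only.
import cvxpy as cp
import numpy as np

A, b = np.random.randn(20, 5), np.random.randn(20)
beta = cp.Variable(5)
value, opt_val, prob = solve_problem_cp_backups(
    cp_kws_backups=[{'solver': cp.OSQP}, {'solver': cp.SCS}],
    var=beta,
    objective=cp.sum_squares(A @ beta - b),
    constraints=[cp.sum(beta) == 1],
    verbosity=1)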
def solve(problem: cvxpy.Problem, solvers: List[Tuple[str, Dict]] = DEFAULT_SOLVERS):
    """
    Try to solve the given cvxpy problem using the given solvers, in order,
    until one succeeds.
    See https://www.cvxpy.org/tutorial/advanced/index.html for a list of
    supported solvers.

    :param solvers: list of tuples. Each tuple is
        (name-of-solver, keyword-arguments-to-solver).
    """
    is_solved = False
    for (solver, solver_kwargs) in solvers:
        # Try each solver in turn.
        try:
            if solver == cvxpy.SCIPY:
                # WARNING: solve changes both its arguments!
                problem.solve(solver=solver, scipy_options=dict(solver_kwargs))
            else:
                problem.solve(solver=solver, **solver_kwargs)
            logger.info("Solver %s [%s] succeeds", solver, solver_kwargs)
            is_solved = True
            break
        except cvxpy.SolverError as err:
            logger.info("Solver %s [%s] fails: %s", solver, solver_kwargs, err)
    if not is_solved:
        raise cvxpy.SolverError(f"All solvers failed: {solvers}")
    if problem.status == "infeasible":
        raise ValueError("Problem is infeasible")
    elif problem.status == "unbounded":
        raise ValueError("Problem is unbounded")
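# Usage sketch (added; not part of the original module): a toy LP and an explicit
# fallback list. The list below is illustrative; the module's DEFAULT_SOLVERS may
# differ. SCIPY options are forwarded through scipy_options, as in solve() above.
import logging
from typing import Dict, List, Tuple

import cvxpy

logger = logging.getLogger(__name__)

x = cvxpy.Variable(2, nonneg=True)
problem = cvxpy.Problem(cvxpy.Minimize(cvxpy.sum(x)), [x[0] + x[1] >= 1])
fallback_solvers: List[Tuple[str, Dict]] = [
    (cvxpy.SCIPY, {"method": "highs"}),  # forwarded as scipy_options
    (cvxpy.SCS, {}),                     # plain keyword arguments
]
solve(problem, solvers=fallback_solvers)
print(problem.status, problem.value)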
def solve_problem_cp(var, objective, constraints, cp_kws=None, warm_start=False, verbosity=0):
    """
    Solves a cvxpy problem that has already been set up.

    Parameters
    ----------
    var:
        The cvxpy variable.
    objective:
        The cvxpy objective function.
    constraints: list
        List of cvxpy constraints.
    cp_kws: dict
        Keyword arguments to cvxpy.Problem().solve().
    warm_start: bool
        Whether to warm start the solver from an initial value provided
        through var.
    verbosity: int
        How much printout.

    Output
    ------
    value, opt_val, prob

    value: array-like
        The solution value.
    opt_val: float
        The optimal value.
    prob:
        The cvxpy problem.
    """
    if cp_kws is None:
        cp_kws = {}

    prob = cp.Problem(cp.Minimize(objective), constraints)
    opt_val = prob.solve(warm_start=warm_start, verbose=verbosity >= 2, **cp_kws)

    if verbosity >= 1:
        print("status:", prob.status)
        print("optimal value", prob.value)

    if var.value is None:
        raise cp.SolverError('cvxpy failed to converge! {}'.format(prob.status))

    return var.value, opt_val, prob
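# Direct-call sketch (added; not part of the original module): an explicit solver
# choice is passed through cp_kws, and warm_start=True reuses the value already
# stored in var, per the docstring. Problem data is made up for illustration.
import cvxpy as cp
import numpy as np

A, b = np.random.randn(30, 8), np.random.randn(30)
beta = cp.Variable(8)
value, opt_val, prob = solve_problem_cp(
    var=beta,
    objective=cp.sum_squares(A @ beta - b),
    constraints=[cp.norm(beta, 2) <= 1],
    cp_kws={'solver': cp.SCS},
    warm_start=True,
    verbosity=1)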
def save_optimization_results(self, opt_window_num, sub_index, prob, obj_expression, cvx_error_msg):
    """
    Checks whether the optimization found a solution. If not, report the problem
    to the user. If a solution was found, save the results within each instance.

    Args:
        opt_window_num:
        sub_index:
        prob:
        obj_expression:
        cvx_error_msg: any error message that might have occurred during problem solve
    """
    TellUser.info(f'Optimization problem was {prob.status}')
    # save solver used
    self.solvers.append(prob.solver_stats.solver_name)

    if (prob.status == 'infeasible') or (prob.status == 'unbounded'):
        # tell the user and throw an error specific to the problem being infeasible/unbounded
        error_msg = f'Optimization window {opt_window_num} was {prob.status}. No solution found. Look in *.log for more information'
        TellUser.error(cvx_error_msg)
        raise cvx.SolverError(error_msg)

    # evaluate optimal objective expression
    for cost, func in obj_expression.items():
        try:
            obj_expression[cost] = func.value
        except AttributeError:
            continue
    obj_values = pd.DataFrame(obj_expression, index=[opt_window_num])
    # then add objective expressions to financial obj_val
    self.objective_values = pd.concat([self.objective_values, obj_values])

    # GENERAL CHECK ON SOLUTION: check for non-zero slack
    if np.any(abs(obj_values.filter(regex="_*slack$")) >= 1):
        TellUser.warning('non-zero slack variables found in optimization solution')

    for vs in self.service_agg.value_streams.values():
        vs.save_variable_results(sub_index)

    for der in self.poi.active_ders:
        # record the solution of the variables and run again
        der.save_variable_results(sub_index)
        # calculate degradation in Battery instances
        if der.tag == "Battery":
            der.calc_degradation(opt_window_num, sub_index[0], sub_index[-1])
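# Standalone sketch (added; not part of the original class) of the
# infeasible/unbounded guard used in save_optimization_results, applied to a
# deliberately infeasible toy problem.
import cvxpy as cvx

x = cvx.Variable()
prob = cvx.Problem(cvx.Minimize(x), [x >= 0, x <= -1])  # infeasible on purpose
prob.solve()
if (prob.status == 'infeasible') or (prob.status == 'unbounded'):
    # the class method would log via TellUser.error() before raising
    raise cvx.SolverError(f'Optimization window 0 was {prob.status}. No solution found.')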
def dominating_allocation_with_bounded_sharing(instance: Any, thresholds: List) -> Allocation:
    """
    Finds an allocation in which each agent i gets value at least thresholds[i],
    and has at most n-1 sharings, where n is the number of agents.

    IDEA: find a Basic Feasible Solution (BFS) of a linear program. NOTE: some
    solvers return a BFS by default (particularly, those running Simplex).

    >>> logger.setLevel(logging.WARNING)
    >>> instance = [[8,2],[5,5]]
    >>> dominating_allocation_with_bounded_sharing(instance, thresholds=[0,0]).round(3)
    Agent #0 gets {} with value 0.
    Agent #1 gets { 100.0% of 0, 100.0% of 1} with value 10.
    <BLANKLINE>
    >>> dominating_allocation_with_bounded_sharing(instance, thresholds=[1,1]).round(3)
    Agent #0 gets { 12.5% of 0} with value 1.
    Agent #1 gets { 87.5% of 0, 100.0% of 1} with value 9.38.
    <BLANKLINE>
    >>> dominating_allocation_with_bounded_sharing(instance, thresholds=[2,2]).round(3)
    Agent #0 gets { 25.0% of 0} with value 2.
    Agent #1 gets { 75.0% of 0, 100.0% of 1} with value 8.75.
    <BLANKLINE>
    >>> dominating_allocation_with_bounded_sharing(instance, thresholds=[5,5]).round(3)
    Agent #0 gets { 62.5% of 0} with value 5.
    Agent #1 gets { 37.5% of 0, 100.0% of 1} with value 6.88.
    <BLANKLINE>
    """
    # logger.info("Finding an allocation with thresholds %s", thresholds)
    v = ValuationMatrix(instance)
    allocation_vars = cvxpy.Variable((v.num_of_agents, v.num_of_objects))
    feasibility_constraints = [
        sum([allocation_vars[i][o] for i in v.agents()]) == 1
        for o in v.objects()
    ]
    positivity_constraints = [
        allocation_vars[i][o] >= 0
        for i in v.agents()
        for o in v.objects()
    ]
    utilities = [
        sum([allocation_vars[i][o] * v[i][o] for o in v.objects()])
        for i in v.agents()
    ]
    utility_constraints = [
        utilities[i] >= thresholds[i]
        for i in range(v.num_of_agents - 1)
    ]
    constraints = feasibility_constraints + positivity_constraints + utility_constraints
    problem = cvxpy.Problem(cvxpy.Maximize(utilities[v.num_of_agents - 1]), constraints)
    logger.info("constraints: %s", constraints)
    solvers = [
        (cvxpy.SCIPY, {'method': 'highs-ds'}),  # Always finds a BFS
        (cvxpy.MOSEK, {"bfs": True}),           # Always finds a BFS
        (cvxpy.OSQP, {}),                       # Default - not sure it returns a BFS
        (cvxpy.SCIPY, {}),                      # Default - not sure it returns a BFS
    ]
    solve(problem, solvers=solvers)
    if problem.status == "optimal":
        allocation_matrix = allocation_vars.value
        return allocation_matrix
    else:
        raise cvxpy.SolverError(f"No optimal solution found: status is {problem.status}")
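# Side sketch (added; not part of the original module) of the BFS-friendly
# configuration listed above: solve a toy LP through the SCIPY interface with the
# 'highs-ds' (dual simplex) method, which returns a vertex (basic feasible)
# solution.
import cvxpy

x = cvxpy.Variable(3, nonneg=True)
prob = cvxpy.Problem(cvxpy.Maximize(cvxpy.sum(x)), [2 * x[0] + x[1] + x[2] <= 1])
prob.solve(solver=cvxpy.SCIPY, scipy_options={'method': 'highs-ds'})
print(prob.status, x.value)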
def _calc_MVPFP(factor_name, start_date, end_date=None, month_end=True, save=False):
    """
    Constructs the Minimum Volatility Pure Factor Portfolio (MVPFP) for the
    target factor.

    Parameters:
    --------
    :param factor_name: str
        Name of the alpha factor, e.g. SmartMoney
    :param start_date: datetime-like, str
        Start date, e.g. YYYY-MM-DD, YYYYMMDD
    :param end_date: datetime-like, str, default None
        End date, e.g. YYYY-MM-DD, YYYYMMDD
    :param month_end: bool, default True
        Whether to compute factor loadings only on month-end dates
    :param save: bool, default False
        Whether to save the results
    :return: CWeightHolding instance
        Weight data of the minimum volatility pure factor portfolio
    --------
    Optimization: a portfolio with unit exposure to the target factor, zero
    exposure to all other risk factors, and minimum expected volatility:

        Min:  W'VW
        s.t.  W'X_beta = 0
              W'x_target = 1

    where:
        W: weights of the minimum volatility pure factor portfolio
        V: stock covariance matrix
        X_beta: stock style factor loading matrix
        x_target: stock target factor loading vector
    """
    start_date = Utils.to_date(start_date)
    if end_date is None:
        trading_days_series = Utils.get_trading_days(end=start_date, ndays=1)
    else:
        end_date = Utils.to_date(end_date)
        trading_days_series = Utils.get_trading_days(start=start_date, end=end_date)

    CRiskModel = Barra()
    mvpfp_holding = CWeightHolding()
    for calc_date in trading_days_series:
        if month_end and (not Utils.is_month_end(calc_date)):
            continue
        # get/compute the stock covariance matrix for calc_date
        stock_codes, arr_stocks_covmat = CRiskModel.calc_stocks_covmat(calc_date)
        # get the style factor loading matrix
        df_stylefactor_loading = CRiskModel.get_StyleFactorloading_matrix(calc_date)
        # df_stylefactor_loading.set_index('code', inplace=True)
        # df_stylefactor_loading = df_stylefactor_loading.loc[stock_codes]    # reorder by stock code order
        # arr_stylefactor_loading = np.array(df_stylefactor_loading)
        # get the target factor loading vector (orthogonalized loadings)
        df_targetfactor_loading = _get_factorloading(
            factor_name, calc_date,
            alphafactor_ct.FACTORLOADING_TYPE['ORTHOGONALIZED'])
        df_targetfactor_loading.drop(columns='date', inplace=True)
        df_targetfactor_loading.rename(columns={'id': 'code', 'factorvalue': factor_name}, inplace=True)

        df_factorloading = pd.merge(left=df_stylefactor_loading,
                                    right=df_targetfactor_loading,
                                    how='inner', on='code')
        df_factorloading.set_index('code', inplace=True)

        df_stylefactor_loading = df_factorloading.loc[stock_codes, riskfactor_ct.STYLE_RISK_FACTORS]
        arr_stylefactor_loading = np.array(df_stylefactor_loading)
        df_targetfactor_loading = df_factorloading.loc[stock_codes, factor_name]
        arr_targetfactor_loading = np.array(df_targetfactor_loading)

        # optimize for the weights of the minimum volatility pure factor portfolio
        V = arr_stocks_covmat
        X_beta = arr_stylefactor_loading
        x_target = arr_targetfactor_loading
        N = len(stock_codes)
        w = cvx.Variable((N, 1))
        risk = cvx.quad_form(w, V)
        constraints = [cvx.matmul(w.T, X_beta) == 0,
                       cvx.matmul(w.T, x_target) == 1]
        prob = cvx.Problem(cvx.Minimize(risk), constraints)
        prob.solve()
        if prob.status == cvx.OPTIMAL:
            datelabel = Utils.datetimelike_to_str(calc_date, dash=False)
            df_holding = pd.DataFrame({'date': [datelabel] * len(stock_codes),
                                       'code': stock_codes,
                                       'weight': w.value.flatten()})  # flatten (N, 1) weights to 1-D for the DataFrame
            mvpfp_holding.from_dataframe(df_holding)
            if save:
                holding_path = os.path.join(
                    SETTINGS.FACTOR_DB_PATH,
                    eval('alphafactor_ct.' + factor_name.upper() + '.CT')['db_file'],
                    'mvpfp', '{}_{}.csv'.format(factor_name, datelabel))
                mvpfp_holding.save_data(holding_path)
        else:
            raise cvx.SolverError(
                "%s: failed to compute the minimum volatility pure factor portfolio for %s."
                % (Utils.datetimelike_to_str(calc_date), factor_name))

    return mvpfp_holding
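# Standalone sketch (added; not part of the original module) of the MVPFP
# optimization above, using synthetic data. The covariance and loadings are
# random placeholders, not real market data.
import cvxpy as cvx
import numpy as np

rng = np.random.default_rng(0)
N, K = 50, 5                                  # stocks, style factors
A = rng.standard_normal((N, N))
V = A @ A.T / N + np.eye(N) * 1e-3            # positive definite covariance
X_beta = rng.standard_normal((N, K))          # style factor loadings
x_target = rng.standard_normal(N)             # target (orthogonalized) loadings

w = cvx.Variable(N)
prob = cvx.Problem(cvx.Minimize(cvx.quad_form(w, V)),
                   [X_beta.T @ w == 0, x_target @ w == 1])
prob.solve()
print(prob.status, float(np.sqrt(prob.value)))  # minimum portfolio volatility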
def stabilize(desiredShot, aspectRatio, noDataFrames, imageSize, fps, lambda1=0.002, lambda2=0.0001, zoomSmooth=1):
    """From a time sequence of unstabilized frame boxes, compute a stabilized frame.

    All parameters are normalized with respect to frame size and time, so that
    simultaneously doubling the imageSize and the desiredShot does not change the
    solution, and neither does using twice as many frames and doubling the fps.

    The main differences with the paper are:
    - only the D, L11 and L13 terms are implemented
    - zoomSmooth was added

    If a frame in desiredShot goes outside of the original image, it is cropped.

    Reference:
    Gandhi Vineet, Ronfard Remi, Gleicher Michael
    Multi-Clip Video Editing from a Single Viewpoint
    European Conference on Visual Media Production (CVMP) 2014
    http://imagine.inrialpes.fr/people/vgandhi/GRG_CVMP_2014.pdf

    Keyword arguments:
    desiredShot -- an n x 4 numpy array containing on each line the box as [xmin, ymin, xmax, ymax]
    lambda1 -- see eq. (10) in the paper
    lambda2 -- see eq. (10) in the paper
    zoomSmooth -- a factor applied on the terms that deal with frame size in the regularization term:
                  raise it if the stabilized frame zooms in and out too much
    aspectRatio -- the desired output aspect ratio (e.g. 16/9.)
    noDataFrames -- the list of frames that have no desiredShot information - only regularization is used to stabilize these frames
    imageSize -- size of the original image; the code reads the width from imageSize[0] and the height from imageSize[1] (e.g. [1920, 1080] for HD)
    fps -- number of frames per second in the video - used for normalization
    """
    # print "noDataFrames:", noDataFrames
    # set to desiredShot[noDataFrames, :] = 0.
    imageHeight = float(imageSize[1])
    imageWidth = float(imageSize[0])
    # crop the desiredShot to the image window
    # we keep a 1-pixel margin to be sure that constraints can be satisfied
    margin = 1
    low_x1_flags = desiredShot[:, 0] < (0. + margin)
    desiredShot[low_x1_flags, 0] = 0. + margin
    low_x2_flags = desiredShot[:, 2] < (0. + margin)
    desiredShot[low_x2_flags, 2] = 0. + margin
    high_x1_flags = desiredShot[:, 0] > (imageWidth - margin)
    desiredShot[high_x1_flags, 0] = imageWidth - margin
    high_x2_flags = desiredShot[:, 2] > (imageWidth - margin)
    desiredShot[high_x2_flags, 2] = imageWidth - margin
    low_y1_flags = desiredShot[:, 1] < (0. + margin)
    desiredShot[low_y1_flags, 1] = 0. + margin
    low_y2_flags = desiredShot[:, 3] < (0. + margin)
    desiredShot[low_y2_flags, 3] = 0. + margin
    high_y1_flags = desiredShot[:, 1] > (imageHeight - margin)
    desiredShot[high_y1_flags, 1] = imageHeight - margin
    high_y2_flags = desiredShot[:, 3] > (imageHeight - margin)
    desiredShot[high_y2_flags, 3] = imageHeight - margin
    # Make sure that a crop of the given aspectRatio can be contained in imageSize and can contain the desiredShot.
    # This may be an issue e.g. when doing a 16/9 or a 4/3 movie from 2K.
    # Else, we must cut the desiredShot on both sides.
    for k in range(desiredShot.shape[0]):
        if (desiredShot[k, 2] - desiredShot[k, 0]) > (imageHeight * aspectRatio - margin):
            xcut = (desiredShot[k, 2] - desiredShot[k, 0]) - \
                (imageHeight * aspectRatio - margin)
            desiredShot[k, 2] -= xcut / 2
            desiredShot[k, 0] += xcut / 2
        if (desiredShot[k, 3] - desiredShot[k, 1]) > (imageWidth / aspectRatio - margin):
            ycut = (desiredShot[k, 3] - desiredShot[k, 1]) - \
                (imageWidth / aspectRatio - margin)
            desiredShot[k, 3] -= ycut / 2
            desiredShot[k, 1] += ycut / 2
    x_center = (desiredShot[:, 0] + desiredShot[:, 2]) / 2.
    y_center = (desiredShot[:, 1] + desiredShot[:, 3]) / 2.
    # elementwise maximum of each array
    half_height_opt = np.maximum((desiredShot[:, 2] - desiredShot[:, 0]) / aspectRatio,
                                 (desiredShot[:, 3] - desiredShot[:, 1])) / 2
    # smooth x_center, y_center and half_height_opt using a binomial filter (Marchand and Marmet 1983)
    # e.g. [1 2 1]/4 or [1 4 6 4 1]/16 (obtained by applying it twice)
    # TODO: ignore noDataFrames when smoothing!
    x_center_residual = x_center
    # binomial_3(x_center_residual)
    # binomial_3(x_center_residual)
    y_center_residual = y_center
    # binomial_3(y_center_residual)
    # binomial_3(y_center_residual)
    half_height_opt_residual = half_height_opt
    # binomial_3(half_height_opt_residual)
    # binomial_3(half_height_opt_residual)
    # we subtract 0.001 pixel to be sure that constraints can be satisfied
    half_width = (desiredShot[:, 2] - desiredShot[:, 0]) / 2. - 0.001
    zero_flags = half_width[:] < 0
    half_width[zero_flags] = 0.
    half_height = (desiredShot[:, 3] - desiredShot[:, 1]) / 2. - 0.001
    zero_flags = half_height[:] < 0
    half_height[zero_flags] = 0.
    # now trick the constraints so that there are no inner inclusion constraints at noDataFrames
    x_center[noDataFrames] = imageWidth / 2.
    half_width[noDataFrames] = -imageWidth / 2.  # negative on purpose
    y_center[noDataFrames] = imageHeight / 2.
    half_height[noDataFrames] = -imageHeight / 2.  # negative on purpose
    half_height_opt[noDataFrames] = imageHeight / 2.
    assert ((x_center - half_width) >= 0).all() and ((x_center + half_width) <= imageWidth).all()
    assert ((y_center - half_height) >= 0).all() and ((y_center + half_height) <= imageHeight).all()
    n = x_center.size
    print("n:", n)
    e = np.ones(shape=(n))
    x = cvx.Variable(n)
    y = cvx.Variable(n)
    h = cvx.Variable(n)  # half height (see sec. 4 in the paper)
    # compute the opposite of noDataFrames
    weights = np.ones(n)
    weights[noDataFrames] = 0.
    # do not use residuals on the optimal frame where there's no data
    # for f in [97, 98, 99, 100]:
    #     print f, weights[f], x_center[f], y_center[f], half_height_opt[f]
    # normalize with image height
    # version 1:
    weights /= imageHeight
    expr = cvx.sum_squares(weights * (x_center_residual - x)) + \
        cvx.sum_squares(weights * (y_center_residual - y)) + \
        cvx.sum_squares((weights / zoomSmooth) * (half_height_opt_residual - h))
    expr /= n  # normalize by the number of images, get a cost per image
    # end of version 1
    # version 2:
    # dataFrames = np.nonzero(weights)
    # expr = cvx.sum_squares(x_center[dataFrames] - x[dataFrames]) + \
    #     cvx.sum_squares(y_center[dataFrames] - y[dataFrames]) + \
    #     cvx.sum_squares(half_height_opt[dataFrames] - h[dataFrames]) / (zoomSmooth*zoomSmooth)
    # expr /= (imageHeight*imageHeight)*n  # normalize by the number of images, get a cost per image
    # end of version 2
    if lambda1 != 0.:
        lambda1Factor = lambda1 * fps / imageHeight
        # expr += lambda1Factor * (cvx.norm(D1 * x, 1) + cvx.norm(D1 * y, 1) + cvx.norm(D1 * h, 1) * zoomSmooth)
        if n > 1:
            expr += lambda1Factor * \
                (cvx.tv(x) + cvx.tv(y) + cvx.tv(h) * zoomSmooth)
    if lambda2 != 0.:
        lambda2Factor = lambda2 * fps * fps * fps / imageHeight
        # expr += lambda2Factor * (cvx.norm(D3 * x, 1) + cvx.norm(D3 * y, 1) + cvx.norm(D3 * h, 1) * zoomSmooth)
        if n > 2:
            expr += lambda2Factor * (cvx.norm(x[3:] - 3*x[2:n-1] + 3*x[1:n-2] - x[0:n-3], 1) +
                                     cvx.norm(y[3:] - 3*y[2:n-1] + 3*y[1:n-2] - y[0:n-3], 1) +
                                     cvx.norm(h[3:] - 3*h[2:n-1] + 3*h[1:n-2] - h[0:n-3], 1) * zoomSmooth)
    obj = cvx.Minimize(expr)
    # print expr
    print("H=%d, W=%d lambda1=%f lambda2=%f zoomSmooth=%f fps=%f imageHeight=%f" % (
        imageHeight, imageWidth, lambda1, lambda2, zoomSmooth, fps, imageHeight))
    # note that the following constraints are tricked (see above) at noDataFrames,
    # using negative values for half_width and half_height
    constraints = [h >= 0,
                   (x - aspectRatio * h) >= 0,
                   (x - aspectRatio * h) <= (x_center - half_width),
                   (x + aspectRatio * h) >= (x_center + half_width),
                   (x + aspectRatio * h) <= imageWidth,
                   (y - h) >= 0,
                   (y - h) <= (y_center - half_height),
                   (y + h) >= (y_center + half_height),
                   (y + h) <= imageHeight]
    prob = cvx.Problem(obj, constraints)
    tryagain = True
    tryreason = ""
    if tryagain or prob.status == cvx.INFEASIBLE or prob.status == cvx.UNBOUNDED:
        tryagain = False
        try:
            # ECOS, the default solver, is much better at solving our problems,
            # especially at handling frames where the actor is not visible
            result = prob.solve(solver=cvx.ECOS, verbose=True)
        except cvx.SolverError as e:
            tryagain = True
            tryreason = str(e)
    if tryagain or prob.status == cvx.INFEASIBLE or prob.status == cvx.UNBOUNDED:
        tryagain = False
        try:
            result = prob.solve(solver=cvx.SCS, verbose=True)
        except cvx.SolverError as e:
            tryagain = True
            tryreason = str(e)
    if tryagain or prob.status == cvx.INFEASIBLE or prob.status == cvx.UNBOUNDED:
        tryagain = False
        try:
            result = prob.solve(solver=cvx.CVXOPT, verbose=True)
        except cvx.SolverError as e:
            tryagain = True
            tryreason = str(e)
    if tryagain or prob.status == cvx.INFEASIBLE or prob.status == cvx.UNBOUNDED:
        tryagain = False
        try:
            result = prob.solve(solver=cvx.CVXOPT, kktsolver=cvx.ROBUST_KKTSOLVER, verbose=True)
        except cvx.SolverError as e:
            tryagain = True
            tryreason = str(e)
    if tryagain:
        raise cvx.SolverError(tryreason)
    if prob.status == cvx.INFEASIBLE or prob.status == cvx.UNBOUNDED:
        raise cvx.SolverError('Problem is infeasible or unbounded')
    print("result=", result, "\n")
    # reshape to column vectors so that np.hstack yields an n x 4 array of boxes
    optimised_xcenter = x.value.reshape(n, 1)
    optimised_ycenter = y.value.reshape(n, 1)
    optimised_height = h.value.reshape(n, 1)
    return np.hstack([optimised_xcenter - aspectRatio * optimised_height,
                      optimised_ycenter - optimised_height,
                      optimised_xcenter + aspectRatio * optimised_height,
                      optimised_ycenter + optimised_height])
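# Hypothetical call sketch (added; not part of the original module): stabilize
# 100 frames of a jittery 200x200 box tracked inside an HD image at 25 fps, with
# no missing-data frames. The tracking data is synthetic.
import numpy as np

n_frames = 100
rng = np.random.default_rng(1)
x0 = 800 + np.cumsum(rng.normal(0, 3, n_frames))  # jittery top-left corner
y0 = 400 + np.cumsum(rng.normal(0, 3, n_frames))
desiredShot = np.stack([x0, y0, x0 + 200, y0 + 200], axis=1)
boxes = stabilize(desiredShot, aspectRatio=16 / 9.,
                  noDataFrames=[], imageSize=[1920, 1080], fps=25.0)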
def stabilize_chunk(desiredShot, aspectRatio, noDataFrames, imageSize, fps,
                    crop_factor, apparent_motion, external_boundaries, screen_pos,
                    lambda1=0.002, lambda2=0.0001, zoomSmooth=1.5, lambda3=0.005):
    """From a time sequence of unstabilized frame boxes, compute a stabilized frame.

    All parameters are normalized with respect to frame size and time, so that
    simultaneously doubling the imageSize and the desiredShot does not change the
    solution, and neither does using twice as many frames and doubling the fps.

    The main differences with the paper are:
    - only the D, L11 and L13 terms are implemented
    - zoomSmooth was added
    - the shots are optimized over chunks of chunk_s*2 = 10s, and the first five
      seconds are kept. This makes the problem a lot more tractable.

    If a frame in desiredShot goes outside of the original image, it is cropped.

    Reference:
    Gandhi Vineet, Ronfard Remi, Gleicher Michael
    Multi-Clip Video Editing from a Single Viewpoint
    European Conference on Visual Media Production (CVMP) 2014
    http://imagine.inrialpes.fr/people/vgandhi/GRG_CVMP_2014.pdf

    Keyword arguments:
    desiredShot -- an n x 4 numpy array containing on each line the box as [xmin, ymin, xmax, ymax]
    lambda1 -- see eq. (10) in the paper
    lambda2 -- see eq. (10) in the paper
    lambda3 -- weight of the external-boundary (E) terms
    zoomSmooth -- a factor applied on the terms that deal with frame size in the regularization term:
                  raise it if the stabilized frame zooms in and out too much
    aspectRatio -- the desired output aspect ratio (e.g. 16/9.)
    noDataFrames -- the list of frames that have no desiredShot information - only regularization is used to stabilize these frames
    imageSize -- size of the original image; the code reads the width from imageSize[0] and the height from imageSize[1] (e.g. [1920, 1080] for HD)
    fps -- number of frames per second in the video - used for normalization
    crop_factor -- per-actor weights for the M1 (cropping) term of sec. 4.5 in the paper, indexed as [actor, frame, (x, y, h)]
    apparent_motion -- per-actor weights for the M2 (apparent motion) term of sec. 4.5 in the paper, same indexing
    external_boundaries -- an n x 6 array [xl, xl1, xr, xr1, tl, tr] used by the E (boundary) terms
    screen_pos -- per-frame horizontal screen-position factor applied to the x data term
    """
    # print "noDataFrames:", noDataFrames
    # print(noDataFrames, desiredShot)
    # set to desiredShot[noDataFrames, :] = 0.
    imageHeight = float(imageSize[1])
    imageWidth = float(imageSize[0])
    len_w = imageWidth / 2
    len_h = imageHeight / 2
    if imageHeight * aspectRatio < imageWidth:
        len_w = round((imageHeight * aspectRatio) / 2)
    elif imageWidth / aspectRatio < imageHeight:
        len_h = round((imageWidth / aspectRatio) / 2)
    # crop the desiredShot to the image window
    # we keep a 1-pixel margin to be sure that constraints can be satisfied
    margin = 1
    low_x1_flags = desiredShot[:, 0] < (0. + margin)
    desiredShot[low_x1_flags, 0] = 0. + margin
    low_x2_flags = desiredShot[:, 2] < (0. + margin)
    desiredShot[low_x2_flags, 2] = 0. + margin
    high_x1_flags = desiredShot[:, 0] > (imageWidth - margin)
    desiredShot[high_x1_flags, 0] = imageWidth - margin
    high_x2_flags = desiredShot[:, 2] > (imageWidth - margin)
    desiredShot[high_x2_flags, 2] = imageWidth - margin
    low_y1_flags = desiredShot[:, 1] < (0. + margin)
    desiredShot[low_y1_flags, 1] = 0. + margin
    low_y2_flags = desiredShot[:, 3] < (0. + margin)
    desiredShot[low_y2_flags, 3] = 0. + margin
    high_y1_flags = desiredShot[:, 1] > (imageHeight - margin)
    desiredShot[high_y1_flags, 1] = imageHeight - margin
    high_y2_flags = desiredShot[:, 3] > (imageHeight - margin)
    desiredShot[high_y2_flags, 3] = imageHeight - margin
    # Make sure that a crop of the given aspectRatio can be contained in imageSize and can contain the desiredShot.
    # This may be an issue e.g. when doing a 16/9 or a 4/3 movie from 2K.
    # Else, we must cut the desiredShot on both sides.
    for k in range(desiredShot.shape[0]):
        if (desiredShot[k, 2] - desiredShot[k, 0]) > (imageHeight * aspectRatio - margin):
            xcut = (desiredShot[k, 2] - desiredShot[k, 0]) - \
                (imageHeight * aspectRatio - margin)
            desiredShot[k, 2] -= xcut / 2
            desiredShot[k, 0] += xcut / 2
        if (desiredShot[k, 3] - desiredShot[k, 1]) > (imageWidth / aspectRatio - margin):
            ycut = (desiredShot[k, 3] - desiredShot[k, 1]) - \
                (imageWidth / aspectRatio - margin)
            desiredShot[k, 3] -= ycut / 2
            desiredShot[k, 1] += ycut / 2
    # print("desiredShot:", desiredShot)
    # print("noDataFrames:", noDataFrames)
    x_center = (desiredShot[:, 0] + desiredShot[:, 2]) / 2.
    y_center = (desiredShot[:, 1] + desiredShot[:, 3]) / 2.
    # elementwise maximum of each array
    half_height_opt = np.maximum((desiredShot[:, 2] - desiredShot[:, 0]) / aspectRatio,
                                 (desiredShot[:, 3] - desiredShot[:, 1])) / 2
    # smooth x_center, y_center and half_height_opt using a binomial filter (Marchand and Marmet 1983)
    # e.g. [1 2 1]/4 or [1 4 6 4 1]/16 (obtained by applying it twice)
    # TODO: ignore noDataFrames when smoothing!
    x_center_residual = x_center
    # binomial_3(x_center_residual)
    # binomial_3(x_center_residual)
    y_center_residual = y_center
    # binomial_3(y_center_residual)
    # binomial_3(y_center_residual)
    half_height_opt_residual = half_height_opt
    # binomial_3(half_height_opt_residual)
    # binomial_3(half_height_opt_residual)
    half_width = (desiredShot[:, 2] - desiredShot[:, 0]) / 2.
    zero_flags = half_width[:] < 0
    half_width[zero_flags] = 0.
    half_height = (desiredShot[:, 3] - desiredShot[:, 1]) / 2.
    zero_flags = half_height[:] < 0
    half_height[zero_flags] = 0.
    # now trick the constraints so that there are no inner inclusion constraints at noDataFrames
    x_center[noDataFrames] = imageWidth / 2.
    half_width[noDataFrames] = -imageWidth / 2.  # negative on purpose
    y_center[noDataFrames] = imageHeight / 2.
    half_height[noDataFrames] = -imageHeight / 2.  # negative on purpose
    half_height_opt[noDataFrames] = imageHeight / 2.
    # print(half_height[noDataFrames], noDataFrames, imageHeight)
    # print(np.isnan(half_height))
    # for i in range(len(desiredShot)):
    #     if (y_center[i] - half_height[i]) < 0 or (y_center[i] + half_height[i]) > imageHeight:
    #         print('indice ', i)
    external_boundaries[noDataFrames] = [0, 0, 0, 0, 0, 0]
    l_tl = []
    for t in external_boundaries[:, 4]:
        if t == 1:
            l_tl.append(0.)
        else:
            l_tl.append(1.)
    tl_inv = np.array(l_tl)
    l_tr = []
    for t in external_boundaries[:, 5]:
        if t == 1:
            l_tr.append(0.)
        else:
            l_tr.append(1.)
    tr_inv = np.array(l_tr)
    tl_inv[noDataFrames] = 0
    tr_inv[noDataFrames] = 0
    assert ((x_center - half_width) >= 0).all() and ((x_center + half_width) <= imageWidth).all()
    assert ((y_center - half_height) >= 0).all() and ((y_center + half_height) <= imageHeight).all()
    n = x_center.size
    # print "n:", n
    # We split the problem into chunks of fixed duration.
    # We compute a subsolution for the current chunk and the next chunk, and ensure continuity for the
    # variation (1st derivative) and jerk (3rd derivative) terms.
    # Then we only keep the solution for the current chunk and advance.
    # compute the opposite of noDataFrames
    # normalize with image height
    weightsAll = np.ones(n) / imageHeight
    weightsAll[noDataFrames] = 0.
    # do not use residuals on the optimal frame where there's no data
    # print "weightsAll:", weightsAll
    # print "half_height_opt:", half_height_opt
    optimised_xcenter = np.zeros(n)
    optimised_ycenter = np.zeros(n)
    optimised_height = np.zeros(n)
    chunk_s = 5  # size of a chunk in seconds
    chunk_n = int(chunk_s * fps)  # number of samples in a chunk
    full_chunk_n = chunk_n * 2  # number of samples in a subproblem
    # starting index for the chunk (also used to check if it is the first chunk)
    chunk_start = 0
    while chunk_start < n:
        chunk_end = min(n, chunk_start + chunk_n)
        chunk_size = chunk_end - chunk_start
        full_chunk_end = min(n, chunk_start + full_chunk_n)
        full_chunk_size = full_chunk_end - chunk_start
        # print("chunk:", chunk_start, chunk_end, full_chunk_end)
        x = cvx.Variable(full_chunk_size)
        y = cvx.Variable(full_chunk_size)
        # half height (see sec. 4 in the paper)
        h = cvx.Variable(full_chunk_size)
        weights = weightsAll[chunk_start:full_chunk_end]
        x_center_chunk = x_center[chunk_start:full_chunk_end]
        y_center_chunk = y_center[chunk_start:full_chunk_end]
        half_height_chunk = half_height[chunk_start:full_chunk_end]
        half_width_chunk = half_width[chunk_start:full_chunk_end]
        x_center_residual_chunk = x_center[chunk_start:full_chunk_end]
        y_center_residual_chunk = y_center[chunk_start:full_chunk_end]
        half_height_opt_residual_chunk = half_height_opt_residual[chunk_start:full_chunk_end]
        # vector of screen positions
        h_vector = screen_pos[chunk_start:full_chunk_end]
        # M1 term, see paper sec. 4.5
        c_x_factor = crop_factor[:, chunk_start:full_chunk_end-1, 0]
        c_y_factor = crop_factor[:, chunk_start:full_chunk_end-1, 1]
        c_h_factor = crop_factor[:, chunk_start:full_chunk_end-1, 2]
        # M2 term, see paper sec. 4.5
        b_x_factor = apparent_motion[:, chunk_start:full_chunk_end-1, 0]
        b_y_factor = apparent_motion[:, chunk_start:full_chunk_end-1, 1]
        b_h_factor = apparent_motion[:, chunk_start:full_chunk_end-1, 2]
        # E term [xl, xl1, xr, xr1, tl, tr]
        tl = external_boundaries[chunk_start:full_chunk_end, 4]
        tr = external_boundaries[chunk_start:full_chunk_end, 5]
        inv_tl = tl_inv[chunk_start:full_chunk_end]
        inv_tr = tr_inv[chunk_start:full_chunk_end]
        xl = external_boundaries[chunk_start:full_chunk_end, 0]
        xl1 = external_boundaries[chunk_start:full_chunk_end, 1]
        xr = external_boundaries[chunk_start:full_chunk_end, 2]
        xr1 = external_boundaries[chunk_start:full_chunk_end, 3]
        # assert ((x_center_chunk - half_width_chunk) >= 0).all() and ((x_center_chunk + half_width_chunk) <= imageWidth).all()
        # assert ((y_center_chunk - half_height_chunk) >= 0).all() and ((y_center_chunk + half_height_chunk) <= imageHeight).all()
        # for f in [97, 98, 99, 100]:
        #     print f, weights[f], x_center[f], y_center[f], half_height_opt[f]
        expr = cvx.sum_squares(weights * (x_center_residual_chunk +
                                          (0.17 * aspectRatio * half_height_opt_residual_chunk * h_vector) - x)) + \
            cvx.sum_squares(weights * (y_center_residual_chunk - y)) + \
            cvx.sum_squares(weights * (half_height_opt_residual_chunk - h))
        expr /= n  # normalize by the number of images, get a cost per image
        # end of version 1
        # version 2:
        # dataFrames = np.nonzero(weights)
        # expr = cvx.sum_squares(x_center[dataFrames] - x[dataFrames]) + \
        #     cvx.sum_squares(y_center[dataFrames] - y[dataFrames]) + \
        #     cvx.sum_squares(half_height_opt[dataFrames] - h[dataFrames]) / (zoomSmooth*zoomSmooth)
        # expr /= (imageHeight*imageHeight)*n  # normalize by the number of images, get a cost per image
        # end of version 2
        if lambda1 != 0.:
            lambda1Factor = lambda1 * fps / imageHeight
            # print("lambda 1 ", lambda1Factor)
            if n > 1:
                expr += lambda1Factor * \
                    (cvx.tv(x) + cvx.tv(y) + cvx.tv(h) * zoomSmooth)
            # if not the first chunk, add continuity with previous samples
            if chunk_start >= 1:
                expr += lambda1Factor * (cvx.abs(x[0] - optimised_xcenter[chunk_start - 1]) +
                                         cvx.abs(y[0] - optimised_ycenter[chunk_start - 1]) +
                                         cvx.abs(h[0] - optimised_height[chunk_start - 1]) * zoomSmooth)
        if lambda2 != 0.:
            lambda2Factor = lambda2 * fps * fps * fps / imageHeight
            # print("lambda 2 ", lambda2Factor)
            if n > 2:
                expr += lambda2Factor * (cvx.norm(x[3:] - 3*x[2:full_chunk_size-1] + 3*x[1:full_chunk_size-2] - x[0:full_chunk_size-3], 1) +
                                         cvx.norm(y[3:] - 3*y[2:full_chunk_size-1] + 3*y[1:full_chunk_size-2] - y[0:full_chunk_size-3], 1) +
                                         cvx.norm(h[3:] - 3*h[2:full_chunk_size-1] + 3*h[1:full_chunk_size-2] - h[0:full_chunk_size-3], 1) * zoomSmooth)
            # if not the first chunk, add continuity with previous samples
            if chunk_start >= 3 and chunk_size >= 3:
                expr += lambda2Factor * ((cvx.abs(x[0] - 3 * optimised_xcenter[chunk_start - 1] + 3 * optimised_xcenter[chunk_start - 2] - optimised_xcenter[chunk_start - 3]) +
                                          cvx.abs(x[1] - 3 * x[0] + 3 * optimised_xcenter[chunk_start - 1] - optimised_xcenter[chunk_start - 2]) +
                                          cvx.abs(x[2] - 3 * x[1] + 3 * x[0] - optimised_xcenter[chunk_start - 1])) +
                                         (cvx.abs(y[0] - 3 * optimised_ycenter[chunk_start - 1] + 3 * optimised_ycenter[chunk_start - 2] - optimised_ycenter[chunk_start - 3]) +
                                          cvx.abs(y[1] - 3 * y[0] + 3 * optimised_ycenter[chunk_start - 1] - optimised_ycenter[chunk_start - 2]) +
                                          cvx.abs(y[2] - 3 * y[1] + 3 * y[0] - optimised_ycenter[chunk_start - 1])) +
                                         (cvx.abs(h[0] - 3 * optimised_height[chunk_start - 1] + 3 * optimised_height[chunk_start - 2] - optimised_height[chunk_start - 3]) +
                                          cvx.abs(h[1] - 3 * h[0] + 3 * optimised_height[chunk_start - 1] - optimised_height[chunk_start - 2]) +
                                          cvx.abs(h[2] - 3 * h[1] + 3 * h[0] - optimised_height[chunk_start - 1])) * zoomSmooth)
        if lambda3 != 0.:
            lambda3Factor = lambda3 * fps / imageHeight
            lambda2Factor = lambda2 * fps * fps * fps / imageHeight
            lambdaM = 5
            if n > 1:
                m1_term = 0
                for c_x, c_y, c_h in zip(c_x_factor, c_y_factor, c_h_factor):
                    m1_term += lambdaM * (cvx.norm(c_x*(x[1:]-x[0:full_chunk_size-1]), 1) +
                                          cvx.norm(c_y*(y[1:]-y[0:full_chunk_size-1]), 1) +
                                          cvx.norm(c_h*(h[1:]-h[0:full_chunk_size-1]), 1) * zoomSmooth)
                if chunk_start >= 1:
                    for c in crop_factor:
                        c_x = c[chunk_start-1, 0]
                        c_y = c[chunk_start-1, 1]
                        c_h = c[chunk_start-1, 2]
                        m1_term += lambdaM * (cvx.norm(c_x*(x[0]-optimised_xcenter[chunk_start-1]), 1) +
                                              cvx.norm(c_y*(y[0]-optimised_ycenter[chunk_start-1]), 1) +
                                              cvx.norm(c_h*(h[0]-optimised_height[chunk_start-1]), 1) * zoomSmooth)
            if n > 1:
                m2_term = 0
                for b_x, b_y, b_h_g in zip(b_x_factor, b_y_factor, b_h_factor):
                    b_h = gaussian_filter(b_h_g, sigma=5)
                    # print(b_x, b_x.dtype)
                    m2_term += lambdaM * (cvx.neg((b_x-(x[1:]-x[0:full_chunk_size-1]))*b_x) +
                                          cvx.neg((b_y-(y[1:]-y[0:full_chunk_size-1]))*b_y) +
                                          cvx.neg((b_h-(h[1:]-h[0:full_chunk_size-1]))*b_h) * zoomSmooth)
                if chunk_start >= 1:
                    for b in apparent_motion:
                        b_x = b[chunk_start-1, 0]
                        b_y = b[chunk_start-1, 1]
                        b_h = b[chunk_start-1, 2]
                        m2_term += lambdaM * (cvx.neg((b_x-(x[0]-optimised_xcenter[chunk_start-1]))*b_x) +
                                              cvx.neg((b_y-(y[0]-optimised_ycenter[chunk_start-1]))*b_y) +
                                              cvx.neg((b_h-(h[0]-optimised_height[chunk_start-1]))*b_h) * zoomSmooth)
            # expr += m1_term + m2_term
            # if n > 1:
            #     # E out
            #     expr += lambda3Factor * ((inv_tl * cvx.pos(xl1 - x + aspectRatio * h)) +
            #                              (inv_tr * cvx.pos(x + aspectRatio * h - xr1)))
            #     # E in
            #     expr += lambda3Factor * ((tl * cvx.pos(x - aspectRatio * h - xl)) +
            #                              (tr * cvx.pos(xr - x - aspectRatio * h)))
        obj = cvx.Minimize(expr)
        # print expr
        # print("H=%d, W=%d lambda1=%f lambda2=%f zoomSmooth=%f fps=%f imageHeight=%f" % (
        #     imageHeight, imageWidth, lambda1, lambda2, zoomSmooth, fps, imageHeight))
        # note that the following constraints are tricked (see above) at noDataFrames,
        # using negative values for half_width and half_height
        constraints = [h >= 0,
                       (x - aspectRatio * h) >= 0,
                       (x - aspectRatio * h) <= (x_center_chunk - half_width_chunk),
                       (x + aspectRatio * h) >= (x_center_chunk + half_width_chunk),
                       (x + aspectRatio * h) <= imageWidth,
                       aspectRatio * h <= len_w,
                       (y - h) >= 0,
                       (y - h) <= (y_center_chunk - half_height_chunk),
                       (y + h) >= (y_center_chunk + half_height_chunk),
                       (y + h) <= imageHeight,
                       h <= len_h]
        prob = cvx.Problem(obj, constraints)
        tryagain = True
        tryreason = ""
        if tryagain or prob.status == cvx.INFEASIBLE or prob.status == cvx.UNBOUNDED:
            tryagain = False
            try:
                # ECOS, the default solver, is much better at solving our problems,
                # especially at handling frames where the actor is not visible
                # all tolerances are multiplied by 10
                result = prob.solve(solver=cvx.ECOS, verbose=False,
                                    abstol=1e-6, reltol=1e-5,
                                    abstol_inacc=5e-4, reltol_inacc=5e-4,
                                    feastol_inacc=1e-3)
            except cvx.SolverError as e:
                tryagain = True
                tryreason = str(e)
        if tryagain or prob.status == cvx.INFEASIBLE or prob.status == cvx.UNBOUNDED:
            tryagain = False
            try:
                result = prob.solve(solver=cvx.SCS, verbose=False, max_iters=2500, eps=1e-2)
            except cvx.SolverError as e:
                tryagain = True
                tryreason = str(e)
        if tryagain or prob.status == cvx.INFEASIBLE or prob.status == cvx.UNBOUNDED:
            tryagain = False
            try:
                result = prob.solve(solver=cvx.CVXOPT, verbose=False,
                                    abstol=1e-6, reltol=1e-5, feastol=1e-6)
            except cvx.SolverError as e:
                tryagain = True
                tryreason = str(e)
        if tryagain or prob.status == cvx.INFEASIBLE or prob.status == cvx.UNBOUNDED:
            tryagain = False
            try:
                result = prob.solve(solver=cvx.CVXOPT, kktsolver=cvx.ROBUST_KKTSOLVER,
                                    verbose=False, abstol=1e-6, reltol=1e-5, feastol=1e-6)
            except cvx.SolverError as e:
                tryagain = True
                tryreason = str(e)
        if tryagain:
            raise cvx.SolverError(tryreason)
        if prob.status == cvx.INFEASIBLE or prob.status == cvx.UNBOUNDED:
            raise cvx.SolverError('Problem is infeasible or unbounded')
            # raise ValueError('Yeah!')
        # print("result=", result, "\n")
        if full_chunk_end >= n:
            # last chunk - get the full chunk
            optimised_xcenter[chunk_start:full_chunk_end] = x.value.reshape(full_chunk_size)
            optimised_ycenter[chunk_start:full_chunk_end] = y.value.reshape(full_chunk_size)
            optimised_height[chunk_start:full_chunk_end] = h.value.reshape(full_chunk_size)
            chunk_start = full_chunk_end
        else:
            # only get the chunk and advance
            optimised_xcenter[chunk_start:chunk_end] = x.value[:chunk_size].reshape(chunk_size)
            optimised_ycenter[chunk_start:chunk_end] = y.value[:chunk_size].reshape(chunk_size)
            optimised_height[chunk_start:chunk_end] = h.value[:chunk_size].reshape(chunk_size)
            chunk_start = chunk_end
    return np.vstack([optimised_xcenter - aspectRatio * optimised_height,
                      optimised_ycenter - optimised_height,
                      optimised_xcenter + aspectRatio * optimised_height,
                      optimised_ycenter + optimised_height]).transpose()
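# Reference sketch (added; not part of the original module): the ECOS -> SCS ->
# CVXOPT fallback cascade used above (and in stabilize()) factored into a small
# helper. The original code keeps it inline; solver-specific tolerances are
# omitted here.
import cvxpy as cvx


def solve_with_fallbacks(prob, attempts=None):
    """Try each (solver, kwargs) pair until one returns a usable status."""
    if attempts is None:
        attempts = [(cvx.ECOS, {}), (cvx.SCS, {}), (cvx.CVXOPT, {})]
    tryreason = ""
    for solver, kwargs in attempts:
        try:
            result = prob.solve(solver=solver, verbose=False, **kwargs)
        except cvx.SolverError as e:
            tryreason = str(e)
            continue
        if prob.status not in (cvx.INFEASIBLE, cvx.UNBOUNDED):
            return result
    raise cvx.SolverError(tryreason or 'Problem is infeasible or unbounded')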
def _lp() -> _np.ndarray:
    """
    Composes and solves a linear program to find a good quantizer.

    Returns
    -------
    _np.ndarray
        Solution of the LP.

    Raises
    ------
    cvxpy.SolverError
        If no solver named `solver` is found.
    """
    m = system.m
    p = system.l
    A_tilde = system.A + system.B2 @ system.C2
    if gain_wv == inf:
        f = zeros((1 + m**2 * T + p * m * (T - 1), 1))
    else:
        f = zeros((1 + 2 * m**2 * T + p * m * (T - 1), 1))
    f[0, 0] = 1

    # compose Phi
    Phi = zeros((m * p * (T - 1), m * m * T))
    for i in range(1, T):
        Phi_dash = kron(
            system.C1 @ mpow(A_tilde, i - 1) @ system.B2,
            eye(m)
        )
        for j in range(i, T):
            # TODO: Any easier way?
            Phi[(j - 1) * m * p: j * m * p,
                (j - i) * m * m: (j - i + 1) * m * m] = Phi_dash

    # making 'matrix -> vector' transposer
    eye_sumE = kron(
        ones((1, m * (T - 1))),
        eye(p)
    )
    eye_sumH = kron(
        ones((1, m * T)),
        eye(m)
    )

    # finalize
    if gain_wv == inf:
        A = block([
            [-ones((p, 1)), zeros((p, m * m * T)), eye_sumE],
            [zeros((p * m * (T - 1), 1)), Phi, -eye(m * p * (T - 1))],
            [zeros((p * m * (T - 1), 1)), -Phi, -eye(m * p * (T - 1))],
        ])
    else:
        A = block([
            [-ones((p, 1)), zeros((p, m * m * T)), zeros((p, m * m * T)), eye_sumE],
            [zeros((p * m * (T - 1), 1)), Phi, zeros((p * m * (T - 1), m * m * T)), -eye(m * p * (T - 1))],
            [zeros((p * m * (T - 1), 1)), -Phi, zeros((p * m * (T - 1), m * m * T)), -eye(m * p * (T - 1))],
            [zeros((m, 1)), zeros((m, m * m * T)), eye_sumH, zeros((m, m * p * (T - 1)))],
            [zeros((m * m * T, 1)), eye(m * m * T), -eye(m * m * T), zeros((m * m * T, m * p * (T - 1)))],
            [zeros((m * m * T, 1)), -eye(m * m * T), -eye(m * m * T), zeros((m * m * T, m * p * (T - 1)))],
        ])

    # making C A^k B, changing matrix to vector
    CAB = zeros(((T - 1) * p, m))
    el_CAB = zeros((m * p * (T - 1), 1))
    for i in range(1, T):
        CAkB = system.C1 @ mpow(A_tilde, i) @ system.B2
        CAB[(i - 1) * p:i * p, 0:m] = CAkB
    for j in range(1, p * (T - 1) + 1):
        for i in range(1, m + 1):
            el_CAB[i + (j - 1) * m - 1, 0] = CAB[j - 1, i - 1]

    if gain_wv == inf:
        b = block([
            [-abs(system.C1 @ system.B2) @ ones((m, 1))],
            [-el_CAB],
            [el_CAB]
        ])
    else:
        b = block([
            [-abs((system.C1 @ system.B2) @ ones((m, 1)))],
            [-el_CAB],
            [el_CAB],
            [(gain_wv - 1) * ones((m, 1))],
            [zeros((m * m * T * 2, 1))],
        ])

    # solve LP
    x = cvxpy.Variable((f.shape[0], 1))
    objective = cvxpy.Minimize(f.transpose() @ x)
    constraints = [A @ x <= b]
    problem = cvxpy.Problem(objective, constraints)
    try:
        problem.solve(solver=solver or None)
    except cvxpy.SolverError as e:
        raise cvxpy.SolverError(f"Error from CVXPY.\n{str(e)}")

    return matrix(x.value)
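# Generic sketch (added; not part of the original module) of the LP solved at the
# end of _lp(): minimize f'x subject to A x <= b, with tiny synthetic data and
# the same try/except handling around cvxpy.SolverError.
import cvxpy
import numpy as np

f = np.array([[1.0], [0.0]])
A = np.array([[-1.0, 0.0], [0.0, -1.0], [1.0, 1.0]])
b = np.array([[0.0], [0.0], [1.0]])

x = cvxpy.Variable((f.shape[0], 1))
problem = cvxpy.Problem(cvxpy.Minimize(f.T @ x), [A @ x <= b])
try:
    problem.solve()  # _lp() passes solver=solver or None instead
except cvxpy.SolverError as e:
    raise cvxpy.SolverError(f"Error from CVXPY.\n{e}")
print(problem.status, x.value.ravel())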