def assortX(prod, C, p, v, eps, algo=None, db=None, normConst=None, feasibles=None):
    """Find a (near-)optimal assortment by bisection on the revenue threshold K.

    Binary-searches K in [0, max(p)]: at each step `get_nn_set` returns the set
    maximizing pseudo-revenue for threshold K, and the feasibility test
    maxPseudoRev / v[0] >= K decides which half to keep.

    Args:
        prod: number of products (indices 1..prod; 0 is the no-purchase option).
        C: capacity / assortment-size constraint passed through to get_nn_set.
        p: price vector (indexable, max(p) bounds the optimal revenue).
        v: preference weights; v[0] is the no-purchase weight.
        eps: bisection termination width.
        algo, db, normConst, feasibles: forwarded verbatim to get_nn_set.

    Returns:
        (maxRev, maxSet, timeTaken, queryTimeLog).
    """
    st = time.time()
    L = 0          # lower bound of the search space
    U = max(p)     # upper bound: revenue can never exceed the highest price
    count = 0
    queryTimeLog = 0
    # Initialize so calcRev below is well-defined even if U - L <= eps
    # on entry and the loop body never executes (empty set ⇒ zero revenue).
    maxSet = []
    while (U - L) > eps:
        K = (U + L) / 2.0  # explicit float division; Python 2 '/' would floor on ints
        maxPseudoRev, maxSet, queryTimeLog = get_nn_set(
            v, p, K, prod, C, db, normConst, algo, feasibles, queryTimeLog)
        if (maxPseudoRev / v[0]) >= K:
            # Threshold K is achievable: raise the lower bound.
            L = K
        else:
            # K is too ambitious: lower the upper bound.
            U = K
        count += 1
    maxRev = calcRev(maxSet, p, v, prod)
    timeTaken = time.time() - st
    return maxRev, maxSet, timeTaken, queryTimeLog
def capAst_AssortExact(prod, C, p, v, meta):
    """Exact capacitated-assortment optimization via bisection + linear scan.

    Same bisection on the revenue threshold K as assortX, but each comparison
    is answered exactly by scanning all products: the best set for threshold k
    consists of the (at most C) products with the largest positive (p - k) * v.

    Args:
        prod: number of products (p and v have length prod + 1; index 0 is
              the no-purchase option — assumed from the slicing below, confirm).
        C: maximum assortment size.
        p: numpy price vector.
        v: numpy preference-weight vector; v[0] is the no-purchase weight.
        meta: dict; only meta['eps'] (bisection width) is used.

    Returns:
        (maxRev, maxSet, timeTaken).
    """

    def createArray(pminusk, v):
        # Elementwise pseudo-revenue contributions (p_i - k) * v_i.
        return np.multiply(pminusk, v)

    def linearSearch(p, k, v, C, prod):
        # Exact comparison oracle for threshold k: pick up to C products with
        # the largest positive contribution.
        start = time.time()
        # BUGFIX: use the parameter k, not the outer-scope K the original
        # accidentally captured via closure (shadowing bug).
        bigArray = createArray(p - k, v)
        # Indices of the C largest contributions (argsort ascending, take tail).
        candidate_product_idxes = np.argsort(bigArray)[prod + 1 - C:]
        # Keep only those with strictly positive contribution.
        maxSet = sorted(
            candidate_product_idxes[bigArray[candidate_product_idxes] > 0])
        maxPseudoRev = sum(bigArray[maxSet])
        return maxPseudoRev, maxSet, time.time() - start

    st = time.time()
    L = 0          # lower bound of the search space
    U = max(p)     # upper bound: highest price
    count = 0
    # Ensure maxSet is defined even if the loop body never runs
    # (U - L <= eps on entry); empty set ⇒ zero revenue.
    maxSet = []
    while (U - L) > meta['eps']:
        K = (U + L) / 2.0  # explicit float division; Python 2 '/' floors on ints
        maxPseudoRev, maxSet, queryTimeLog = linearSearch(p, K, v, C, prod)
        print("\t\t\tAssortExact querytime: %s  for K= %s" % (queryTimeLog, K))
        if (maxPseudoRev / v[0]) >= K:
            L = K  # threshold achievable: search upper half
        else:
            U = K  # threshold too high: search lower half
        count += 1
    maxRev = calcRev(maxSet, p, v, prod)
    timeTaken = time.time() - st
    print("\t\tAssortExact Opt Set Size: %s" % len(maxSet))
    print("\t\tAssortExact Opt Set: %s" % maxSet)
    print("\t\tAssortExact Opt Rev: %s" % maxRev)
    return maxRev, maxSet, timeTaken
def genAst_AssortBZ(prod, C, p, v, meta):
    """General-constraint assortment optimization via noisy binary search
    (Burnashev-Zigangirov-style) over the revenue threshold.

    Maintains a discretized log-probability distribution over the optimal
    revenue in [0, max(p)], repeatedly queries get_nn_set at the current
    median, and multiplicatively updates the belief. Terminates when the
    belief interval is narrower than meta['eps'] or after max_iters queries.

    Args:
        prod, C, p, v: as in assortX; v[0] is the no-purchase weight.
        meta: dict; uses 'eps', 'db_BZ', 'normConst', 'feasibles', and
              optionally 'correct_compstep_probability' (>= 0.5 to take effect).

    Returns:
        (best_set_revenue, best_set, timeTaken).
    """
    L = 0          # lower bound on the objective
    st = time.time()
    queryTimeLog = 0
    count = 0
    U = max(p)     # upper bound on the objective
    best_set_revenue = -1
    best_set = []

    # NBS parameters.
    compstep_prob = 0.99  # assumed probability the comparison oracle is correct
    if 'correct_compstep_probability' in meta:  # idiomatic membership test (was meta.keys())
        if meta['correct_compstep_probability'] >= 0.5:
            compstep_prob = meta['correct_compstep_probability']
    step_width = 1e-2
    max_iters = 1000
    early_termination_width = meta['eps']
    belief_fraction = 0.95

    # Uniform initial belief over the discretized range, kept in log space
    # for numerical stability.
    range_idx = np.arange(L, U, step_width)
    range_dist = np.ones_like(range_idx)
    range_dist = range_dist / np.sum(range_dist)
    range_dist = np.log(range_dist)

    def get_pivot(range_dist):
        # Median of the belief distribution. Summing many tiny probabilities
        # is unstable, so approach the median randomly from below or above.
        exp_dist = np.exp(range_dist)
        alpha = exp_dist.sum() * 0.5
        if random.choice([True, False]):
            try:
                return range_idx[exp_dist.cumsum() < alpha][-1]
            except IndexError:  # mask selected nothing (was a bare except)
                return range_idx[::-1][exp_dist[::-1].cumsum() < alpha][-1]
        else:
            return range_idx[::-1][exp_dist[::-1].cumsum() < alpha][-1]

    def get_belief_interval(range_dist, fraction=belief_fraction):
        # Central `fraction` credible interval of the belief distribution.
        exp_dist = np.exp(range_dist)
        epsilon = 0.5 * (1 - fraction)
        epsilon = exp_dist.sum() * epsilon
        if (exp_dist[0] < epsilon):
            left = range_idx[exp_dist.cumsum() < epsilon][-1]
        else:
            left = 0
        right = range_idx[exp_dist.cumsum() > (exp_dist.sum() - epsilon)][0]
        return left, right

    for i in range(max_iters):
        count += 1
        # Query the comparison oracle at the belief median.
        median = get_pivot(range_dist)
        maxPseudoRev, maxSet, queryTimeLog = get_nn_set(
            v,
            p,
            median,
            prod,
            C,
            db=meta['db_BZ'],
            normConst=meta['normConst'],
            algo='general_case_BZ',
            feasibles=meta['feasibles'],
            queryTimeLog=0)
        # Track the best set seen so far by its true revenue.
        current_set_revenue = calcRev(maxSet, p, v, prod)
        if current_set_revenue > best_set_revenue:
            best_set, best_set_revenue = maxSet, current_set_revenue
        # Multiplicative belief update: boost the half the oracle points to.
        if (maxPseudoRev / v[0]) >= median:
            range_dist[range_idx >= median] += np.log(compstep_prob)
            range_dist[range_idx < median] += np.log(1 - compstep_prob)
        else:
            range_dist[range_idx <= median] += np.log(compstep_prob)
            range_dist[range_idx > median] += np.log(1 - compstep_prob)
        # Revenue below best_set_revenue is ruled out: move that density
        # uniformly into the surviving upper region.
        shift_density_total = np.sum(
            np.exp(range_dist[range_idx < best_set_revenue]))
        if (shift_density_total > 0):
            # -inf log-probability; equals np.log(0) without the RuntimeWarning.
            range_dist[range_idx < best_set_revenue] = -np.inf
            range_dist[range_idx >= best_set_revenue] += np.log(
                shift_density_total /
                len(range_dist[range_idx >= best_set_revenue]))
        # Renormalize in log space to avoid overflow in np.exp.
        range_dist -= np.max(range_dist)
        belief_start, belief_end = get_belief_interval(range_dist)
        if (belief_end - belief_start) <= early_termination_width:
            break
    timeTaken = time.time() - st
    print("\t\tAssortBZ-Z Opt Set Size: %s" % len(best_set))
    print("\t\tAssortBZ-Z Opt Set: %s" % best_set)
    return best_set_revenue, best_set, timeTaken