def parse_resp(content: dict, email: str) -> Union[dict, None]:
    """Translate a breach-API JSON payload into the common result record.

    Returns the shared `result(...)` dict with `is_leak` set from the
    truthiness of the 'breaches' field, or None when `content` is not a
    mapping (e.g. the request layer handed us None).
    """
    try:
        # None (or any non-subscriptable content) raises TypeError here.
        is_leak = bool(content['breaches'])
        return result(email=email, service=__name__, is_leak=is_leak)
    except TypeError:
        return None
def firefox(d: WebDriver, email: str) -> Union[dict, None]:
    """Check an email via the Firefox Monitor search flow driven by `d`.

    :param d: selenium WebDriver used by `process_search`
    :param email: address to look up
    :return: the shared `result(...)` dict, or None when the search raised
             (the error is reported via `print_error`).

    Fix: the original annotated `-> dict` but implicitly returned None on
    the exception path; the annotation now matches (consistent with the
    sibling `parse_resp`), and the None return is explicit.
    """
    try:
        if process_search(d=d, email=email):
            return result(email=email, service=__name__, is_leak=True)
        return result(email=email, service=__name__, is_leak=False)
    except Exception as e:
        # best-effort: report and signal "no verdict" rather than crash
        print_error(e, service=__name__)
        return None
async def haveibeenpwned(email: str) -> dict:
    """Query the haveibeenpwned endpoint for `email`.

    HTTP 200 means the address appears in a breach, 404 means it does not;
    any other status is delegated to `unexpected_status`. Errors are
    reported via `print_error` (falling through to an implicit None).
    """
    lookup_url = f'{haveibeenpwned_url}{email}'
    try:
        async with aiohttp.request(method='GET', url=lookup_url,
                                   headers=get_headers()) as resp:
            if resp.status == 404:
                return result(email=email, service=__name__, is_leak=False)
            if resp.status == 200:
                return result(email=email, service=__name__, is_leak=True)
            await unexpected_status(resp=resp, service=__name__)
    except Exception as e:
        print_error(e, service=__name__)
def recentTweets(twitterID, query):
    """Fetch the 20 most recent tweets for `twitterID` as a result record.

    NOTE: the incoming `query` argument is discarded — the endpoint path is
    rebuilt here and recorded in the returned result.
    """
    query = "statuses/user_timeline.json?user_id=" + str(twitterID) + "&count=20"
    tweets = fireRequest(query)
    record = utils.result("Twitter", id=twitterID, qtype="status",
                          query=query, numResults=len(tweets))
    record["results"].extend(tweets)
    return record
def recentStatus(userID, query):
    """Fetch Facebook statuses for `userID` posted within the DIFFTIME window."""
    since = int(time.time()) - DIFFTIME
    fql = STATUS_QUERY.replace("$UID$", str(userID)).replace("$TIME$", str(since))
    response = fireRequest(fql, TYPES[1])
    matches = response["data"]
    record = utils.result("Facebook", id=userID, qtype="status",
                          query=query, numResults=len(matches))
    record["results"].extend(matches)
    return record
def recentPhotos(userID, query):
    """Fetch Facebook photos for `userID` posted within the DIFFTIME window."""
    since = int(time.time()) - DIFFTIME
    fql = PHOTO_QUERY.replace("$UID$", str(userID)).replace("$TIME$", str(since))
    response = fireRequest(fql, TYPES[0])
    matches = response["data"]
    record = utils.result("Facebook", id=userID, qtype="photo",
                          query=query, numResults=len(matches))
    record["results"].extend(matches)
    return record
def recentLocationPost(userID, query):
    """Fetch Facebook location posts for `userID` within the DIFFTIME window."""
    since = int(time.time()) - DIFFTIME
    fql = LOCATION_POST_QUERY.replace("$UID$", str(userID)).replace("$TIME$", str(since))
    response = fireRequest(fql, TYPES[0])
    matches = response["data"]
    record = utils.result("Facebook", id=userID, qtype="location_post",
                          query=query, numResults=len(matches))
    record["results"].extend(matches)
    return record
def recentPhotos(userID, query):
    """Fetch Facebook photos for `userID` posted within the DIFFTIME window."""
    cutoff = int(time.time()) - DIFFTIME
    fql = PHOTO_QUERY.replace("$UID$", str(userID)).replace("$TIME$", str(cutoff))
    rows = fireRequest(fql, TYPES[0])["data"]
    record = utils.result("Facebook", id=userID, qtype="photo",
                          query=query, numResults=len(rows))
    record["results"].extend(rows)
    return record
def recentStatus(userID, query):
    """Fetch Facebook statuses for `userID` posted within the DIFFTIME window."""
    cutoff = int(time.time()) - DIFFTIME
    fql = STATUS_QUERY.replace("$UID$", str(userID)).replace("$TIME$", str(cutoff))
    rows = fireRequest(fql, TYPES[1])["data"]
    record = utils.result("Facebook", id=userID, qtype="status",
                          query=query, numResults=len(rows))
    record["results"].extend(rows)
    return record
def recentTweets(twitterID, query):
    """Fetch the 20 most recent tweets for `twitterID` as a result record.

    NOTE: the incoming `query` argument is discarded — the endpoint path is
    rebuilt here and recorded in the returned result.
    """
    query = "statuses/user_timeline.json?user_id=" + str(twitterID) + "&count=20"
    rows = fireRequest(query)
    record = utils.result("Twitter", id=twitterID, qtype="status",
                          query=query, numResults=len(rows))
    record["results"].extend(rows)
    return record
def recentLocationPost(userID, query):
    """Fetch Facebook location posts for `userID` within the DIFFTIME window."""
    cutoff = int(time.time()) - DIFFTIME
    fql = LOCATION_POST_QUERY.replace("$UID$", str(userID)).replace("$TIME$", str(cutoff))
    rows = fireRequest(fql, TYPES[0])["data"]
    record = utils.result("Facebook", id=userID, qtype="location_post",
                          query=query, numResults=len(rows))
    record["results"].extend(rows)
    return record
def get_deps(fn, namespace):
    """Return the transitive dependency map of `fn` within `namespace`.

    Each entry maps a dependency *name* to the list of argument names that
    dependency declares. Resolution goes through the namespace by name
    (not through the function object's own name) so that mocked entries
    are honoured.

    Fix: `namespace.iteritems()` is Python-2-only and raises AttributeError
    on Python 3; `.items()` behaves identically on both.
    """
    deps = {}
    queue = [fn]
    ns = result(namespace)
    # reverse lookup: function object -> its name in the namespace
    # we do not rely on fn.func_name since dependencies can be mocked
    rv_ns = {v: k for k, v in namespace.items()}
    while queue:
        fn = queue.pop()
        # NOTE: getargspec is deprecated (removed in py3.11); switch to
        # inspect.getfullargspec once py2 support is dropped.
        args = inspect.getargspec(fn).args
        deps[rv_ns[fn]] = args
        for func_name in args:
            if func_name not in deps:
                queue.append(ns[func_name])
    return deps
def call(fn, namespace):
    """ Execute a function with dependency injection. The namespace is the container of all the dependencies. >>> def a(b): return b*2 >>> def b(): return 1 >>> def c(): return 4 >>> call(a, {'a': a, 'b': b}) 2 >>> call(a, {'a': a, 'b': c}) # mocking ! 8 """
    # Evaluate dependencies bottom-up (reverse topological order) so every
    # function's kwargs are already computed when it is called.
    env = {}
    deps = get_deps(fn, namespace=namespace)
    resolved = result(namespace)
    for n in reversed(list(topsort(deps))):
        kwargs = {func_name: env[func_name] for func_name in deps[n]}
        env[n] = resolved[n](**kwargs)
    # Fix: `fn.func_name` is Python-2-only; `fn.__name__` is the same
    # attribute and works on both Python 2 and 3.
    return env[fn.__name__]
def solve_lp(self, c=None, verbose=False, record_objs=True, method='primal_simplex'):
    """Solve the LP min{c^T x} with Gurobi and return a result record.

    c: objective vector; defaults to self.c (asserted present).
    record_objs: when True, attach a simplex callback that samples the
        objective value, wall-clock time and iteration count per callback.
    method: forwarded to self.set_method (e.g. 'primal_simplex').

    Raises RuntimeError when the model does not reach OPTIMAL.
    Returns the project `result(...)` object carrying the optimal point,
    objective, iteration count, solve time and the sampled series.
    """
    if c is None:
        assert self.c is not None, 'Need objective function'
        c = self.c
    # lazily build the Gurobi model the first time this is called
    if self.model is None:
        self.build_gurobi_model(c=c)
    self.set_objective(c)
    self.set_method(method)
    t0 = time.time()
    obj_values = []
    iter_times = []
    iter_counts = []

    def obj_callback(model, where):
        # sample progress only during simplex iterations
        if where == gp.GRB.Callback.SIMPLEX:
            obj = model.cbGet(gp.GRB.Callback.SPX_OBJVAL)
            obj_values.append(obj)
            iter_times.append(time.time() - t0)
            iter_count = model.cbGet(gp.GRB.Callback.SPX_ITRCNT)
            iter_counts.append(iter_count)

    if record_objs:
        self.model.optimize(obj_callback)
    else:
        self.model.optimize()
    if self.model.status != gp.GRB.Status.OPTIMAL:
        raise RuntimeError('Model failed to solve')
    x_optimal = self.model.getAttr('x', self.x)
    obj_optimal = self.model.objVal
    num_steps = self.model.getAttr('IterCount')
    # wall-clock time is reported instead of Gurobi's own Runtime attribute
    #solve_time = self.model.getAttr('Runtime')
    solve_time = time.time() - t0
    output = result(0, x=x_optimal, obj=obj_optimal, n_iters=num_steps, solve_time=solve_time, iter_times=iter_times, obj_values=obj_values, iter_counts=iter_counts)
    return output
# Script fragment: per-race threshold evaluation and result pickling.
# NOTE(review): this chunk was recovered without indentation; the statement
# order below is the literal reading — confirm nesting against the original
# (the `if seted == False:` guard likely sat at a different loop level,
# since `seted` is set True immediately above it).
seted = True
if seted == False:
    # route sample i into the "other" race bucket (index 4)
    group_(race_pre[4], pre[i], group_test[4], y_test[i], race_prepro[4], prepro[i], group_X[4], X_test.loc[i])
###result of race
# display strings for each race bucket (indices align across these lists)
race_group = [
    'Metrics - race - white: ', 'Metrics - race - black/african american: ',
    'Metrics - race - hispanic/latino: ', 'asian', 'other'
]
race_label = [
    'white (AUC = %0.4f)', 'black/african american 41~70 (AUC = %0.4f)',
    'hispanic/latino (AUC = %0.4f)', 'asian (AUC = %0.4f)', 'other (AUC = %0.4f)'
]
race_legend = [
    'white', 'black/african american', 'hispanic/latino', 'asian', 'other'
]
title_p, title_r, title_f = 'thresholds-precision(race)', 'thresholds-recall(race)', 'thresholds-f1score(race)'
n = 6
# compute/update the optimal thresholds for the race grouping
opt_thre = result(opt_thre, race_group, group_test, target_names, race_pre, race_prepro, race_label, race_legend, title_p, title_r, title_f, n)
print(opt_thre)
###save -> pickle file
# persist everything needed to reproduce the threshold analysis
thre_objects = (opt_thre, model, y_test, group_test, group_X, X_test)
with open(pkl_thre, 'wb') as file:
    pickle.dump(thre_objects, file)
def parse_resp(content: dict, email: str) -> dict:
    """Map a service JSON payload onto the common result record.

    The 'e' field's truthiness decides the leak verdict; a missing key
    propagates KeyError to the caller, as before.
    """
    leaked = bool(content['e'])
    return result(email=email, service=__name__, is_leak=leaked)
# Script fragment: one migration round of the island GA, then final save.
# NOTE(review): indentation was lost in this chunk — the statements up to
# toolbox.migrate() presumably sit inside the evolution loop, with the
# END prints and pickling after it; confirm against the original layout.
previous_fitness = hallOfFame[0].fitness.values
if(iterations_wo_improvement * FREQ == int(max_iterations_wo_improvement / 2)):
    # halfway to the stagnation limit — report progress
    print(iterations_wo_improvement * FREQ, "iterations without improvement...")
bestIndividuals.append(hallOfFame[0].fitness.values)
if first:
    # first round: seed the per-island logbooks
    for logbook in ziped[1]:
        logbooks.append(logbook)
    first = False
else:
    # later rounds: append each island's new records to its logbook
    for k, logbook in enumerate(ziped[1]):
        logbooks[k] += logbook
toolbox.migrate(islands)
print("----------END---------")
print("Hall of fame:", hallOfFame[0], hallOfFame[0].fitness)
# Save results
pickleOut = open("./out/" + BENCHMARK_NAME + "_" + str(NUM_OF_ISLANDS) + "_" + str(MIGRATION_RATIO) + "_" + MODEL + ".pickle", "wb")
pickle.dump(utils.result(
    logbooks, bestIndividuals, time.time() - start_time), pickleOut)
pickleOut.close()
print("\n")
# Script fragment: progress reporting, island bookkeeping, migration and
# final pickling for the multi-objective island GA run.
# NOTE(review): indentation was lost in this chunk — the block up to
# toolbox.migrate() presumably sits inside the evolution loop; confirm.
print("Removed: ", sum_removed, "Improvement: ", hallOfFame[0].fitness)
if (iterations_wo_improvement * FREQ % int(max_iterations_wo_improvement / 10) == 0):
    # every 10% of the stagnation budget, report % progress and lap time
    print("iterations_wo_improvement:", (iterations_wo_improvement * FREQ / max_iterations_wo_improvement) * 100, "% time:", round(time.time() - mig_start_time, 2))
    mig_start_time = time.time()
# record island sizes and the whole hall of fame's fitness values
islandsLog.append([len(island) for island in islands])
bestIndividuals.append([ind.fitness.values for ind in hallOfFame])
toolbox.migrate(islands)
print("----------END---------")
print("Hall of fame[0]:", hallOfFame[0], hallOfFame[0].fitness)
# Save results
pickleOut = open(
    "./out/" + BENCHMARK_NAME + "_" + str(NUM_OF_ISLANDS) + "_" + str(MIGRATION_RATIO) + "_" + MODEL + "_" + str(NUM_OF_OBJECTIVES) + ".pickle", "wb")
pickle.dump(
    utils.result(islandsLog, bestIndividuals, time.time() - start_time), pickleOut)
pickleOut.close()
print("\n")
def studentCode(self, data):
    """Run the exercise under test: print the table for *data* and return the recorded outcome.

    utils.init() resets the helper's state before the run; q1.printtable is
    the student-facing entry point being exercised.
    # NOTE(review): presumably utils.result() returns the captured output /
    # grading record — confirm against the utils module.
    """
    utils.init()
    q1.printtable(*data)
    return utils.result()
def parse_resp(content: str, email: str) -> dict:
    """Decide leak status from a raw HTML page.

    The presence of a Google-style result container marks a hit.
    """
    hit_marker = '<div class="g">'
    return result(email=email, service=__name__, is_leak=hit_marker in content)
# Script fragment: map the away-team code to its lowercase odds key, then
# write one CSV row of derived match statistics and odds.
# NOTE(review): indentation/loop structure was lost in this chunk — the
# mapping and f.write presumably run per match row, with the close/prints
# after the loop. `s_away` stays unset here for codes not listed below;
# presumably the remaining teams are handled earlier in the file — verify.
if away == 'ARS':
    s_away = 'ars'
if away == 'TOT':
    s_away = 'tot'
if away == 'EVE':
    s_away = 'eve'
if away == 'HUD':
    s_away = 'hud'
if away == 'LIV':
    s_away = 'liv'
if away == 'CRY':
    s_away = 'cry'
if away == 'SOU':
    s_away = 'sou'
if away == 'MNC':
    s_away = 'mnc'
# columns: home, score, away, result, goals, over/under flags, H/D/A odds
f.write(home + "," + score + "," + away + "," + result(score) + "," + home_score(score) + "," + away_score(score) + "," + goal_goal(score) + "," + over_1(score) + "," + over_2(score) + "," + over_3(score) + "," + over_4(score) + "," + hodd(s_home, s_away) + "," + dodd(s_home, s_away) + "," + aodd(s_home, s_away) + "\n")
print(">>database/" + filename)
f.close()
print("Porting Successfully Finished")
def steepest_descent_augmentation_scheme(P, x, c=None, verbose=False, method='dual_simplex', reset=False, max_time=300, first_warm_start=None, save_first_steps=0, problem_name=''):
    """Solve min{c^T x : x in P} by steepest-descent circuit augmentation.

    P: polyhedron object providing objective, constraint-activity queries,
       maximal-step computation and a polyhedral model for directions.
    x: feasible starting point.
    c: optional objective; when given it overrides P's current objective.
    method: LP method used by the polyhedral model (e.g. 'dual_simplex').
    reset: when True, reset the polyhedral model before each iteration.
    max_time: wall-clock budget in seconds (status=2 on timeout).
    first_warm_start: optional warm-start solution for the first direction.
    save_first_steps: save the first k iterates to solutions/<name>_<i>.npy.

    Returns the project `result(...)` object: status 0 = optimal,
    1 = unbounded (infinite step), 2 = time limit; plus per-iteration
    timing/objective statistics.
    """
    if c is not None:
        P.set_objective(c)
    t0 = time.time()
    x_current = x
    if save_first_steps:
        np.save('solutions/{}_0.npy'.format(problem_name), x_current)
    # build the polyhedral model used to compute steepest-descent circuits
    active_inds = P.get_active_constraints(x_current)
    pm = P.build_polyhedral_model(active_inds=active_inds, method=method)
    if first_warm_start is not None:
        print('Using custom warm start')
        pm.set_solution(first_warm_start)
    t1 = time.time()
    build_time = t1 - t0
    print('Polyhedral model build time: {}'.format(build_time))
    # per-phase timing buckets: direction solve, step, LP solve, LP phases
    sub_times = {'sd': [], 'step': [], 'solve': [], 'phase_times': []}
    descent_circuits = []
    obj_values = []
    step_sizes = []
    iter_times = []
    simplex_iters = []
    iteration = 0
    obj_value = P.c.dot(x_current)
    obj_values.append(obj_value)
    t2 = time.time()
    iter_times.append(t2 - t1)
    # compute steepest-descent direction
    descent_direction, y_pos, y_neg, steepness, num_steps, solve_time, phase_times = pm.compute_sd_direction(
        verbose=verbose)
    simplex_iters.append(num_steps)
    sub_times['solve'].append(solve_time)
    sub_times['phase_times'].append(phase_times)
    t3 = time.time()
    sub_times['sd'].append(t3 - t2)
    # iterate until the steepest direction is (numerically) flat
    while abs(steepness) > EPS:
        t3 = time.time()
        if reset:
            pm.reset()
        # take maximal step
        x_current, alpha, active_inds = P.take_maximal_step(
            descent_direction, y_pos, y_neg)
        if iteration % 50 == 0 or iteration == 1:
            print('\nIteration {}'.format(iteration))
            print('Objective: {}'.format(obj_value))
            print('Steepness: {}'.format(steepness))
            print('Step length: {}'.format(alpha))
        t4 = time.time()
        obj_value = P.c.dot(x_current)
        obj_values.append(obj_value)
        iter_times.append(t4 - t1)
        sub_times['step'].append(t4 - t3)
        descent_circuits.append(descent_direction)
        step_sizes.append(alpha)
        if math.isinf(alpha):
            # problem is unbounded
            return result(status=1, circuits=descent_circuits, steps=step_sizes)
        # compute steepest-descent direction
        pm.set_active_inds(active_inds)
        descent_direction, y_pos, y_neg, steepness, num_steps, solve_time, phase_times = pm.compute_sd_direction(
            verbose=verbose)
        t5 = time.time()
        sub_times['sd'].append(t5 - t4)
        sub_times['solve'].append(solve_time)
        sub_times['phase_times'].append(phase_times)
        simplex_iters.append(num_steps)
        iteration += 1
        current_time = t5 - t1
        if current_time > max_time:
            # wall-clock budget exhausted
            return result(status=2)
        if iteration <= save_first_steps:
            np.save('solutions/{}_{}.npy'.format(problem_name, iteration), x_current)
    t6 = time.time()
    total_time = t6 - t1
    print('Total time for steepest-descent scheme: {}'.format(total_time))
    return result(status=0, x=x_current, obj=P.c.dot(x_current), n_iters=len(step_sizes), solve_time=total_time, iter_times=iter_times, alg_type='steepest-descent', circuits=descent_circuits, steps=step_sizes, simplex_iters=simplex_iters, solve_times=sub_times['solve'], sub_times=sub_times, obj_values=obj_values)
def updateCache(a: int, b: int, sign: str) -> None:
    """Replace the module-level cache with a fresh result for (a, sign, b).

    # NOTE(review): presumably utils.result evaluates `a <sign> b` and
    # returns the record cached here — confirm against the utils module.
    """
    global cache
    cache = utils.result(a, b, sign)
def parse_resp(content: dict, email: str) -> dict:
    """Map a service JSON payload onto the common result record.

    A non-empty 'data' field that is not the service's 'E_NOT_VALID'
    sentinel counts as a leak. A missing key propagates KeyError.
    """
    payload = content['data']
    leaked = bool(payload) and payload != 'E_NOT_VALID'
    return result(email=email, service=__name__, is_leak=leaked)