def main():
    graph, sess = load_graph(FLAGS.pre_trained_model_path)
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, FLAGS.width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, FLAGS.height)
    mp = _mp.get_context("spawn")
    v = mp.Value('i', 0)
    lock = mp.Lock()
    process = mp.Process(target=mario, args=(v, lock))
    process.start()
    while True:
        key = cv2.waitKey(10)
        if key == ord("q"):
            break
        _, frame = cap.read()
        frame = cv2.flip(frame, 1)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        boxes, scores, classes = detect_hands(frame, graph, sess)
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        results = predict(boxes, scores, classes, FLAGS.threshold, FLAGS.width, FLAGS.height)
        if len(results) == 1:
            x_min, x_max, y_min, y_max, category = results[0]
            x = int((x_min + x_max) / 2)
            y = int((y_min + y_max) / 2)
            cv2.circle(frame, (x, y), 5, RED, -1)
            if category == "Open" and x <= FLAGS.width / 3:
                action = 7  # Left jump
                text = "Jump left"
            elif category == "Closed" and x <= FLAGS.width / 3:
                action = 6  # Left
                text = "Run left"
            elif category == "Open" and FLAGS.width / 3 < x <= 2 * FLAGS.width / 3:
                action = 5  # Jump
                text = "Jump"
            elif category == "Closed" and FLAGS.width / 3 < x <= 2 * FLAGS.width / 3:
                action = 0  # Do nothing
                text = "Stay"
            elif category == "Open" and x > 2 * FLAGS.width / 3:
                action = 2  # Right jump
                text = "Jump right"
            elif category == "Closed" and x > 2 * FLAGS.width / 3:
                action = 1  # Right
                text = "Run right"
            else:
                action = 0
                text = "Stay"
            with lock:
                v.value = action
            cv2.putText(frame, "{}".format(text), (x_min, y_min - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, GREEN, 2)
        overlay = frame.copy()
        cv2.rectangle(overlay, (0, 0), (int(FLAGS.width / 3), FLAGS.height), ORANGE, -1)
        cv2.rectangle(overlay, (int(2 * FLAGS.width / 3), 0), (FLAGS.width, FLAGS.height), ORANGE, -1)
        cv2.addWeighted(overlay, FLAGS.alpha, frame, 1 - FLAGS.alpha, 0, frame)
        cv2.imshow('Detection', frame)
    cap.release()
    cv2.destroyAllWindows()
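
# Note: the `mario` target spawned above is defined elsewhere in the project and only
# consumes the action written into the shared mp.Value. The function below is a
# hypothetical sketch of such a consumer loop (the name and the gym-style `env` are
# assumptions for illustration, not the repo's actual implementation).
def game_consumer_sketch(v, lock, env):
    """Poll the shared gesture action and step a gym-style environment with it."""
    import time
    env.reset()
    done = False
    while not done:
        with lock:                      # read the most recent gesture-derived action
            action = v.value
        _, _, done, _ = env.step(action)
        env.render()
        time.sleep(1 / 60)              # roughly one step per frame at 60 FPS
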
def main():
    graph, sess = load_graph(FLAGS.pre_trained_model_path)
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, FLAGS.width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, FLAGS.height)
    mp = _mp.get_context("spawn")
    v = mp.Value('i', 0)
    lock = mp.Lock()
    process = mp.Process(target=battle_city, args=(v, lock))
    process.start()
    x_center = int(FLAGS.width / 2)
    y_center = int(FLAGS.height / 2)
    radius = int(min(FLAGS.width, FLAGS.height) / 6)
    while True:
        key = cv2.waitKey(10)
        if key == ord("q"):
            break
        _, frame = cap.read()
        frame = cv2.flip(frame, 1)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        boxes, scores, classes = detect_hands(frame, graph, sess)
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        results = predict(boxes, scores, classes, FLAGS.threshold, FLAGS.width, FLAGS.height)
        if len(results) == 1:
            x_min, x_max, y_min, y_max, category = results[0]
            x = int((x_min + x_max) / 2)
            y = int((y_min + y_max) / 2)
            cv2.circle(frame, (x, y), 5, RED, -1)
            if category == "Closed" and np.linalg.norm((x - x_center, y - y_center)) <= radius:
                action = 0  # Stay
                text = "Stay"
            elif category == "Closed" and is_in_triangle((x, y), [(0, 0), (FLAGS.width, 0), (x_center, y_center)]):
                action = 1  # Up
                text = "Up"
            elif category == "Closed" and is_in_triangle((x, y), [(0, FLAGS.height), (FLAGS.width, FLAGS.height), (x_center, y_center)]):
                action = 2  # Down
                text = "Down"
            elif category == "Closed" and is_in_triangle((x, y), [(0, 0), (0, FLAGS.height), (x_center, y_center)]):
                action = 3  # Left
                text = "Left"
            elif category == "Closed" and is_in_triangle((x, y), [(FLAGS.width, 0), (FLAGS.width, FLAGS.height), (x_center, y_center)]):
                action = 4  # Right
                text = "Right"
            elif category == "Open":
                action = 5  # Fire
                text = "Fire"
            else:
                action = 0
                text = "Stay"
            with lock:
                v.value = action
            cv2.putText(frame, "{}".format(text), (x_min, y_min - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, GREEN, 2)
        overlay = frame.copy()
        cv2.drawContours(overlay, [np.array([(0, 0), (FLAGS.width, 0), (x_center, y_center)])], 0, CYAN, -1)
        cv2.drawContours(overlay, [np.array([(0, FLAGS.height), (FLAGS.width, FLAGS.height), (x_center, y_center)])], 0, CYAN, -1)
        cv2.drawContours(overlay, [np.array([(0, 0), (0, FLAGS.height), (x_center, y_center)])], 0, YELLOW, -1)
        cv2.drawContours(overlay, [np.array([(FLAGS.width, 0), (FLAGS.width, FLAGS.height), (x_center, y_center)])], 0, YELLOW, -1)
        cv2.circle(overlay, (x_center, y_center), radius, BLUE, -1)
        cv2.addWeighted(overlay, FLAGS.alpha, frame, 1 - FLAGS.alpha, 0, frame)
        cv2.imshow('Detection', frame)
    cap.release()
    cv2.destroyAllWindows()
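
# `is_in_triangle` is imported from the project's utilities and is not shown in this
# snippet. A common way to implement such a test is via the sign of the 2D cross
# product along each edge; the helper below is a hypothetical stand-in written for
# illustration, not necessarily the project's actual implementation.
def is_in_triangle_sketch(point, triangle):
    """Return True if `point` lies inside (or on the border of) the triangle
    given as three (x, y) vertices."""
    (px, py) = point
    (x1, y1), (x2, y2), (x3, y3) = triangle

    def cross(ax, ay, bx, by):
        # z-component of the cross product of edge (a -> b) and vector (a -> point)
        return (bx - ax) * (py - ay) - (by - ay) * (px - ax)

    d1 = cross(x1, y1, x2, y2)
    d2 = cross(x2, y2, x3, y3)
    d3 = cross(x3, y3, x1, y1)
    has_neg = d1 < 0 or d2 < 0 or d3 < 0
    has_pos = d1 > 0 or d2 > 0 or d3 > 0
    return not (has_neg and has_pos)    # inside when all edge signs agree
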
def main():
    graph, sess = load_graph(FLAGS.pre_trained_model_path)
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, FLAGS.width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, FLAGS.height)
    mp = _mp.get_context("spawn")
    v = mp.Value('i', 0)
    lock = mp.Lock()
    process = mp.Process(target=mimic, args=(v, lock))
    process.start()
    x_center = int(FLAGS.width / 2)
    y_center = int(FLAGS.height / 2)
    radius = int(min(FLAGS.width, FLAGS.height) / 4)
    while True:
        key = cv2.waitKey(10)
        if key == ord("q"):
            break
        _, frame = cap.read()
        frame = cv2.flip(frame, 1)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        boxes, scores, classes = detect_hands(frame, graph, sess)
        results = predict(boxes, scores, classes, FLAGS.threshold, FLAGS.width, FLAGS.height)
        text = "Oof"
        top_left_square_corr = np.array([(0, 0), (FLAGS.width // 3, 0),
                                         (FLAGS.width // 3, FLAGS.height // 2), (0, FLAGS.height // 2)])
        bottom_left_square_corr = np.array([(0, FLAGS.height), (0, FLAGS.height // 2),
                                            (FLAGS.width // 3, FLAGS.height // 2), (FLAGS.width // 3, FLAGS.height)])
        bottom_right_square_corr = np.array([(FLAGS.width, FLAGS.height),
                                             (FLAGS.width - FLAGS.width // 3, FLAGS.height),
                                             (FLAGS.width - FLAGS.width // 4, FLAGS.height - FLAGS.height // 3),
                                             (FLAGS.width, FLAGS.height - FLAGS.height // 3)])
        top_right_square_corr = np.array([(FLAGS.width, 0), (FLAGS.width - FLAGS.width // 4, 0),
                                          (FLAGS.width - FLAGS.width // 4, FLAGS.height // 3),
                                          (FLAGS.width, FLAGS.height // 3)])
        if len(results) == 1:
            x_min, x_max, y_min, y_max, category = results[0]
            x = int((x_min + x_max) / 2)
            y = int((y_min + y_max) / 2)
            cv2.circle(frame, (x, y), 10, RED, -1)
            if category == "Open" and np.linalg.norm((x - x_center, y - y_center)) <= radius:
                action = 1
                text = action
            elif category == "Open" and is_in_square((x, y), top_left_square_corr):
                action = 3
                text = action
            elif category == "Open" and is_in_square((x, y), top_right_square_corr):
                action = 2
                text = action
            elif category == "Closed" and is_in_square((x, y), bottom_right_square_corr):
                action = 4
                text = action
            else:
                action = 0
            with lock:
                v.value = action
            cv2.putText(frame, "{}".format(text), (x_min, y_min - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, GREEN, 2)
        overlay = frame.copy()
        height = FLAGS.height // 3
        width = FLAGS.width // 3
        cv2.drawContours(overlay, [top_left_square_corr], 0, CYAN, -1)
        cv2.drawContours(overlay, [bottom_right_square_corr], 0, RED, -1)
        cv2.drawContours(overlay, [bottom_left_square_corr], 0, GREEN, -1)
        cv2.drawContours(overlay, [top_right_square_corr], 0, YELLOW, -1)
        cv2.circle(overlay, (x_center, y_center), radius, BLUE, -1)
        cv2.addWeighted(overlay, FLAGS.alpha, frame, 1 - FLAGS.alpha, 0, frame)
        cv2.imshow('Detection', frame)
    cap.release()
    cv2.destroyAllWindows()
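
# `is_in_square` is also defined elsewhere in the project. Because the zone corners
# above are already NumPy arrays in drawing order, OpenCV's pointPolygonTest is one
# convenient way to express the check; this is a hypothetical sketch under that
# assumption, not the project's actual helper.
def is_in_square_sketch(point, corners):
    """Return True if `point` (x, y) falls inside the quadrilateral `corners`,
    given as a (4, 2) array of vertices. pointPolygonTest returns a positive
    value inside, 0 on the border and a negative value outside."""
    contour = np.asarray(corners, dtype=np.int32).reshape(-1, 1, 2)
    return cv2.pointPolygonTest(contour, (float(point[0]), float(point[1])), False) >= 0
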
import numpy as np
import cv2
import math
from tkinter import *
import threading
import tensorflow as tf
import pyautogui  # used by keypress() below

from src.utils import load_graph, detect_hands, predict


# VISION SETTINGS
def nothing(x):
    pass


font = cv2.FONT_HERSHEY_SIMPLEX
graph, sess = load_graph("src/pretrained_model.pb")


def keypress(p):
    y = p[1]
    x = p[0]
    if y >= 100 and y <= 150:
        if x >= 330 and x <= 380:
            pyautogui.typewrite('A')
        elif x >= 380 and x <= 430:
            pyautogui.typewrite('B')
        elif x >= 430 and x <= 480:
            pyautogui.typewrite('C')
        elif x >= 480 and x <= 530:
            pyautogui.typewrite('D')
        elif x >= 530 and x <= 580:
                                 p=args["p"], model=args["model"], max_hop=3, no_simulations=1)
    nodes = filter_best_spread_nodes(G, best_nodes, error, filter_function)
    nodes = filter_min_degree_nodes(G, args["min_degree"], nodes)
    return nodes


if __name__ == "__main__":
    args = read_arguments()

    # load graph
    G = load_graph(args["g_file"], args["g_type"], args["g_nodes"],
                   args["g_new_edges"], args["g_seed"])
    prng = random.Random(args["random_seed"])

    # load mutation function
    mutation_operator = None
    mutators_to_alterate = []
    if args["mutators_to_alterate"] is not None:
        for m in args["mutators_to_alterate"]:
            mutators_to_alterate.append(getattr(mutators, m))
    if args["mutation_operator"] == "adaptive_mutations":
        mutation_operator = mutators.ea_adaptive_mutators_alteration
    else:
        mutation_operator = getattr(mutators, args["mutation_operator"])
    if mutation_operator == mutators.ea_local_activation_mutation \
spread_function = monte_carlo
spread_function_name = "monte_carlo"
models = ["IC", "WC"]

# output directories
degree_dist_dir = "../experiments/datasets/degree_distributions/"
datasets_dir = "../experiments/datasets/"
ground_truth_dir = "../experiments/ground_truth/"

# compute the ground truth for each seed set size
for k in K:
    for dataset_name in dataset_names:
        max_trials = 100
        G = load_graph(g_type=dataset_name)
        prng = random.Random(seed)
        if community:
            G_sampled1 = sampler.random_walk_sampling_with_fly_back(G, nodes / 2, 0.15, prng)
            G_sampled2 = sampler.random_walk_sampling_with_fly_back(G, nodes / 2, 0.15, prng)
            # compose two graphs together
            G_sampled = nx.compose(G_sampled1, G_sampled2)
            # while nodes in common keep sampling
            while len(G_sampled) < nodes and max_trials > 0:
                G_sampled1 = sampler.random_walk_sampling_with_fly_back(G, nodes / 2, 0.15, prng)
                G_sampled2 = sampler.random_walk_sampling_with_fly_back(G, nodes / 2, 0.15, prng)
                # compose two graphs together
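
# `sampler.random_walk_sampling_with_fly_back` belongs to the project's sampling module
# and is not reproduced in this snippet. The sketch below only illustrates the usual idea
# of random-walk sampling with a fly-back probability (return to the walk's start node
# with probability p_fly_back); it is a simplified, hypothetical version, not the project's code.
def random_walk_sampling_with_fly_back_sketch(G, target_size, p_fly_back, prng, max_steps=100000):
    """Grow a node sample by a random walk over G and return the induced subgraph."""
    start = prng.choice(list(G.nodes()))
    current = start
    sampled_nodes = {start}
    steps = 0
    while len(sampled_nodes) < target_size and steps < max_steps:
        neighbors = list(G.neighbors(current))
        if neighbors and prng.random() > p_fly_back:
            current = prng.choice(neighbors)    # take a normal random-walk step
        else:
            current = start                     # "fly back" to the start of the walk
        sampled_nodes.add(current)
        steps += 1
    return G.subgraph(sampled_nodes).copy()
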
def main(): """Load the graph, create the embeddings, evaluate them with link prediction and save the results.""" args = parse_args() graph = utils.load_graph(args.weighted, args.directed, args.input) utils.print_graph_info(graph, "original graph") graph.remove_nodes_from(list(nx.isolates(graph))) utils.print_graph_info(graph, "graph without isolates") edge_splitter_test = EdgeSplitter(graph) graph_test, X_test_edges, y_test = edge_splitter_test.train_test_split( p=args.test_percentage, method="global") edge_splitter_train = EdgeSplitter(graph_test, graph) graph_train, X_edges, y = edge_splitter_train.train_test_split( p=args.train_percentage, method="global") X_train_edges, X_model_selection_edges, y_train, y_model_selection = train_test_split( X_edges, y, train_size=0.75, test_size=0.25) logger.info(f'\nEmbedding algorithm started.') start = time.time() embedding.create_embedding(args, graph_train) time_diff = time.time() - start logger.info(f'\nEmbedding algorithm finished in {time_diff:.2f} seconds.') embeddings = utils.load_embedding(args.output) logger.info(f'\nEmbedding evaluation started.') start = time.time() results = evaluation.evaluate(args.classifier, embeddings, X_train_edges, y_train, X_model_selection_edges, y_model_selection) time_diff = time.time() - start logger.info(f'Embedding evaluation finished in {time_diff:.2f} seconds.') best_result = max(results, key=lambda result: result["roc_auc"]) logger.info( f"\nBest roc_auc_score on train set using '{best_result['binary_operator'].__name__}': {best_result['roc_auc']}." ) logger.info(f'\nEmbedding algorithm started.') start = time.time() embedding.create_embedding(args, graph_test) time_diff = time.time() - start logger.info(f'\nEmbedding algorithm finished in {time_diff:.2f} seconds.') embedding_test = utils.load_embedding(args.output) roc_auc, average_precision, accuracy, f1 = evaluation.evaluate_model( best_result["classifier"], embedding_test, best_result["binary_operator"], X_test_edges, y_test) logger.info( f"Scores on test set using '{best_result['binary_operator'].__name__}'." ) logger.info(f"roc_auc_score: {roc_auc}") logger.info(f"average_precision_score: {average_precision}") logger.info(f"accuracy_score: {accuracy}") logger.info(f"f1_score on test set using: {f1}\n") if (args.results): evaluation.save_evaluation_results( args.dataset, args.method, args.classifier, (roc_auc, average_precision, accuracy, f1), args.results)