def transcrypt_sketch(sketch_name, sketch_dir, pyp5js):
    """
    Command to generate the P5.js code for a python sketch

    Params:
    - sketch_name: name of the sketch (will create a {sketch_name}.py)

    Optionals:
    - sketch_dir: sketch's directory (defaults to ./{sketch_name})
    - pyp5js: path to the pyp5js main file (defaults to local install)
    """
    SKETCH_DIR = Path(sketch_dir or f'./{sketch_name}')
    if not SKETCH_DIR.exists():
        cprint.warn("Couldn't find the sketch.")
        cprint.err(f"The directory {SKETCH_DIR} doesn't exist.", interrupt=True)

    sketch = SKETCH_DIR.child(f"{sketch_name}.py")
    pyp5js = Path(pyp5js or PYP5_DIR)

    command = ' '.join([str(c) for c in [
        'transcrypt', '-xp', pyp5js, '-b', '-m', '-n', sketch
    ]])
    cprint.info(f"Command:\n\t {command}")

    transcrypt = subprocess.Popen(shlex.split(command))
    transcrypt.wait()
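# Hypothetical call of transcrypt_sketch(); "my_sketch" and the directory
# layout are illustrative only. With both optionals left as None, the sketch
# is looked up in ./my_sketch/ and the bundled pyp5js files (PYP5_DIR) are
# handed to Transcrypt, roughly equivalent to running:
#   transcrypt -xp <PYP5_DIR> -b -m -n my_sketch/my_sketch.py
transcrypt_sketch('my_sketch', sketch_dir=None, pyp5js=None)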
async def interact(conn, svr, connector, method, args, verbose=False):
    try:
        await connector
    except Exception as e:
        print("Unable to connect to server: %s" % e)
        return -1

    cprint.info("\nConnected to: %s\n" % svr)

    if verbose:
        donate = await conn.RPC('server.donation_address')
        if donate:
            cprint.info("Donations: " + donate)

        motd = await conn.RPC('server.banner')
        cprint.info("\n---\n%s\n---" % motd)

    # XXX TODO do a simple REPL here

    if method:
        cprint.warn("\nMethod: %s" % method)

        # risky type coercion here
        args = [(int(i) if i.isdigit() else i) for i in args]

        try:
            rv = await conn.RPC(method, *args)
            cprint.ok(json.dumps(rv, indent=1))
        except ElectrumErrorResponse as e:
            cprint.err(e)

    conn.close()
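# Hedged driver for interact(); the connection objects come from whatever
# Stratum/Electrum client library the snippet uses (not shown here), so
# make_connection() below is a hypothetical placeholder.
import asyncio

async def run(svr, method, args):
    conn, connector = make_connection(svr)  # hypothetical helper
    await interact(conn, svr, connector, method, args, verbose=True)

# asyncio.run(run('electrum.example.org', 'server.version', []))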
def main():
    """Main cycle of the program."""
    try:
        plc = connect_to_plc("185.6.25.165", 0, 1)
        res = True
    except Exception:
        res = False
    while res:
        res = step_cycle(plc)
    if not res:
        cprint.warn("reconnect to plc")
        main()
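# Note: the recursive main() call above grows the call stack on every
# reconnect. A hedged iterative alternative, assuming connect_to_plc() and
# step_cycle() keep the signatures shown above (the 5 s back-off is an
# added assumption, not part of the original):
def main_loop():
    """Reconnecting main cycle, without recursion."""
    while True:
        try:
            plc = connect_to_plc("185.6.25.165", 0, 1)
        except Exception:
            cprint.warn("reconnect to plc")
            time.sleep(5)  # assumed back-off
            continue
        while step_cycle(plc):
            pass
        cprint.warn("reconnect to plc")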
def sketch_py(self):
    py_file = self.sketch_dir.child(f'{self.sketch_name}.py')
    if self.check_sketch_dir and not py_file.exists():
        cwd_py_file = Path(os.getcwd()).child(f"{self.sketch_name}.py")
        if not cwd_py_file.exists():
            cprint.warn("Couldn't find the sketch.")
            cprint.err(
                f"Neither the file {py_file} nor {cwd_py_file} exists.",
                interrupt=True)
        py_file = cwd_py_file
        self._sketch_dir = py_file.parent
    return py_file
def predict(self, img):
    # Reshape the latest image
    orig_h, orig_w = img.shape[:2]
    # img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    input_image = Image.fromarray(img)
    img_rsz = np.array(input_image.resize(self.input_shape[:2]))

    if self.arch == 'ssd':
        (boxes, scores, predictions, _), elapsed = self._forward_pass(
            {self.image_tensor: img_rsz[None, ...]})
        boxes = list(np.squeeze(boxes))
        scores = list(np.squeeze(scores))
        classes = list(np.squeeze(predictions).astype(int))

        boxes_full = []
        for box, prob, cls in zip(boxes, scores, classes):
            if prob >= self.confidence_threshold and cls == self.person_class:
                # x, y, w, h, p
                y1 = max(box[0], 0.0) * orig_h
                x1 = max(box[1], 0.0) * orig_w
                y2 = min(box[2], 1.0) * orig_h
                x2 = min(box[3], 1.0) * orig_w
                boxes_full.append([x1, y1, x2 - x1, y2 - y1, prob])
        return boxes_full, elapsed

    elif self.arch in ['yolov3', 'yolov3tiny']:
        detections, elapsed = self._forward_pass(
            {self.inputs: img_rsz[None, ...]})
        # Nxkx(NUM_CLASSES + 4 + 1) tensor containing k detections for each n-th image
        # NMS
        detections_filtered = nms.non_max_suppression(detections[0], 0.5)
        # The key 0 contains the human detections.
        if 0 not in detections_filtered:
            return [], elapsed
        persons = detections_filtered[0]

        boxes_full = []
        for box, prob in persons:
            if prob >= self.confidence_threshold:
                # x, y, w, h, p
                x1 = max(box[0] / self.input_shape[1], 0.0) * orig_w
                y1 = max(box[1] / self.input_shape[0], 0.0) * orig_h
                x2 = min(box[2] / self.input_shape[1], 1.0) * orig_w
                y2 = min(box[3] / self.input_shape[0], 1.0) * orig_h
                boxes_full.append([x1, y1, x2 - x1, y2 - y1, prob])
        return boxes_full, elapsed

    else:
        cprint.warn(f'Implement predict for {self.arch}!!')
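# Hypothetical driver for the predict() method above; the class name
# (DetectionNetwork), the frozen graph path and the test image are
# assumptions for illustration, matching the __init__ shown at the end
# of this section.
import cv2

net = DetectionNetwork(arch='ssd', input_shape=(300, 300, 3),
                       frozen_graph='models/ssd_inception_v2.pb')
frame = cv2.imread('frame.jpg')[:, :, ::-1]  # OpenCV loads BGR; predict expects RGB
boxes, elapsed = net.predict(frame)
for x, y, w, h, p in boxes:
    print('person @ (%.0f, %.0f), %.0fx%.0f, conf=%.2f' % (x, y, w, h, p))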
def _validate_sketch_path(sketch_name=None, sketch_dir=None):
    """
    Searches for the sketch .py file
    """
    sketch_dir = Path(sketch_dir or f'{sketch_name}')
    sketch = sketch_dir.child(f"{sketch_name}.py")

    if not sketch.exists():
        sketch_file = Path(os.getcwd()).child(f"{sketch_name}.py")
        if not sketch_file.exists():
            cprint.warn("Couldn't find the sketch.")
            cprint.err(f"Neither the file {sketch} nor {sketch_file} exists.",
                       interrupt=True)
        sketch = sketch_file
        sketch_dir = sketch.parent

    return sketch
def new_sketch(sketch_name, sketch_dir):
    """
    Creates a new sketch in a folder/directory, with the required assets
    and an index.html file, all based on a template

    :param sketch_name: name for new sketch
    :type sketch_name: string
    :param sketch_dir: directory name
    :type sketch_dir: string
    :return: file names
    :rtype: list of strings
    """
    SKETCH_DIR = Path(sketch_dir or f'{sketch_name}')
    if SKETCH_DIR.exists():
        cprint.warn("Cannot configure a new sketch.")
        cprint.err(f"The directory {SKETCH_DIR} already exists.", interrupt=True)

    static_dir = SKETCH_DIR.child('static')
    templates_files = [
        (TEMPLATES_DIR.child('base_sketch.py'), SKETCH_DIR.child(f'{sketch_name}.py')),
        (PYP5_DIR.child('static', 'p5.js'), static_dir.child('p5.js')),
    ]

    index_template = templates.get_template('index.html')
    context = {
        "p5_js_url": "static/p5.js",
        "sketch_js_url": f"{TARGET_DIRNAME}/{sketch_name}.js",
        "sketch_name": sketch_name,
    }
    index_content = index_template.render(context)

    os.mkdir(SKETCH_DIR)
    os.mkdir(static_dir)
    for src, dest in templates_files:
        shutil.copyfile(src, dest)
    with open(SKETCH_DIR.child("index.html"), "w") as fd:
        fd.write(index_content)

    return templates_files[0][1]
def b_func(n, i):
    cprint.warn(f"B({n},{i})")
    if n % 2 == 0 and n > 2:
        if i < (n - 1):
            cprint.warn(f"return {i}")
            return i
        else:
            cprint.warn(f"return {n - 2}")
            return n - 2
    else:
        cprint.warn(f"return {n - 1}")
        return n - 1
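# A few concrete evaluations that exercise each branch of b_func
# (added for illustration; not part of the original snippet):
assert b_func(6, 2) == 2  # n even and > 2, i < n - 1 -> returns i
assert b_func(6, 5) == 4  # n even and > 2, i >= n - 1 -> returns n - 2
assert b_func(5, 3) == 4  # n odd -> returns n - 1
assert b_func(2, 0) == 1  # n == 2 fails the n > 2 test -> returns n - 1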
def new_sketch(sketch_name, sketch_dir):
    """
    Creates a new sketch in a folder/directory, with the required assets
    and an index.html file, all based on a template

    :param sketch_name: name for new sketch
    :type sketch_name: string
    :param sketch_dir: directory name
    :type sketch_dir: string
    :return: file names
    :rtype: list of strings
    """
    sketch_files = Pyp5jsSketchFiles(sketch_dir, sketch_name, check_sketch_dir=False)
    if not sketch_files.can_create_sketch():
        cprint.warn("Cannot configure a new sketch.")
        cprint.err(f"The directory {sketch_files.sketch_dir} already exists.",
                   interrupt=True)

    pyp5js_files = Pyp5jsLibFiles()
    templates_files = [
        (pyp5js_files.base_sketch, sketch_files.sketch_py),
        (pyp5js_files.p5js, sketch_files.p5js),
        (pyp5js_files.p5_dom_js, sketch_files.p5_dom_js),
    ]

    os.makedirs(sketch_files.sketch_dir)
    os.mkdir(sketch_files.static_dir)
    for src, dest in templates_files:
        shutil.copyfile(src, dest)

    index_content = get_index_content(sketch_name)
    with open(sketch_files.index_html, "w") as fd:
        fd.write(index_content)

    return sketch_files.sketch_py, sketch_files.index_html
def configure_new_sketch(sketch_name, sketch_dir):
    """
    Create dir and configure boilerplate

    Params:
    - sketch_name: name of the sketch (will create a {sketch_name}.py)

    Optionals:
    - sketch_dir: directory to save the sketch (defaults to ./{sketch_name})
    """
    SKETCH_DIR = Path(sketch_dir or f'./{sketch_name}')
    if SKETCH_DIR.exists():
        cprint.warn("Cannot configure a new sketch.")
        cprint.err(f"The directory {SKETCH_DIR} already exists.", interrupt=True)

    static_dir = SKETCH_DIR.child('static')
    templates_files = [
        (TEMPLATES_DIR.child('base_sketch.py'), SKETCH_DIR.child(f'{sketch_name}.py')),
        (PYP5_DIR.child('static', 'p5.js'), static_dir.child('p5.js')),
    ]

    os.mkdir(SKETCH_DIR)
    os.mkdir(static_dir)
    for src, dest in templates_files:
        copyfile(src, dest)

    index_template = templates.get_template('index.html')
    context = {
        "p5_js_url": "static/p5.js",
        "sketch_js_url": f"__target__/{sketch_name}.js",
    }
    index_content = index_template.render(context)
    with open(SKETCH_DIR.child("index.html"), "w") as fd:
        fd.write(index_content)
def listen_server_mvlab():
    while True:
        cprint.info("Try to start socket server")
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(('0.0.0.0', SOCKET_PORT))
            s.listen()
            cprint.warn('Listening on 0.0.0.0:%s' % SOCKET_PORT)
        except Exception:
            time.sleep(5)
            continue
        while True:
            try:
                conn, addr = s.accept()
                while True:
                    try:
                        data = conn.recv(1024)
                        print(data)
                        if not data:
                            break
                        try:
                            data = json.loads(data)
                            print(data)
                        except ValueError:
                            cprint.err("string not json")
                            # NOTE: original double-encoded a string literal here
                            data = json.dumps({'error': 'string not json'}).encode('utf-8')
                            conn.send(data)
                            continue  # skip further handling of an undecodable message
                        if "dash_teldafax" in data:
                            data = json.dumps(result_query).encode('utf-8')
                            cprint.warn('sent %s' % data)
                            conn.send(data)
                        elif "get_connections" in data:
                            ss = []
                            if "connection_name" in data:
                                ss = list_connections[data["connection_name"]]['value_list']
                                ass = []
                                for i in ss:
                                    ass.append({"name": i['name']})
                                ss = json.dumps(ass).encode('utf-8')
                                ss_len = int(math.ceil(len(ss) / 1024))
                                conn.send(ss_len.to_bytes(2, 'big'))
                                for i in range(ss_len):
                                    start = i * 1024
                                    end = (i + 1) * 1024
                                    cprint.info(ss[start:end])
                                    conn.send(ss[start:end])
                                    time.sleep(0.2)
                                time.sleep(0.5)
                                conn.close()
                            else:
                                count = 0
                                for d in list_connections:
                                    ss.append({
                                        'connection_name': d['name'],
                                        "ip": d['ip'],
                                        'key': count
                                    })
                                    count += 1
                                print(ss)
                                data = json.dumps(ss).encode('utf-8')
                                conn.send(data)
                                time.sleep(0.5)
                        else:
                            data = {}
                            count = 0
                            for i in list_connections:
                                data[i['name']] = [
                                    statuses_connection[count],
                                    i['name'],
                                    i['ip'],
                                ]
                                count += 1
                            print(data)
                            data = json.dumps(data).encode('utf-8')
                            cprint.warn('sent %s' % data)
                            conn.send(data)
                    except Exception as e:
                        cprint.info(e)
                        break
            except Exception:
                s.close()
                break
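# Minimal client sketch for the server above; the host argument is an
# assumption, and the chunked read mirrors the "connection_name" reply
# framing (2 big-endian bytes carrying the number of 1024-byte chunks to
# follow). Note recv() may return short reads on a real network; this is
# a sketch, not a robust protocol implementation.
import json
import socket

def fetch_connections(host, name=None):
    with socket.create_connection((host, SOCKET_PORT)) as sock:
        request = {"get_connections": True}
        if name is not None:
            request["connection_name"] = name
        sock.sendall(json.dumps(request).encode('utf-8'))
        if name is None:
            return json.loads(sock.recv(4096))
        n_chunks = int.from_bytes(sock.recv(2), 'big')
        payload = b''.join(sock.recv(1024) for _ in range(n_chunks))
        return json.loads(payload)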
def follow(self):
    full_image = self.camera.get_rgb_image()
    img2show = cv2.cvtColor(full_image, cv2.COLOR_RGB2BGR)

    self.network.predict()
    self.detection_boxes = self.network.boxes
    self.detection_scores = self.network.scores

    for xmin, ymin, xmax, ymax in self.detection_boxes:
        cv2.rectangle(img2show, (xmin, ymax), (xmax, ymin), (0, 0, 255), 5)

    self.persons = self.person_tracker.evalPersons(self.detection_boxes,
                                                   self.detection_scores,
                                                   full_image)
    self.faces = self.person_tracker.getFaces(full_image)
    cprint.info('\t........%d/%d faces detected........'
                % (len(self.faces), len(self.persons)))

    mom_found_now = False
    # Iteration over all faces and persons...
    # (enumerate added: the original referenced an undefined idx)
    for idx, person in enumerate(self.persons):
        if person.is_mom:
            self.mom_coords = person.coords
            mom_found_now = True
            break
        else:
            faces = person.ftrk.tracked_faces
            if len(faces) > 0:
                face = faces[0]
                [f_width, f_height] = [face[2] - face[0], face[3] - face[1]]
                f_total_box = np.zeros(4, dtype=np.int16)
                f_total_box[:2] = person[:2] + face[:2]
                f_total_box[2:4] = f_total_box[:2] + [f_width, f_height]
                cropped_face = full_image[f_total_box[1]:f_total_box[3],
                                          f_total_box[0]:f_total_box[2], :]
                # We compute the likelihood with mom...
                dist_to_mom = self.siamese_network.distanceToMom(cropped_face)
                if dist_to_mom < self.face_thres:
                    # Unset other moms
                    for idx2 in range(len(self.persons)):
                        self.person_tracker.tracked_persons[idx2].is_mom = False
                    # And set that person to mom.
                    self.person_tracker.tracked_persons[idx].is_mom = True
                    self.mom_coords = person.coords
                    mom_found_now = True
                    break

    if mom_found_now:
        cprint.ok("\t\t Mom found")
        cprint.ok(str(self.mom_coords))
        # [xmin, ymin, xmax, ymax] = self.mom_coords
        # cv2.rectangle(img2show, (xmin, ymax), (xmax, ymin), (0, 255, 0), 5)
    else:
        cprint.warn("\t\t Looking for mom...")

    return img2show
def console_warn(msg):
    now_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S.%f")
    cprint.warn('%s [WARN] - %s' % (now_time, msg))
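# Usage example (the timestamp shown is illustrative of the strftime format above):
console_warn("PLC connection lost")
# -> 25/03/2021 14:07:01.123456 [WARN] - PLC connection lost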
def goToMom(mom_box):
    '''
    Function to go towards mom.
    '''
    # Nested helper (see move() below): `self` and `full_depth` come from
    # the enclosing scope.

    # We compute the distance to mom
    ################################
    ############### v ##############
    ################################
    mom_depth = full_depth[mom_box[1]:mom_box[3], mom_box[0]:mom_box[2]]
    distance, grid = self.estimateDepth(mom_depth)

    # V error processing (go forward/backward)
    error = distance - self.v_center
    if self.prev_error is not None:
        d_error = abs(error - self.prev_error)
    else:
        d_error = 0

    # Avoid jumps
    if d_error < 10.0:
        if error < -self.v_margin:
            # Too near
            v = self.v_PID.processError(error, verbose=True)
            cprint.warn(' Distance: %d px (too near) >> VX = %.3f m/s' % (distance, v))
            # Avoid overswitching
            if not self.margin_expanded:
                self.v_margin = self.v_margin - 5
                self.margin_expanded = True
                cprint.info("New margin: %d" % (self.v_margin))
        elif error > self.v_margin:
            # Too far
            v = self.v_PID.processError(error, verbose=True)
            cprint.warn(' Distance: %d px (too far) >> VX = %.3f m/s' % (distance, v))
            # Avoid overswitching
            if not self.margin_expanded:
                self.v_margin = self.v_margin - 5
                self.margin_expanded = True
                cprint.info("New margin: %d" % (self.v_margin))
        else:
            # Inside range (OK)
            cprint.ok(' Distance: %d px (under control)' % (distance))
            self.v_PID.resetError()
            self.v_PID.brake()
            if self.margin_expanded and error < self.original_v_margin:
                # The margin can be restored...
                cprint.info("Margin restored.")
                self.v_margin = self.original_v_margin
                self.margin_expanded = False

    self.prev_error = error

    # Now, we compute the necessary turning
    ################################
    ############### w ##############
    ################################
    mom_center = (mom_box[2] + mom_box[0]) / 2
    h_error = self.center_coords[0] - mom_center

    if abs(h_error) > self.w_margin:
        # Turning...
        w = self.w_PID.processError(h_error, verbose=False)
        if w < 0:
            turn_dir = 'right'
        else:
            turn_dir = 'left'
        cprint.warn(' Angle: %d px >> Turning %s (w: %.3f r/s)' % (h_error, turn_dir, w))
    else:
        # Inside the angle margin (OK)
        cprint.ok(' Angle: %d px (under control)' % (h_error))
        self.w_PID.resetError()
        self.w_PID.brake()
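# The controller above only relies on processError(), resetError() and
# brake() (plus lostResponse() in move() below). The PID class itself is
# never shown, so this is a hypothetical minimal interface consistent with
# those calls; the gains and the brake/lostResponse behaviors are assumptions.
class SimplePID:
    def __init__(self, kp=0.005, ki=0.0, kd=0.001):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.integral = 0.0
        self.last_error = None
        self.output = 0.0

    def processError(self, error, verbose=False):
        # Standard PID update over the latest error sample.
        derivative = 0.0 if self.last_error is None else error - self.last_error
        self.integral += error
        self.last_error = error
        self.output = self.kp * error + self.ki * self.integral + self.kd * derivative
        if verbose:
            print('PID output: %.3f' % self.output)
        return self.output

    def resetError(self):
        self.integral = 0.0
        self.last_error = None

    def brake(self):
        # Decay the last command instead of cutting it to zero.
        self.output *= 0.5

    def lostResponse(self):
        # Target lost: clear history and stop.
        self.resetError()
        self.output = 0.0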
def move(self):
    '''
    Method called on each iteration. Detects persons and looks for mom.
    Commands the robot towards mom if it is found.
    '''
    # We get the full RGB and D images.
    full_image = self.camera.getImage()
    d = self.depth.getImage()
    full_depth, _, _ = cv2.split(d)

    def goToMom(mom_box):
        '''
        Function to go towards mom.
        '''
        # We compute the distance to mom
        ################################
        ############### v ##############
        ################################
        mom_depth = full_depth[mom_box[1]:mom_box[3], mom_box[0]:mom_box[2]]
        distance, grid = self.estimateDepth(mom_depth)

        # V error processing (go forward/backward)
        error = distance - self.v_center
        if self.prev_error is not None:
            d_error = abs(error - self.prev_error)
        else:
            d_error = 0

        # Avoid jumps
        if d_error < 10.0:
            if error < -self.v_margin:
                # Too near
                v = self.v_PID.processError(error, verbose=True)
                cprint.warn(' Distance: %d px (too near) >> VX = %.3f m/s' % (distance, v))
                # Avoid overswitching
                if not self.margin_expanded:
                    self.v_margin = self.v_margin - 5
                    self.margin_expanded = True
                    cprint.info("New margin: %d" % (self.v_margin))
            elif error > self.v_margin:
                # Too far
                v = self.v_PID.processError(error, verbose=True)
                cprint.warn(' Distance: %d px (too far) >> VX = %.3f m/s' % (distance, v))
                # Avoid overswitching
                if not self.margin_expanded:
                    self.v_margin = self.v_margin - 5
                    self.margin_expanded = True
                    cprint.info("New margin: %d" % (self.v_margin))
            else:
                # Inside range (OK)
                cprint.ok(' Distance: %d px (under control)' % (distance))
                self.v_PID.resetError()
                self.v_PID.brake()
                if self.margin_expanded and error < self.original_v_margin:
                    # The margin can be restored...
                    cprint.info("Margin restored.")
                    self.v_margin = self.original_v_margin
                    self.margin_expanded = False

        self.prev_error = error

        # Now, we compute the necessary turning
        ################################
        ############### w ##############
        ################################
        mom_center = (mom_box[2] + mom_box[0]) / 2
        h_error = self.center_coords[0] - mom_center

        if abs(h_error) > self.w_margin:
            # Turning...
            w = self.w_PID.processError(h_error, verbose=False)
            if w < 0:
                turn_dir = 'right'
            else:
                turn_dir = 'left'
            cprint.warn(' Angle: %d px >> Turning %s (w: %.3f r/s)' % (h_error, turn_dir, w))
        else:
            # Inside the angle margin (OK)
            cprint.ok(' Angle: %d px (under control)' % (h_error))
            self.w_PID.resetError()
            self.w_PID.brake()

    # Network outputs. Exclusively high score people detections.
    self.detection_boxes = self.network.boxes
    self.detection_scores = self.network.scores
    # num_detections = len(self.detection_boxes)

    # We retrieve every detected face on the current frame.
    self.persons = self.person_tracker.evalPersons(self.detection_boxes,
                                                   self.detection_scores,
                                                   full_image)
    # Now, we look for faces in those persons.
    print("")
    self.faces = self.person_tracker.getFaces(full_image)
    cprint.info('\t........%d/%d faces detected........'
                % (len(self.faces), len(self.persons)))

    mom_found_now = False
    # Iteration over all faces and persons...
    for idx in range(len(self.persons)):
        person = self.persons[idx]
        if person.is_mom:
            self.mom_coords = person.coords
            mom_found_now = True
            break
        else:
            faces = person.ftrk.tracked_faces
            if len(faces) > 0:
                face = faces[0]
                [f_width, f_height] = [face[2] - face[0], face[3] - face[1]]
                f_total_box = np.zeros(4, dtype=np.int16)
                f_total_box[:2] = person[:2] + face[:2]
                f_total_box[2:4] = f_total_box[:2] + [f_width, f_height]
                cropped_face = full_image[f_total_box[1]:f_total_box[3],
                                          f_total_box[0]:f_total_box[2], :]
                # We compute the likelihood with mom...
                dist_to_mom = self.siamese_network.distanceToMom(cropped_face)
                if dist_to_mom < self.face_thres:
                    # Unset other moms
                    for idx2 in range(len(self.persons)):
                        self.person_tracker.tracked_persons[idx2].is_mom = False
                    # And set that person to mom.
                    self.person_tracker.tracked_persons[idx].is_mom = True
                    self.mom_coords = person.coords
                    mom_found_now = True
                    break

    # If mom is being tracked, we move the robot towards it.
    if mom_found_now:
        cprint.ok("\t\t Mom found")
        goToMom(self.mom_coords)
    else:
        cprint.warn("\t\t Looking for mom...")
        self.v_PID.lostResponse()
        self.w_PID.lostResponse()
def __init__(self, arch, input_shape, frozen_graph=None, graph_def=None,
             dataset='coco', confidence_threshold=0.5, path_to_root=None):
    labels_file, max_num_classes = LABELS_DICT[dataset]
    # Append dir if provided (calling from another directory)
    if path_to_root is not None:
        labels_file = path.join(path_to_root, labels_file)
    label_map = label_map_util.load_labelmap(labels_file)  # loads the labels map.
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=max_num_classes)
    category_index = label_map_util.create_category_index(categories)
    self.classes = {k: str(v['name']) for k, v in category_index.items()}
    # Find person index
    for idx, class_ in self.classes.items():
        if class_ == 'person':
            self.person_class = idx
            break

    # Graph load. We allocate the session attribute
    self.sess = None
    if frozen_graph is not None:
        # Read the graph def from a .pb file
        graph_def = tf.compat.v1.GraphDef()
        cprint.info(f'Loading the graph def from {frozen_graph}')
        with tf.io.gfile.GFile(frozen_graph, 'rb') as f:
            graph_def.ParseFromString(f.read())
        self.load_graphdef(graph_def)
    elif graph_def is not None:
        cprint.info('Loading the provided graph def...')
        self.load_graphdef(graph_def)
    else:
        # No graph def was provided!
        cprint.fatal('The graph definition has not been loaded.', interrupt=True)

    self.input_shape = input_shape
    self.arch = arch

    # Dummy tensor to be used for the first inference.
    dummy_tensor = np.zeros((1, *self.input_shape), dtype=np.int32)

    # Set placeholders, depending on the network architecture
    cprint.warn(f'Network architecture: {self.arch}')
    if self.arch == 'ssd':
        # Inputs
        self.image_tensor = self.sess.graph.get_tensor_by_name('image_tensor:0')
        # Outputs
        self.detection_boxes = self.sess.graph.get_tensor_by_name('detection_boxes:0')
        self.detection_scores = self.sess.graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.sess.graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.sess.graph.get_tensor_by_name('num_detections:0')
        self.boxes = []
        self.scores = []
        self.predictions = []
        self.output_tensors = [self.detection_boxes, self.detection_scores,
                               self.detection_classes, self.num_detections]
        self.dummy_feed = {self.image_tensor: dummy_tensor}
    elif self.arch in ['yolov3', 'yolov3tiny']:
        # Inputs
        self.inputs = self.sess.graph.get_tensor_by_name('inputs:0')
        # Outputs
        self.output_boxes = self.sess.graph.get_tensor_by_name('output_boxes:0')
        self.output_tensors = [self.output_boxes]
        self.dummy_feed = {self.inputs: dummy_tensor}
    elif self.arch == 'face_yolo':
        # Inputs
        self.input = self.sess.graph.get_tensor_by_name('img:0')
        self.training = self.sess.graph.get_tensor_by_name('training:0')
        # Outputs
        self.prob = self.sess.graph.get_tensor_by_name('prob:0')
        self.x_center = self.sess.graph.get_tensor_by_name('x_center:0')
        self.y_center = self.sess.graph.get_tensor_by_name('y_center:0')
        self.w = self.sess.graph.get_tensor_by_name('w:0')
        self.h = self.sess.graph.get_tensor_by_name('h:0')
        self.output_tensors = [self.prob, self.x_center, self.y_center, self.w, self.h]
        self.dummy_feed = {self.input: dummy_tensor, self.training: False}
    elif self.arch == 'face_corrector':
        # Inputs
        self.input = self.sess.graph.get_tensor_by_name('img:0')
        self.training = self.sess.graph.get_tensor_by_name('training:0')
        # Outputs
        self.X = self.sess.graph.get_tensor_by_name('X:0')
        self.Y = self.sess.graph.get_tensor_by_name('Y:0')
        self.W = self.sess.graph.get_tensor_by_name('W:0')
        self.H = self.sess.graph.get_tensor_by_name('H:0')
        self.output_tensors = [self.X, self.Y, self.W, self.H]
        self.dummy_feed = {self.input: dummy_tensor, self.training: False}
    elif self.arch == 'facenet':
        # Inputs
        self.input = self.sess.graph.get_tensor_by_name('input:0')
        self.phase_train = self.sess.graph.get_tensor_by_name('phase_train:0')
        # Outputs
        self.embeddings = self.sess.graph.get_tensor_by_name('embeddings:0')
        self.output_tensors = [self.embeddings]
        self.dummy_feed = {self.input: dummy_tensor, self.phase_train: False}
    else:
        cprint.fatal(f'Architecture {arch} is not supported', interrupt=True)

    # First (slower) inference
    cprint.info("Performing first inference...")
    self._forward_pass(self.dummy_feed)

    self.confidence_threshold = confidence_threshold
    cprint.ok("Detection network ready!")
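# Hypothetical instantiation of the constructor above, exercising the
# frozen-graph branch with a YOLOv3 graph; the class name (DetectionNetwork)
# and the .pb path are assumptions for illustration.
net = DetectionNetwork(arch='yolov3',
                       input_shape=(416, 416, 3),
                       frozen_graph='models/yolov3.pb',
                       dataset='coco',
                       confidence_threshold=0.5)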