def testMarker():
    cap = CAMERA_CAPTURE
    while True:
        ret, frame = cap.read()
        corners = numpy.float32([[100, 100], [200, 100], [200, 200], [100, 200]])
        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        mm = Marker(corners)
        print "Test 1: Drawing the marker"
        gray = mm.draw(gray)
        print "Test 2: Calculate Extrinsics"
        mm.calculateExtrinsics(MARKER_SIZE, MY_CAMERA)
        print "RVec : ", mm.Rvec, "\n"
        print "Tvec : ", mm.Tvec, "\n"
        if gray is not None:
            cv.imshow('frame', gray)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    print "Marker passed validation"
    cap.release()
    cv.destroyAllWindows()
def __init__(self):
    self.du = DU()
    self.vocab, self.recab = self.du.initialize_vocabulary()
    self.tag, self.retag = self.du.init_tag()  # load the tags and their corresponding ids
    self.ids_arr = []
    for line in open(self.du.ids_path):
        line = line.strip()
        if len(line) > 0:
            temp = line.split(' ')
            for i in range(len(temp)):
                try:
                    temp[i] = int(temp[i])
                except Exception:
                    temp[i] = 3
            self.ids_arr.append(temp)
        else:
            self.ids_arr.append([])
    # self.train = json.load(open(self.du.train_path))
    # self.dev = json.load(open(self.du.dev_path))
    # self.test = json.load(open(self.du.test_path))
    self.model = Marker(
        vocab_size=FLAGS.vocab_size,
        embedding_size=FLAGS.emd_size,
        memory_size=FLAGS.mem_size,
        label_size=FLAGS.tag_size,
        batch_size=FLAGS.batch_size,
        max_ut_size=FLAGS.max_ut_size,
        max_gradient_norm=FLAGS.max_gradient_norm,
        learning_rate=FLAGS.learning_rate,
        learning_rate_decay_factor=FLAGS.learning_rate_decay_factor,
        use_lstm=False,
        train_mode=FLAGS.train)
def __init__(self):
    super(bciApp, self).__init__(config_path=r'./config.js')
    self.PHASES = [
        {'name': 'start', 'next': 'test', 'duration': 1},
        {'name': 'test', 'next': 'relax', 'duration': 5},
        {'name': 'relax', 'next': 'test', 'duration': 3},
        {'name': 'rrelax', 'next': 'stop', 'duration': 1}]
    self.CODER = DefaultCoder()
    layout = {
        'screen': {'size': (1000, 250), 'color': (0, 0, 0), 'type': 'normal',
                   'Fps': 60, 'caption': 'this is an example'},
        'ssvep1': {'class': 'sinBlock',
                   'parm': {'size': (150, 150), 'position': (125, 125), 'borderon': True,
                            'bordercolor': (0, 0, 0), 'anchor': 'center', 'frequency': 6,
                            'visible': True, 'start': False}},
        'ssvep2': {'class': 'sinBlock',
                   'parm': {'size': (150, 150), 'position': (375, 125), 'borderon': True,
                            'bordercolor': (0, 0, 0), 'anchor': 'center', 'frequency': 7,
                            'visible': True, 'start': False}},
        'ssvep3': {'class': 'sinBlock',
                   'parm': {'size': (150, 150), 'position': (625, 125), 'borderon': True,
                            'bordercolor': (0, 0, 0), 'anchor': 'center', 'frequency': 8,
                            'visible': True, 'start': False}},
        'ssvep4': {'class': 'sinBlock',
                   'parm': {'size': (150, 150), 'position': (875, 125), 'borderon': True,
                            'bordercolor': (255, 0, 0), 'anchor': 'center', 'frequency': 9,
                            'visible': True, 'start': False}},
    }
    self.gui = GuiIF(None, layout)
    _ = multiprocessing.Process(target=guiengine_proc, args=(self.gui.args,))
    _.start()
    self.gui.wait()
    sp_ip = self.configs['signal_processing']['sp_host_ip']
    sp_port = self.configs['signal_processing']['sp_host_port']
    self.marker = Marker((sp_ip, sp_port))
    self.TEST_NUM = 3
    self.test_count = 1
def __init__(self, name):
    '''
    Constructor
    '''
    Marker.__init__(self, name)
    self.black_inside = -1
    self.GRID_SIZE = 2
    self.rot_conflict = 0
class Qualify(object):

    def __init__(self):
        print '<===INIT QUALIFY===>'
        self.marker = Marker()
        self.gate = Gate()

    def run(self):
        self.marker.run()
        self.gate.run()
def main():
    (rand_proc, tree_height) = parse_args()
    if isinstance(rand_proc, R3):
        marker = MarkerR3(tree_height)
    else:
        marker = Marker(tree_height)
    marker.run(rand_proc)
    print(marker.status())
    return 0
def __init__(self, vo, library):
    self.vo = vo
    self.library = library
    rospy.TopicSub('/vo/key', Frame, self.incoming_frame)
    self.pub_tmo = rospy.Publisher("/vo/tmo", Pose44)
    self.frameq = Queue()
    self.mark0 = Marker(1)
    self.mark1 = Marker(2)
def main():
    image_fn = '/home/joni/smallbrains-nas1/array1/pseudopupil_joni/Spectrometer/DPP_cal_1_ty2/snap_2020-02-21_14.15.08.088000_0.tiff'

    fig, ax = plt.subplots()
    marker = Marker(fig, ax, [image_fn], None)
    pinhole = marker.run()

    crop = pinhole[image_fn]

    pinhole_image = image_fn
    coodinates = get_xy_coordinates(image_fn)
class codebreaker(object):

    def __init__(self, output):
        self.output = output
        self.output.report_welcome()

    def start(self, secret):
        self.secret = secret
        self.marker = Marker(self.secret)

    def guess(self, guess):
        marks = "+" * self.marker.exact_match_count(guess) + \
                "-" * self.marker.number_match_count(guess)
        self.output.report_matches(marks)

    def has_won(self, guess):
        return self.marker.exact_match_count(guess) == len(self.secret)
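# Usage sketch (not from the original source): exercises the codebreaker class above
# with a minimal stand-in for its `output` collaborator. FakeOutput is a hypothetical
# helper defined only for illustration, and the expected marks assume the Marker used
# here follows the usual mastermind-style exact/number match counting.
class FakeOutput(object):
    def report_welcome(self):
        print("welcome to codebreaker")

    def report_matches(self, marks):
        print("marks: %s" % marks)


def demo_codebreaker():
    game = codebreaker(FakeOutput())
    game.start("1234")
    game.guess("1243")           # would print "++--" under mastermind-style scoring
    print(game.has_won("1234"))  # True once every digit matches exactly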
def setUp(self):
    """Prepare PTYs, MarkerEmulator and Marker."""
    logging.debug("setting up..")
    self.running = True
    """
    # create two connected PTYs
    cmd = ['socat', '-d', '-d', 'pty,raw,echo=0', 'pty,raw,echo=0']
    try:
        self.socat = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT)
        with self.socat.stdout as stdout:
            marker_pty = stdout.readline().split()[-1]
            if not os.path.exists(marker_pty):
                raise Exception('PTY creation failed.')
            client_pty = stdout.readline().split()[-1]
    except OSError:
        raise Exception('%s is not installed' % cmd[0])
    """
    marker_pty = b'/dev/pts/3'
    client_pty = b'/dev/pts/4'
    self.marker_emu = MarkerEmulator(str(marker_pty, encoding='UTF-8'))
    self.marker_emu.start()
    self.marker_client = Marker(str(client_pty, encoding='UTF-8'))
    self.marker_client.start()
def construct(self, csvFileIn, csvFileOut='grades_with_groundtruths.csv'):
    '''
    Assigns data to @self nmarkers, markers, paper and data.

    @param csvFileIn  the CSV file that contains the grades
    @param csvFileOut the CSV file that will store the @self data using a @self csvManager

    @see self.getNumberOfMarkers
    @see self.defineMeanOfReviews
    @see self.defineAvgOfReviews
    @see self.getPaperReviews
    @see CSV.extractGradesColumn
    @see CSV.printGroundTruth
    @see CSV.extractData
    '''
    self.csvManager = CSV(csvFileIn, csvFileOut)
    self.nmarkers = self.csvManager.getNumberOfMarkers()
    if self.nmarkers == 0:
        print("File is empty")
    else:
        for i in range(self.nmarkers):
            self.markers.append(Marker(self.csvManager.extractGradesColumn(i)))
        for i in range(1, len(self.markers[1].reviews) + 1):
            self.paper.add(self.defineMeanOfReviews(i),
                           self.defineAvgOfReviews(i),
                           self.getPaperReviews(i))
        self.csvManager.printGroundTruth(self)
        self.data = self.csvManager.extractData()
        self.main()
class App:
    def __init__(self) -> None:
        self._zero_metadata: Metadata = {}
        self._marker = Marker("api_route", default=self._zero_metadata)

    def _register_with(
        self, fn: t.Callable[..., t.Any], *, method: str, path: str, metadata: Metadata
    ) -> t.Callable[..., t.Any]:
        metadata["method"] = method.lower()
        metadata["path"] = path
        k = f'{metadata["method"]}::{metadata["path"]}'
        return self._marker.mark(fn, name=k, val=metadata)

    def post(
        self,
        path: str,
        metadata: t.Optional[Metadata] = None,
    ) -> t.Callable[[t.Callable[..., t.Any]], t.Callable[..., t.Any]]:
        metadata = metadata or {}
        return partial(self._register_with, method="post", path=path, metadata=metadata)

    @property
    def routes(self) -> t.Iterator[t.Tuple[t.Callable[..., t.Any], Metadata]]:
        for fns in self._marker.pool.values():
            for fn in fns:
                metadata = self._marker(fn)
                yield fn, metadata
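# Usage sketch (illustrative only): registers one handler on the App above and walks
# its routes. The route path, handler, and metadata keys are placeholders, and it is
# assumed that Marker.mark() returns the decorated function unchanged, as the
# decorator pattern in App.post suggests.
app = App()


@app.post("/items", metadata={"auth": "required"})
def create_item(payload: dict) -> dict:
    # the handler itself is untouched; only marker metadata is attached
    return {"created": payload}


for fn, meta in app.routes:
    print(fn.__name__, meta.get("method"), meta.get("path"))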
def _create_markers(steps, pois):
    markers = []
    for step in steps:
        poi = Poi.from_id(pois, step.id)
        marker = Marker(poi, step)
        markers.append(marker)
    return markers
def __init__(self):
    super(bciApp, self).__init__(config_path=r'./config.js')
    self.PHASES = [
        {'name': 'start', 'next': 'test', 'duration': 1},
        {'name': 'test', 'next': 'relax', 'duration': 7},
        {'name': 'relax', 'next': 'test', 'duration': 5},
        {'name': 'rrelax', 'next': 'stop', 'duration': 1}]
    self.CODER = DefaultCoder()
    w = win32api.GetSystemMetrics(win32con.SM_CXSCREEN)
    h = win32api.GetSystemMetrics(win32con.SM_CYSCREEN)
    camera_h = 2 * 240
    camera_w = 2 * 320
    layout = {
        'screen': {'size': (w, h), 'color': (0, 0, 0), 'type': 'normal',
                   'Fps': 60, 'caption': 'this is an example'},
        'camera': {'class': 'MjpegStream',
                   'parm': {'size': (camera_w, camera_h), 'position': (int(w/2), int(h/2)),
                            'anchor': 'center', 'visible': True, 'start': True,
                            'url': 'http://192.168.1.1:8080/?action=stream'}},
        'ssvep1': {'class': 'sinCircle',
                   'parm': {'radius': 65, 'position': (int(0.5*w-320-65), int(0.5*h-240+30)),
                            'text': 'forward', 'textsize': 20, 'frequency': 6.6,
                            'visible': True, 'start': False}},
        'ssvep2': {'class': 'sinCircle',
                   'parm': {'radius': 65, 'position': (int(0.5*w-320-65), int(0.5*h)),
                            'text': 'left', 'textsize': 20, 'frequency': 7.6,
                            'visible': True, 'start': False}},
        'ssvep3': {'class': 'sinCircle',
                   'parm': {'radius': 65, 'position': (int(0.5*w-320-65), int(0.5*h+240-30)),
                            'text': 'r-left', 'textsize': 20, 'frequency': 8.6,
                            'visible': True, 'start': False}},
        'ssvep4': {'class': 'sinCircle',
                   'parm': {'radius': 65, 'position': (int(0.5*w+320+65), int(0.5*h-240+30)),
                            'text': 'backward', 'textsize': 20, 'frequency': 9.3,
                            'visible': True, 'start': False}},
        'ssvep5': {'class': 'sinCircle',
                   'parm': {'radius': 65, 'position': (int(0.5*w+320+65), int(0.5*h)),
                            'text': 'right', 'textsize': 20, 'frequency': 12.2,
                            'visible': True, 'start': False}},
        'ssvep6': {'class': 'sinCircle',
                   'parm': {'radius': 65, 'position': (int(0.5*w+320+65), int(0.5*h+240-30)),
                            'text': 'r-right', 'textsize': 20, 'frequency': 13.8,
                            'visible': True, 'start': False}},
    }
    self.gui = GuiIF(None, layout)
    _ = multiprocessing.Process(target=guiengine_proc, args=(self.gui.args,))
    _.start()
    self.gui.wait()
    sp_ip = self.configs['signal_processing']['sp_host_ip']
    sp_port = self.configs['signal_processing']['sp_host_port']
    self.marker = Marker((sp_ip, sp_port))
    self.TEST_NUM = 12
    self.test_count = 1
    self.ROBOT = WIFIROBOT_CTR()
def __init__(self, mode, library):
    self.mode = mode
    self.library = library
    rospy.TopicSub('/videre/images', ImageArray, self.display_array)
    rospy.TopicSub('/videre/cal_params', String, self.display_params)
    rospy.TopicSub('/vo/tmo', Pose44, self.handle_corrections)
    self.viz_pub = rospy.Publisher("/overlay", Lines)
    self.vo_key_pub = rospy.Publisher("/vo/key", Frame)
    self.Tmo = Pose()
    self.mymarker1 = Marker(10)
    self.mymarkertrail = [Marker(11 + i) for i in range(10)]
    self.trail = []
    self.vis = Vis()
def set_new_position(self, points, offset=True, scale=1):
    res = Marker.set_new_position(self, points, offset, scale)
    le = len(points)
    # guard against empty input before indexing points[0]
    if le == 0 or type(points[0]) != tuple:
        return res
    for i, cor in enumerate(self.corners):
        cor.diag = vector(cor.p, points[(i + 2) % le])
        cor.set_colors(self.m_d.draw_img)
    return res
def set_new_position(self, points, offset=True, scale=1):
    res = Marker.set_new_position(self, points, offset, scale)
    le = len(points)
    # guard against empty input before indexing points[0]
    if le == 0 or type(points[0]) != tuple:
        return res
    for i, cor in enumerate(self.corners):
        cor.diag = vector(cor.p, points[(i + 2) % le])
        cor.set_colors(self.m_d.draw_img)
    return res
def border_loop_markers(self):
    output = list()
    count = 0
    for l in self.border_loops:
        points = l.to_list()
        if len(points) > 3:
            color = get_color(count)
            count += 1
            for p in points:
                output.append(Marker(p.x, p.y, color))
    return output
def play():
    sprites = [
        Marker(4, 4)
        # Lava(0),
        # Lava(1),
        # Lava(2),
        # Lava(3),
        # Lava(4),
        # Lava(5),
        # Lava(6),
        # Lava(7)
    ]
    launch_single_tile(sprites)
def __init__(self, mode, library):
    self.mode = mode
    self.library = library
    rospy.TopicSub('/videre/images', ImageArray, self.display_array)
    rospy.TopicSub('/videre/cal_params', String, self.display_params)
    rospy.TopicSub('/vo/tmo', Pose44, self.handle_corrections)
    self.viz_pub = rospy.Publisher("/overlay", Lines)
    self.vo_key_pub = rospy.Publisher("/vo/key", Frame)
    self.Tmo = Pose()
    self.mymarker1 = Marker(10)
    self.mymarkertrail = [Marker(11 + i) for i in range(10)]
    self.trail = []
    self.vis = Vis()
def find_markers(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #blur = cv2.medianBlur(gray, 5)
    blur = cv2.GaussianBlur(gray, (3, 3), 0)
    __, thresh = cv2.threshold(blur, 100, 255, cv2.THRESH_BINARY)
    contours, __ = cv2.findContours(thresh.copy(), cv2.RETR_TREE,
                                    cv2.CHAIN_APPROX_SIMPLE)
    markers = dict()
    for contour in contours:
        if small_area(contour):
            continue
        eps = 0.05 * cv2.arcLength(contour, closed=True)
        polygon = cv2.approxPolyDP(contour, eps, closed=True)
        if not_quadrilateral(polygon):
            continue
        polygon_fl = np.float32(polygon)
        tr_matrix = transform_matrix(polygon)
        transform = cv2.getPerspectiveTransform(polygon_fl, tr_matrix)
        # FIXME: Choose algorithm
        # Reuse thresholded image,
        # -- OR --
        # Run warpPerspective on "gray", threshold the result again.
        # In the latter case, the call to thresh.copy() is redundant.
        sq_marker = cv2.warpPerspective(thresh, transform, (WIDTH, HEIGHT))
        #__, sq_marker_bin = cv2.threshold(sq_marker, 0, 255,
        #                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        if no_black_border(sq_marker):
            continue
        marker = parse_marker(sq_marker)
        valid_marker, marker_id, rotations = validate_marker(marker)
        if not valid_marker:
            continue
        markers[marker_id] = Marker(marker_id, contour, polygon, rotations)
    return markers
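# Usage sketch (illustrative, not from the original module): runs find_markers() on a
# single image and prints what was detected. It assumes the helpers the function relies
# on (small_area, not_quadrilateral, transform_matrix, no_black_border, parse_marker,
# validate_marker, WIDTH, HEIGHT, Marker) live in the same module; the path is a placeholder.
import cv2


def demo_find_markers(path="frame.png"):
    img = cv2.imread(path)
    if img is None:
        raise IOError("could not read %s" % path)
    detected = find_markers(img)
    for marker_id, marker in detected.items():
        print("detected marker %s -> %r" % (marker_id, marker))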
def updatemarker():
    global DATA
    # check the data against the marker regular expression to check if there is
    # complete marker information in the received data
    MARKER_RE_MATCH = re.search(MARKER_REGEX, DATA)
    # as long as we still have more complete marker information...
    while MARKER_RE_MATCH is not None:
        # find the first marker information from the received data
        MARKER = Marker(MARKER_RE_MATCH.group())
        # show that we have found something
        #print("Found marker {0}.".format(MARKER.number))
        # update information in the marker collection
        MARKERS.update_marker(MARKER)
        # remove outdated observations
        MARKERS.clean_outdated_markers(MARKER_TIMEOUT)
        # remove the processed data from the buffer
        DATA = DATA[MARKER_RE_MATCH.end():]
        # look for the next marker in the buffer
        MARKER_RE_MATCH = re.search(MARKER_REGEX, DATA)

    # send something arbitrary to the Zigbee dongle. This has no real function
    # except to demonstrate how to send something.
    #ZIGBEE.write(b'Hello')

    if len(DATA) > BUFFER_SIZE:
        # something is going wrong, flush garbage data
        DATA = ""

    # indicate that the markers are global
    global ORIGIN
    global ROBOTA
    global OB1
    global OB2
    global OB3
    global FINISH
    # find all markers
    ORIGIN = MARKERS.get_marker('origin')
    ROBOTA = MARKERS.get_marker('robotA')
    OB1 = MARKERS.get_marker('ob1')
    OB2 = MARKERS.get_marker('ob2')
    OB3 = MARKERS.get_marker('ob3')
    FINISH = MARKERS.get_marker('finish')
def index():
    user_agent = request.headers.get('User-Agent')
    data1 = request.get_data()
    data_json = json.loads(data1)
    poi_list = data_json["poi_list"]
    # for poi in poi_list:
    #     lat = poi["lat"]
    #     lng = poi["lon"]
    tuple_latlng_str = ["8.5208, 47.37288", "8.53410, 47.37290"]
    marker = tuple_latlng_str
    context = Context()
    # if center is not None:
    #     context.set_center(parse_latlng(center))
    # if zoom is not None:
    #     context.set_zoom(zoom)
    for coords in marker:
        context.add_object(Marker(parse_latlng(coords)))
    image = context.render(264, 264)
    return '<h1>Hello World! Your browser is {}</h1>'.format(user_agent)
def __init__(self, _world, _filename=None, _friction=None, _id=None, _traditional=False):
    self.world = _world
    self.filename = _filename
    self.friction = _friction
    if self.filename is not None:
        _b_trad = 1 if _traditional else 0
        # print "=" * 80
        # print _b_trad
        # print "=" * 80
        self.id = papi.addSkeleton(self.world.id, _filename, _friction, _b_trad)
    else:
        self.id = _id

    # Initialize dofs
    _ndofs = papi.getSkeletonNumDofs(self.world.id, self.id)
    self.dofs = [Dof(self, i) for i in range(_ndofs)]
    self.name_to_dof = {dof.name: dof for dof in self.dofs}

    # Initialize bodies
    _nbodies = papi.getSkeletonNumBodies(self.world.id, self.id)
    self.bodies = [Body(self, i) for i in range(_nbodies)]
    self.name_to_body = {body.name: body for body in self.bodies}

    self.controller = None

    # Initialize markers
    self.markers = list()
    for body in self.bodies:
        for j in range(body.num_markers()):
            m = Marker(body, j)
            self.markers.append(m)
def __init__(self, stims, Q_c2g, E_g2c, server_address):
    """
    stims: dict defining the stimuli, e.g.
        stims = {'cue': {'class': 'Block', 'parm': {'size': (100, 40), 'position': (0, 0)}}}
    Q_c2g: multiprocessing.Queue, used for accepting stimulus control commands from the core process
    kwargs:
        server_address: the target server's address to accept markers
    properties describing the screen:
        size: (width, height)
        type: fullscreen/normal
        frameless: True/False
        color: (R, G, B)
        caption: string
        Fps: int; it is strongly suggested to set Fps to the same value as the system's Fps
    """
    self.Q_c2g = Q_c2g
    self.E_g2c = E_g2c
    self.marker_on = False
    self.marker_event = {}
    self.stp = False
    self.lock = threading.Lock()
    pygame.init()
    # initialize the screen
    # SCREEN_SYNC is True only when the window environment variable is set successfully and fullscreen is used
    if stims['screen']['type'].lower() == 'fullscreen':
        self.screen = pygame.display.set_mode(
            (0, 0), FULLSCREEN | DOUBLEBUF | HWSURFACE, 32)
    else:
        # center the window
        if OS == 'windows':
            w, h = stims['screen']['size']
            x = int((SCRW - w) / 2.)
            y = int((SCRH - 50 - h) / 2.) - 50  # subtract the taskbar height
            os.environ['SDL_VIDEO_WINDOW_POS'] = '%i,%i' % (x, y)
        self.screen = pygame.display.set_mode(stims['screen']['size'],
                                              NOFRAME | DOUBLEBUF, 32)
        SCREEN_SYNC = False
    self.screen_color = stims['screen']['color']
    self.screen.fill(self.screen_color)
    pygame.display.set_caption(stims['screen']['caption'])
    self.Fps = stims['screen']['Fps']
    del stims['screen']
    self.ask_4_update_gui = False      # set when the worker thread receives a refresh request and asks the main loop to redraw
    self.update_in_this_frame = False  # set when the main loop confirms the redraw happens within one real display frame
    self.__update_per_frame_list = []  # objects to refresh every frame

    if server_address is None:
        class Marker():
            def __init__(self, sa):
                pass

            def send_marker(self, marker):
                raise Exception(
                    'marker_sender was not initialized because the server_address parameter was not given')

    self.marker_sender = Marker(server_address)

    # register the stimuli and create their instances
    for ID in stims:
        element = stims[ID]
        clas = element['class']
        if clas in MODULES:
            self.stimuli[ID] = MODULES[clas](self.screen, **element['parm'])

    backthread = threading.Thread(target=self.backthreadfun, args=(), daemon=True)
    backthread.start()
def test_delete_at_should_return_false_when_guess_not_present(self):
    m = Marker("")
    assert not m.delete_first("5", list("1234"))
# fragment of the main receive loop
# read new data (if any) from the TCP connection
try:
    DATA += LMSOCKET.recv(BUFFER_SIZE)
except socket.error:
    # no data is available on the socket, sleep for 100 ms
    sleep(0.1)
    # try again
    continue

# check the data against the marker regular expression to check if there is
# complete marker information in the received data
MARKER_RE_MATCH = re.search(MARKER_REGEX, DATA)
# as long as we still have more complete marker information...
while MARKER_RE_MATCH is not None:
    # find the first marker information from the received data
    MARKER = Marker(MARKER_RE_MATCH.group())
    # show that we have found something
    #print("Found marker {0}.".format(MARKER.number))
    # update information in the marker collection
    MARKERS.update_marker(MARKER)
    # remove outdated observations
    MARKERS.clean_outdated_markers(MARKER_TIMEOUT)
    # remove the processed data from the buffer
    DATA = DATA[MARKER_RE_MATCH.end():]
    # look for the next marker in the buffer
    MARKER_RE_MATCH = re.search(MARKER_REGEX, DATA)

# send something arbitrary to the Zigbee dongle. This has no real function
# except to demonstrate how to send something.
ZIGBEE.write(b'Hello')

if len(DATA) > BUFFER_SIZE:
def __init__(self, color):
    Marker.__init__(self, color, 3, 0.005, False)
class bciApp(bciCore):
    def __init__(self):
        super(bciApp, self).__init__(config_path=r'./config.js')
        self.PHASES = [
            {'name': 'start', 'next': 'test', 'duration': 1},
            {'name': 'test', 'next': 'relax', 'duration': 5},
            {'name': 'relax', 'next': 'test', 'duration': 3},
            {'name': 'rrelax', 'next': 'stop', 'duration': 1}]
        self.CODER = DefaultCoder()
        layout = {
            'screen': {'size': (1000, 250), 'color': (0, 0, 0), 'type': 'normal',
                       'Fps': 60, 'caption': 'this is an example'},
            'ssvep1': {'class': 'sinBlock',
                       'parm': {'size': (150, 150), 'position': (125, 125),
                                'bordercolor': (0, 0, 0), 'anchor': 'center',
                                'frequency': 6, 'visible': True, 'start': False}},
            'ssvep2': {'class': 'sinBlock',
                       'parm': {'size': (150, 150), 'position': (375, 125),
                                'bordercolor': (0, 0, 0), 'anchor': 'center',
                                'frequency': 7, 'visible': True, 'start': False}},
            'ssvep3': {'class': 'sinBlock',
                       'parm': {'size': (150, 150), 'position': (625, 125),
                                'bordercolor': (0, 0, 0), 'anchor': 'center',
                                'frequency': 8, 'visible': True, 'start': False}},
            'ssvep4': {'class': 'sinBlock',
                       'parm': {'size': (150, 150), 'position': (875, 125),
                                'bordercolor': (0, 0, 0), 'anchor': 'center',
                                'frequency': 9, 'visible': True, 'start': False}},
        }
        self.gui = GuiIF(None, layout)
        _ = multiprocessing.Process(target=guiengine_proc, args=(self.gui.args,))
        _.start()
        self.gui.wait()
        sp_ip = self.configs['signal_processing']['sp_host_ip']
        sp_port = self.configs['signal_processing']['sp_host_port']
        self.marker = Marker((sp_ip, sp_port))
        self.TEST_NUM = 80
        self.test_count = 1

    def transition(self, phase):
        write_log(phase)
        if phase == 'test':
            self.gui.update(
                {'ssvep1': {'start': True, 'bordercolor': (0, 0, 0)},
                 'ssvep2': {'start': True, 'bordercolor': (0, 0, 0)},
                 'ssvep3': {'start': True, 'bordercolor': (0, 0, 0)},
                 'ssvep4': {'start': True, 'bordercolor': (0, 0, 0)}}, {})
            t = global_clock()
            print(t)
            self.marker.send_marker(
                {'process': {'value': [1], 'timestamp': [t]}})  # test started
        elif phase == 'relax':
            self.gui.update(
                {'ssvep1': {'start': False},
                 'ssvep2': {'start': False},
                 'ssvep3': {'start': False},
                 'ssvep4': {'start': False}}, {})
            self.marker.send_marker(
                {'process': {'value': [2], 'timestamp': [global_clock()]}})  # test finished
            self.test_count += 1
            if self.test_count > self.TEST_NUM:
                self.change_phase('rrelax')

    def stop_run(self):
        self.gui.quit()

    def process(self, result):
        if result is not None:
            print(result)
            mapp = {'6': 'ssvep1', '7': 'ssvep2', '8': 'ssvep3', '9': 'ssvep4'}
            self.gui.update({mapp[str(result)]: {'bordercolor': (255, 0, 0)}}, {})
def test_delete_at_should_delete_when_guess_present(self):
    m = Marker("")
    lst = list("1234")
    m.delete_first("3", lst)
    assert list("124") == lst
def test_total_match_count_returns_4_for_4_total_matches(self):
    m = Marker("1234")
    assert m.total_match_count("1324") == 4
def test_number_match_count_returns_2_for_2_number_matches(self):
    m = Marker("1234")
    assert m.exact_match_count("1324") == 2
import tornado.ioloop
import tornado.websocket
import tornado.options
import tornado.web
import tornado.escape
import uuid
import os
import redis

from marker import Marker
from tornado.options import define, options
from analyzer import Analyzer

define("port", default=8880, help="run on the given port", type=int)

DATASTORE_KEY = "15s_learning_data_"

G_Marker = Marker()
# store data
#G_Database = redis.Redis(host='localhost', port=6379, db=0)


class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.render("index.html")


class TextSocketHandler(tornado.websocket.WebSocketHandler):
    waiters = set()
    Store_Number = 101
    Store_y = 0
    cache = []
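# Hypothetical wiring (not part of the original snippet): one way the handlers above
# could be mounted in a tornado Application and served on the configured port. The
# URL paths are placeholders chosen for illustration.
def make_app():
    return tornado.web.Application([
        (r"/", MainHandler),
        (r"/textsocket", TextSocketHandler),
    ])


if __name__ == "__main__":
    tornado.options.parse_command_line()
    app = make_app()
    app.listen(options.port)
    tornado.ioloop.IOLoop.current().start()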
def __init__(self):
    print '<===INIT QUALIFY===>'
    self.marker = Marker()
    self.gate = Gate()
def __init__(self) -> None:
    self._zero_metadata: Metadata = {}
    self._marker = Marker("api_route", default=self._zero_metadata)
def __init__(self, code_nr=None):
    Marker.__init__(self, code_nr)
    if code_nr is not None:
        self.init(code_nr)
from marker import Marker
import random

learn = Marker()
learn.load()

x = {"speed": 0, "sentenceSpeed": 0}
i = 0
while i < 10:
    x["speed"] = random.random() * 5 + 3
    x["sentenceSpeed"] = random.random() * 3 / 15
    i += 1
    y0, y1 = learn.cal_score(x)
    print(str(x["speed"]) + "word/s : score" + str(y0 * 100))
    print(str(x["sentenceSpeed"]) + "space/s : score" + str(y1 * 100))
def detectRectangles(self, thresImg):
    markerCandidates = []
    minSize = self.minSize * max(thresImg.shape) * 4
    maxSize = self.maxSize * max(thresImg.shape) * 4
    self.thres2 = thresImg
    contours2, hierarchy2 = cv.findContours(self.thres2, cv.RETR_TREE,
                                            cv.CHAIN_APPROX_NONE)
    counterIndx = 0
    for contour in contours2:
        if minSize < contour.shape[0] < maxSize:
            approxCurve = cv.approxPolyDP(contour, contour.shape[0] * 0.05, True)
            approxCurve = numpy.vstack(approxCurve)
            if approxCurve.shape[0] == 4:
                if cv.isContourConvex(approxCurve):
                    minDist = 1e10
                    for j in range(0, 4):
                        d = math.sqrt(
                            (approxCurve[j][0] - approxCurve[(j + 1) % 4][0]) ** 2 +
                            (approxCurve[j][1] - approxCurve[(j + 1) % 4][1]) ** 2)
                        if d < minDist:
                            minDist = d
                    if minDist > 10:
                        m = Marker(approxCurve)
                        m.candidateIdx = counterIndx
                        markerCandidates.append(m)
        counterIndx += 1

    # arrange corners anti-clockwise
    swapped = []
    for i in range(0, len(markerCandidates)):
        dx1 = markerCandidates[i].corners[1][0] - markerCandidates[i].corners[0][0]
        dy1 = markerCandidates[i].corners[1][1] - markerCandidates[i].corners[0][1]
        dx2 = markerCandidates[i].corners[2][0] - markerCandidates[i].corners[0][0]
        dy2 = markerCandidates[i].corners[2][1] - markerCandidates[i].corners[0][1]
        o = (dx1 * dy2) - (dy1 * dx2)
        if o < 0.0:
            markerCandidates[i].corners[1], markerCandidates[i].corners[3] = \
                markerCandidates[i].corners[3], markerCandidates[i].corners[1]
            swapped.append(True)
        else:
            swapped.append(False)

    # remove those elements whose corners are too close to each other
    return markerCandidates
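# Illustration (not from the original detector): the sign of the 2-D cross product
# dx1*dy2 - dy1*dx2 used above tells which way corners 0 -> 1 -> 2 wind; a negative
# value is what triggers the corner swap in detectRectangles. Toy coordinates only,
# ignoring the fact that image y-axes point downward.
def corners_keep_order(c0, c1, c2):
    dx1, dy1 = c1[0] - c0[0], c1[1] - c0[1]
    dx2, dy2 = c2[0] - c0[0], c2[1] - c0[1]
    return (dx1 * dy2 - dy1 * dx2) >= 0.0


print(corners_keep_order((0, 0), (1, 0), (1, 1)))  # True: ordering is kept
print(corners_keep_order((0, 0), (0, 1), (1, 1)))  # False: corners 1 and 3 would be swapped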
def __init__(self, code_nr=None):
    Marker.__init__(self, code_nr)
    if code_nr is not None:
        self.init(code_nr)
class VODemo:
    vo = None
    frame = 0

    def __init__(self, mode, library):
        self.mode = mode
        self.library = library
        rospy.TopicSub('/videre/images', ImageArray, self.display_array)
        rospy.TopicSub('/videre/cal_params', String, self.display_params)
        rospy.TopicSub('/vo/tmo', Pose44, self.handle_corrections)
        self.viz_pub = rospy.Publisher("/overlay", Lines)
        self.vo_key_pub = rospy.Publisher("/vo/key", Frame)
        self.Tmo = Pose()
        self.mymarker1 = Marker(10)
        self.mymarkertrail = [Marker(11 + i) for i in range(10)]
        self.trail = []
        self.vis = Vis()

    def display_params(self, iar):
        if not self.vo:
            cam = camera.VidereCamera(iar.data)
            print "cam.params", cam.params
            self.vo = VisualOdometer(cam)
            self.started = None
            if self.mode == 'learn':
                self.library = set()
            self.previous_keyframe = None
            self.know_state = 'lost'
            self.best_show_pose = None
            self.mymarker1.floor()

    def handle_corrections(self, corrmsg):
        print "GOT CORRECTION AT", time.time()
        Tmo_pose = Pose()
        Tmo_pose.fromlist(corrmsg.v)
        self.Tmo = Tmo_pose
        self.know_state = 'corrected'

    def display_array(self, iar):
        diag = ""
        af = None
        if self.vo:
            if not self.started:
                self.started = time.time()
            imgR = imgAdapted(iar.images[0])
            imgL = imgAdapted(iar.images[1])
            af = SparseStereoFrame(imgL, imgR)

            if 1:
                if self.mode == 'play':
                    pose = self.vo.handle_frame(af)
                if self.mode == 'learn':
                    pose = self.vo.handle_frame(af)
                    if (af.id != 0) and (self.vo.inl < 80):
                        print "*** LOST TRACK ***"
                        #sys.exit(1)
                    self.library.add(self.vo.keyframe)
                else:
                    #diag = "best match %d from %d in library" % (max(probes)[0], len(self.library))
                    pass
                diag = "%d/%d inliers, moved %.1f library size %d" % (self.vo.inl, len(af.kp), pose.distance(), len(self.library))

                if self.mode == 'play':
                    kf = self.vo.keyframe
                    if kf != self.previous_keyframe:
                        f = Frame()
                        f.id = kf.id
                        f.pose = Pose44(kf.pose.tolist())
                        f.keypoints = [Keypoint(x, y, d) for (x, y, d) in kf.kp]
                        f.descriptors = [Descriptor(d) for d in kf.descriptors]
                        print "ASKING FOR MATCH AT", time.time()
                        self.vo_key_pub.publish(f)
                        self.previous_keyframe = kf
                    if kf.inl < 50 or self.vo.inl < 50:
                        self.know_state = 'lost'
                    else:
                        self.know_state = {'lost': 'lost', 'uncertain': 'uncertain', 'corrected': 'uncertain'}[self.know_state]

                result_pose = af.pose
                if self.mode == 'learn':
                    self.mymarker1.frompose(af.pose, self.vo.cam, (255, 255, 255))
                else:
                    if self.best_show_pose and self.know_state == 'lost':
                        inmap = self.best_show_pose
                    else:
                        Top = af.pose
                        Tmo = self.Tmo
                        inmap = Tmo * Top
                        if self.know_state != 'lost':
                            self.best_show_pose = inmap
                    if self.know_state != 'lost' or not self.best_show_pose:
                        color = {'lost': (255, 0, 0), 'uncertain': (127, 127, 0), 'corrected': (0, 255, 0)}[self.know_state]
                        self.mymarker1.frompose(inmap, self.vo.cam, color)
                        if 0:
                            self.trail.append(inmap)
                            self.trail = self.trail[-10:]
                            for i, p in enumerate(self.trail):
                                self.mymarkertrail[i].frompose(p, color)
                #print af.diff_pose.xform(0,0,0), af.pose.xform(0,0,0)

            if self.frame > 5 and ((self.frame % 10) == 0):
                inliers = self.vo.pe.inliers()
                pts = [(1, int(x0), int(y0)) for ((x0, y0, d0), (x1, y1, d1)) in inliers]
                self.vis.show(iar.images[1].data, pts)

            if False and self.vo.pairs != []:
                ls = Lines()
                inliers = self.vo.pe.inliers()
                lr = "left_rectified"
                ls.lines = [Line(lr, 0, 255, 0, x0, y0 - 2, x0, y0 + 2) for ((x0, y0, d0), (x1, y1, d1)) in inliers]
                ls.lines += [Line(lr, 0, 255, 0, x0 - 2, y0, x0 + 2, y0) for ((x0, y0, d0), (x1, y1, d1)) in inliers]
                rr = "right_rectified"
                #ls.lines += [Line(rr, 0, 255, 0, x0 - d0, y0 - 2, x0 - d0, y0 + 2) for ((x0, y0, d0), (x1, y1, d1)) in inliers]
                #ls.lines += [Line(rr, 0, 255, 0, x0 - 2 - d0, y0, x0 + 2 - d0, y0) for ((x0, y0, d0), (x1, y1, d1)) in inliers]
                self.viz_pub.publish(ls)

            if (self.frame % 30) == 0:
                took = time.time() - self.started
                print "%4d: %5.1f [%f fps]" % (self.frame, took, self.frame / took), diag

            self.frame += 1

        #print "got message", len(iar.images)
        #print iar.images[0].width
        if SEE:
            right = ut.ros2cv(iar.images[0])
            left = ut.ros2cv(iar.images[1])
            hg.cvShowImage('channel L', left)
            hg.cvShowImage('channel R', right)
            hg.cvWaitKey(5)

    def dump(self):
        print
        print self.vo.name()
        self.vo.summarize_timers()
        if self.mode == 'learn':
            print "DUMPING LIBRARY", len(self.library)
            f = open("library.pickle", "w")
            pickle.dump(self.vo.cam, f)
            db = [(af.id, af.pose, af.kp, af.descriptors) for af in self.library]
            pickle.dump(db, f)
            f.close()
            print "DONE"
class MarkerTest(object):
    """Implements methods necessary for unit tests."""

    def __init__(self, *args, **kwargs):
        """Initializes test with or without initial preview check."""
        super(MarkerTest, self).__init__(*args, **kwargs)

    def setUp(self):
        """Prepare PTYs, MarkerEmulator and Marker."""
        logging.debug("setting up..")
        self.running = True
        """
        # create two connected PTYs
        cmd = ['socat', '-d', '-d', 'pty,raw,echo=0', 'pty,raw,echo=0']
        try:
            self.socat = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT)
            with self.socat.stdout as stdout:
                marker_pty = stdout.readline().split()[-1]
                if not os.path.exists(marker_pty):
                    raise Exception('PTY creation failed.')
                client_pty = stdout.readline().split()[-1]
        except OSError:
            raise Exception('%s is not installed' % cmd[0])
        """
        marker_pty = b'/dev/pts/3'
        client_pty = b'/dev/pts/4'
        self.marker_emu = MarkerEmulator(str(marker_pty, encoding='UTF-8'))
        self.marker_emu.start()
        self.marker_client = Marker(str(client_pty, encoding='UTF-8'))
        self.marker_client.start()

    def tearDown(self):
        """Clean up and shut down."""
        logging.debug("tearing down..")
        self.marker_client.running = False
        del self.marker_client
        self.marker_emu.running = False
        del self.marker_emu
        #os.kill(self.socat.pid, signal.SIGINT)

    def check_commands_executed(self):
        """Checks if all commands that got sent were executed."""
        self.assertEqual(self.marker_client.count['ST'].done,
                         self.marker_client.count['ST'].tbd)

    def move(self, move_method, *args, **kwargs):
        """Move test wrapper."""
        done_before = self.marker_client.count['ST'].done
        move_method(*args, **kwargs)
        time.sleep(1)
        # independent movement check
        self.assertEqual(self.marker_client.count['ST'].done, done_before + 1)

    def random_x_position(self, min=0.01, max=None):
        """Returns a random, but valid x position."""
        if max is None:
            max = self.marker_client.MAX_X
        return random.randint(min * 100, max * 100) / 100.0

    def random_y_position(self, min=0.01, max=None):
        """Returns a random, but valid y position."""
        if max is None:
            max = self.marker_client.MAX_Y
        return random.randint(min * 100, max * 100) / 100.0
def run(self):
    """
    Main application function.

    Starts the image stream from a real or simulated camera (or loads a
    single image); initialises any pipeline modules; and then enters the
    main pipeline processing loop. Once in the loop the pipeline runs until
    a shutdown flag is set. The message queue from the simulator, if there
    is one, is checked on each loop iteration for image data and
    synchronisation messages (such as a shutdown message).
    """
    self.running = True

    if self.fwcam and not self.single_img:
        self.fwcam.start(interactive=True)
        time.sleep(1)
        self.orig = np.copy(np.asarray(self.fwcam.current_image))
        self.canv = np.copy(self.orig)
        self.init_output = Pipeline_Output(sim=False)
        self.init_output.cam = Camera_Vals(camera_id='chameleon1')
        self.init_output.markers.append(Marker(cam=self.init_output.cam))
    elif self.options.simulate is not None:
        self.q2sim = multiprocessing.Queue()
        self.q2pipe = multiprocessing.Queue()
        queues = self.q2sim, self.q2pipe
        args = queues, self.options
        process = multiprocessing.Process(name='child', target=GL_Simulator, args=args)
        self.processes.append(process)
        process.start()
        self.init_output = Pipeline_Output(sim=True)
        self.q2sim.put(copy.deepcopy(self.init_output))
        incoming = self.q2pipe.get()
        if 'stop' in incoming:
            self.shutdown()
        elif 'simulglob' in incoming:
            _, self.orig, output = incoming
            self.outputs.append(copy.deepcopy(output))
            self.init_output.cam = self.outputs[0].cam

    m = []
    if self.options.nr_modules == 0:
        self.logger.info('running an empty pipeline')
    else:
        if self.options.nr_modules >= 1:
            self.modules.append(ContourFinder(pipeline=self))
        if self.options.nr_modules >= 2:
            self.modules.append(EllipseFitter(pipeline=self))
        if self.options.nr_modules >= 3:
            self.modules.append(PoseEstimatorA(pipeline=self))
        self.logger.info('running with %d modules' % self.options.nr_modules)

    if self.options.windows:
        for module in self.modules:
            if not (module.__class__.__name__ == 'PoseEstimatorA'):
                self.windows.append(module.__class__.__name__)

    if self.single_img:
        for module in self.modules:
            module.run()
            if self.options.windows:
                cv2.imshow(module.__class__.__name__, self.canv)
                cv2.waitKey(2)
        time.sleep(5)
        self.shutdown()

    while self.running:
        if self.fwcam:
            self.orig = np.copy(np.asarray(self.fwcam.current_image))
            if self.options.windows:
                cv2.imshow("original", self.orig)
        elif (self.options.simulate is not None) and self.outputs[-1].end_time:
            incoming = self.q2pipe.get()
            if 'stop' in incoming:
                self.running = False
                continue
            elif 'simulglob' in incoming:
                _, self.orig, output = incoming
                self.outputs.append(copy.deepcopy(output))
                self.new_output = True
            else:
                self.logger.error('unknown in queue: \'%s\'' % incoming)
                self.shutdown()

        self.canv = np.copy(self.orig)

        for module in self.modules:
            module.run()
            classname = module.__class__.__name__
            if not (self.options.windows and classname == 'PoseEstimatorA'):
                cv2.imshow(module.__class__.__name__, self.canv)

        self.loops += 1
        self.outputs[-1].complete()

        if self.ellipses:
            self.ellipses = None

        if self.options.windows:
            cv2.waitKey(2)

        if time.time() - self.start >= self.options.simtime:
            self.running = False

    self.shutdown()
class GuiEngine():
    stimuli = {}
    __release_ID_list = []

    def __init__(self, stims, Q_c2g, E_g2c, server_address):
        """
        stims: dict defining the stimuli, e.g.
            stims = {'cue': {'class': 'Block', 'parm': {'size': (100, 40), 'position': (0, 0)}}}
        Q_c2g: multiprocessing.Queue, used for accepting stimulus control commands from the core process
        kwargs:
            server_address: the target server's address to accept markers
        properties describing the screen:
            size: (width, height)
            type: fullscreen/normal
            frameless: True/False
            color: (R, G, B)
            caption: string
            Fps: int; it is strongly suggested to set Fps to the same value as the system's Fps
        """
        self.Q_c2g = Q_c2g
        self.E_g2c = E_g2c
        self.marker_on = False
        self.marker_event = {}
        self.stp = False
        self.lock = threading.Lock()
        pygame.init()
        # initialize the screen
        # SCREEN_SYNC is True only when the window environment variable is set successfully and fullscreen is used
        if stims['screen']['type'].lower() == 'fullscreen':
            self.screen = pygame.display.set_mode(
                (0, 0), FULLSCREEN | DOUBLEBUF | HWSURFACE, 32)
        else:
            # center the window
            if OS == 'windows':
                w, h = stims['screen']['size']
                x = int((SCRW - w) / 2.)
                y = int((SCRH - 50 - h) / 2.) - 50  # subtract the taskbar height
                os.environ['SDL_VIDEO_WINDOW_POS'] = '%i,%i' % (x, y)
            self.screen = pygame.display.set_mode(stims['screen']['size'],
                                                  NOFRAME | DOUBLEBUF, 32)
            SCREEN_SYNC = False
        self.screen_color = stims['screen']['color']
        self.screen.fill(self.screen_color)
        pygame.display.set_caption(stims['screen']['caption'])
        self.Fps = stims['screen']['Fps']
        del stims['screen']
        self.ask_4_update_gui = False      # set when the worker thread receives a refresh request and asks the main loop to redraw
        self.update_in_this_frame = False  # set when the main loop confirms the redraw happens within one real display frame
        self.__update_per_frame_list = []  # objects to refresh every frame

        if server_address is None:
            class Marker():
                def __init__(self, sa):
                    pass

                def send_marker(self, marker):
                    raise Exception(
                        'marker_sender was not initialized because the server_address parameter was not given')

        self.marker_sender = Marker(server_address)

        # register the stimuli and create their instances
        for ID in stims:
            element = stims[ID]
            clas = element['class']
            if clas in MODULES:
                self.stimuli[ID] = MODULES[clas](self.screen, **element['parm'])

        backthread = threading.Thread(target=self.backthreadfun, args=(), daemon=True)
        backthread.start()

    def backthreadfun(self):
        # receive refresh-request dicts
        # arg = self.Q_c2g.get()
        # possible forms of arg:
        # 1. a single string: '_q_u_i_t_' -> quit flag
        # 2. a list: [stimulus settings, marker] -> stimulus settings plus a marker
        # 3. marker: e.g. {'mkr1': {'value': [0]}}
        while True:
            arg = self.Q_c2g.get()
            if arg == '_q_u_i_t_':
                self.stp = True  # used to terminate the main loop
                break
            stimulus_arg, self.marker_event = arg  # marker is a dict
            self.lock.acquire()
            [self.stimuli[id].reset(**stimulus_arg[id]) for id in stimulus_arg]  # update the parameters of the stimulus instances
            self.ask_4_update_gui = True  # request a refresh
            self.lock.release()
        print('[guiengine] sub thread ended')

    def StartRun(self):
        print('[guiengine] process started')
        self.E_g2c.set()
        clock = pygame.time.Clock()
        # END = 0
        while True:
            self.screen.fill(self.screen_color)
            if self.ask_4_update_gui:  # the worker thread requested a refresh
                self.update_in_this_frame = True  # refresh within this frame
                self.ask_4_update_gui = False
            # .items() turns the stimuli dict into a list of tuples, where k[0] is the ID
            # and k[1] the stimulus instance; sort by the layer attribute
            stis = sorted(self.stimuli.items(), key=lambda k: k[1].layer)
            [s[1].show() for s in stis]  # draw in layer order; the larger the layer, the closer to the top
            pygame.display.flip()  # this frame is done
            if not SCREEN_SYNC:
                clock.tick(self.Fps)
            # a refresh was requested and completed within this frame
            if self.update_in_this_frame:
                _clk = global_clock()
                if len(self.marker_event) > 0:  # a marker was indeed received
                    for ky in self.marker_event:
                        # record the timestamp
                        self.marker_event[ky]['timestamp'] = _clk
                    self.marker_sender.send_marker(self.marker_event)
                self.update_in_this_frame = False
            pygame.event.get()
            if self.stp:
                break  # can only be stopped by the controller
        pygame.quit()
        [self.stimuli[id].release() for id in self.stimuli]  # release the stimulus instances
        print('[guiengine] process exit')
def start(self, secret):
    self.secret = secret
    self.marker = Marker(self.secret)
def test_delete_at_should_return_true_when_guess_present(self):
    m = Marker("")
    assert m.delete_first("3", list("1234"))
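# Minimal sketch of a Marker that would satisfy the delete/match tests above. This is
# an assumption inferred from the tests, not the project's actual implementation:
# exact_match_count counts same digit at same index, total_match_count counts digits
# present anywhere (with multiplicity), number_match_count is the difference, and
# delete_first removes one occurrence in place and reports whether it was found.
class MarkerSketch(object):
    def __init__(self, secret):
        self.secret = secret

    def exact_match_count(self, guess):
        return sum(1 for s, g in zip(self.secret, guess) if s == g)

    def total_match_count(self, guess):
        pool = list(self.secret)
        return sum(1 for g in guess if self.delete_first(g, pool))

    def number_match_count(self, guess):
        return self.total_match_count(guess) - self.exact_match_count(guess)

    def delete_first(self, item, items):
        if item in items:
            items.remove(item)
            return True
        return False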