def __init__( self, screen, x, y, width, height, vertical, frame1 ):
    log.debug( "SplitFrame.__init__" )
    Frame.__init__( self, screen, x, y, width, height )
    self.split_dragging = False
    self.vertical = vertical
    if vertical:
        self.split = height
    else:
        self.split = width
    self.split = self.split >> 1
    self.frame1 = frame1
    if vertical:
        self.frame1.moveresize( x, y, self.width, self.height - self.split )
        # this is a horrible hack to get around circular importing
        self.frame2 = self.frame1.topmost_child().__class__(
            self.screen,
            self.x,
            self.y + self.split + 4,
            self.width,
            self.height - self.split - 4 )
    else:
        self.frame1.moveresize( x, y, self.width - self.split, self.height )
        self.frame2 = self.frame1.topmost_child().__class__(
            self.screen,
            self.x + self.split + 4,
            self.y,
            self.width - self.split - 4,
            self.height )
    frame1.tritium_parent.replace_me( frame1, self )
    self.frame1.tritium_parent = self.frame2.tritium_parent = self
    self.frame1.activate()
    self.create_split_window()
class Hooker(object):
    def __init__(self, number=None):
        self.nFrame = number
        self.i = -1

    def startLoop(self, title="start loop:"):
        print title
        self.bar = ProgressBar().start()

    def endLoop(self):
        self.bar.finish()

    def newFrame(self):
        self.frame = Frame()
        self.i += 1

    def postFrame(self):
        self.bar.update((self.i + 1) * 100 / self.nFrame)

    def dataHooker(self, name, sz, arr):
        self.frame.loadIntoMemory(name, sz, arr)
def _generatePickledFrames(self):
    """ Initialises all the frames """
    framePath = FRAMENET_PATH + '/' + FRAME_DIR_ENV + '/' + FRAME_FILE
    pickledFramePath = FRAMENET_PATH + '/' + PICKLED_FRAME_FILE

    print >> sys.stderr, 'Loading xml for frames ...',
    doc = xml.dom.minidom.parse(framePath)
    frameNodes = getNoneTextChildNodes(doc.childNodes[1])
    print >> sys.stderr, 'done',

    frames = {}
    print >> sys.stderr, 'parsing each frame ...',
    for fn in frameNodes:
        f = Frame()
        f.loadXMLNode(fn)
        frames[f['ID']] = f
    print >> sys.stderr, 'done',

    print >> sys.stderr, 'saving the frames ...',
    # binary mode is required for HIGHEST_PROTOCOL pickles
    cPickle.dump(frames, open(pickledFramePath, 'wb'), cPickle.HIGHEST_PROTOCOL)
    print >> sys.stderr, 'done'
def hide( self ):
    Frame.hide( self )
    self.tabs.hide()
    if self.shown:
        for window in self.windows:
            window.hide()
        self.shown = False
class nCacheHooker(object):
    def __init__(self, number=None):
        self.nFrame = number
        self.i = -1

    def startLoop(self, title="start loop:"):
        print title

    def endLoop(self):
        pass

    def resetPass(self):
        self.i2 = -1

    def newFrame(self):
        if self.i < 0:
            print "Reading on cache."
            self.bar = ProgressBar().start()
        self.frame = Frame()
        self.i += 1
        self.i2 += 1

    def postFrame(self):
        self.bar.update((self.i2 + 1) * 100 / self.nFrame)
        if self.i2 == self.nFrame - 1:
            self.bar.finish()

    def dataHooker(self, name, sz, arr):
        self.frame.loadIntoMemory(name, sz, arr)
def __init__(self, screen, game):
    '''An example class to show how to create custom GUI frames'''
    Frame.__init__(self, screen, (330, 110), (10, 10))
    self.game = game
    self.b1 = Button(self, "Toggle Constant", self.command)
    self.b2 = Button(self, "Toggle Random", self.command2, position=(5, 40))
    self.b3 = Button(self, "Toggle Shape", self.command3, position=(5, 75))
    self.c_Label = Label(self, "False", position=(135, 10))
    self.r_Label = Label(self, "True", position=(135, 45))
    self.s_Label = Label(self, "Random", position=(135, 80))
    self.l1 = Label(self, "Size", position=(235, 10))
    self.tb1 = TextBox(self, (295, 10))
    self.l2 = Label(self, "Density", position=(235, 45))
    self.tb2 = TextBox(self, (295, 45))
    self.l3 = Label(self, "Speed", position=(235, 80))
    self.tb3 = TextBox(self, (295, 80))
    self.textboxes = {'size': self.tb1, 'density': self.tb2, 'speed': self.tb3}
    self.current = 'circle'
def query(self, mode, pid):
    frame = Frame()
    frame.id = OBD2_REQUEST
    frame.len = 8
    frame.flags = 0
    frame.data = (2, mode, pid, 0x55, 0x55, 0x55, 0x55, 0x55)
    self.can.adapter.write(frame)
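# Usage note (hedged): in standard OBD-II, mode 0x01 with PID 0x0C requests
# engine RPM and PID 0x0D requests vehicle speed; with the method above that
# would look roughly like the lines below (assumption: `obd` is an instance
# of the surrounding class with a connected CAN adapter).
#
#   obd.query(0x01, 0x0C)   # current engine RPM
#   obd.query(0x01, 0x0D)   # current vehicle speed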
def show( self ):
    Frame.show( self )
    self.tabs.show()
    if not self.shown:
        for window in self.windows:
            window.show()
        self.shown = True
def TestFPTracking():
    i = cv2.imread("../img/s01.jpg")
    f = Frame(i, True)
    f.detectKeyPoints()
    j = cv2.imread("../img/s02.jpg")
    g = Frame(j, True)
    kp = g.trackKeyPoints(f)
def work(self, input_items, output_items):
    inp = input_items[0]

    for i in range(len(inp)):
        data = []
        for x in inp[i]:
            data.append(x & 0xFF)
            data.append(x >> 8)

        frame = Frame(data, self.frame_prev)
        if not frame:
            print "no frame" + str(frame)
            continue

        if not frame.is_broken():
            self.frame_prev = frame

        self.conf = frame.get(SF_TYPE_CONFIG)
        if self.conf is not None:
            frame_num = self.conf.frame_num
            node_id = self.conf.id
            calibration_num = self.conf.calibration_num
            de = DataEvent([EVENT_CONFIG, frame_num, node_id, calibration_num])
            wx.PostEvent(self.panel, de)
            del de
        else:
            frame_num = 'N/A'

        self.meas = frame.get(SF_TYPE_MEASUREMENTS)
        if self.meas is not None and self.conf is not None:
            temp = self.meas.temp
            hum = self.meas.hum_down
            pres = self.meas.pressure
            de = DataEvent([EVENT_MEASSURE, self.conf.frame_num, temp, hum, pres])
            wx.PostEvent(self.panel, de)
            del de

        #self._dump_frame(frame, frame_num)
        #self._dump_eval(frame)

        if not self.calibrated and self.conf is not None:
            self.calibrated = self.calib.addFragment(self.conf.calibration_num,
                                                     self.conf.calibration_data)
            if self.calibrated:
                print("calibration complete at frame %s" % frame_num)
                calib_data = self.calib.data()
                self.calib = Calibration(calib_data)
                de = DataEvent([EVENT_CALIBRATED])
                wx.PostEvent(self.panel, de)
                del de

        print("frame: %s %s" % (frame_num, not frame.is_broken(), ))

    return len(inp)
def append( self, window ):
    Frame.append( self, window )
    tab = Tab( self, window )
    window.tab = tab
    self.tabs.append( tab )
    if not self.visible():
        tab.hide()
    tab.set_text( window.get_title() )
    # shouldn't some of the stuff above be moved into tab_manage?
    window.tab_manage()
def TestFeaturesMask():
    img = cv2.imread("../img/s01.jpg")
    ff = Frame(img, True)
    #gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    mask = np.zeros(img.shape[:2], np.uint8)
    mask[200:500, 0:300] = 1
    kp = ff.detectKeyPoints(mask=mask)
    ff.showDetectedKeyPoints()
def draw( self, surface, interpol, time_sec ):
    surface.fill( (50,50,50) )
    #resman.get("game.hud_left_surf").draw( surface, Vec2D(0,0) )

    frame = Frame( surface, time_sec, interpol )
    frame.X_OFFSET = MonorailEditor.X_OFFSET
    frame.Y_OFFSET = MonorailEditor.Y_OFFSET

    frame.draw_z( [self.level] )

    if self.current_tile in [MonorailEditor.FLAT, MonorailEditor.ENTERANCE]:
        frame.draw( self.edit_tile1 )
    elif self.current_tile in [MonorailEditor.NORTH_SLOPE, MonorailEditor.EAST_SLOPE,
                               MonorailEditor.SOUTH_SLOPE, MonorailEditor.WEST_SLOPE]:
        frame.draw( self.edit_tile1 )
        frame.draw( self.edit_tile2 )
    elif self.current_tile == MonorailEditor.ERASE:
        pass

    # draw filename
    font = pygame.font.Font( None, 24 )
    render_text = font.render( Level.get_filename( self.level_nr ), 0, (255,255,255) )
    surface.blit( render_text, (100,10) )
class Dialogue(object):
    def __init__(self, title, message, buttons={}):
        frame_title = "dialogue_{0}".format(title)
        self.frame = Frame(frame_title, x, y, width, height, z)
        self.frame.title = title
        self.buttons = buttons
        self.frame.closeable = True

        background = pygame.surface.Surface((width, height))
        background.blit(gradients.vertical((width, height),
                                           colors.OPAQUE_GRAY,
                                           colors.OPAQUE_BLACK), (0, 0))
        self.frame.background = background

        textbox = self.frame.background.subsurface(
            pygame.Rect(50, 40, text_width, text_height))
        button_rect = pygame.Rect(25, 60 + text_height, button_width, 25)
        button_area = self.frame.background.subsurface(button_rect)

        textrect(message, ui.typography["title"], text_width, colors.WHITE,
                 surface=textbox)

        created_buttons = []
        for button_create in buttons:
            created_buttons.append(Button(button_create[0], button_create[1]))

        total = len(created_buttons)
        padding = (button_width - (total * button.width)) / (total + 1)

        button_index = 0
        for btn in created_buttons:
            btn_offset = button_area.get_abs_offset()
            btn_rect = pygame.Rect(
                (padding * (button_index + 1)) + (button.width * button_index),
                0, button.width, button.height)
            button_area.blit(btn.image, (btn_rect.x, btn_rect.y))
            btn_rect.x += btn_offset[0] + x
            btn_rect.y += btn_offset[1] + y
            game.addClickableFrame(btn_rect, self.frame, btn.action, 2)
            button_index += 1

    @property
    def title(self):
        return self.frame.title

    @title.setter
    def title(self, value):
        self.frame.title = value

    @property
    def display(self):
        return self.frame.display

    @display.setter
    def display(self, value):
        self.frame.display = value

    def toggle(self):
        self.frame.toggle()
def __init__(self, rect, font, name=None):
    Frame.__init__(self, rect)
    self.active = True
    self.font = font
    self.textcolor = BUTTONTEXTCOLOR
    self.backgroundcolor = BUTTONBACKGROUNDCOLOR
    self.name = name
    self.textframe = None
    # Margins
    self.top = 1
def init():
    global frame, image
    frame = Frame(frame_name, x, y, width, height, 1)
    frame.closeable = True
    frame.pages = True
    frame.background = pygame.image.load(imagePath("frame_bg.jpg"))
    image = pygame.image.load(imagePath("skilltree.png"))
    image = image.convert_alpha()
    ui.trigger("menu_bar", "addTrigger", "skill_tree", image, toggle, tooltip, description)
    renderSkills()
def recv(self):
    while True:
        data = self.sock.recv(1024)
        frm = Frame.decode(data)
        if frm.opcode == 0x1:    # text
            return str(frm)
        elif frm.opcode == 0x8:  # close
            self.close()
            return None
        elif frm.opcode == 0x9:  # ping
            pong = Frame(opcode=0xA, payload=str(frm))
            self.request.send(pong.build())
def run_analysis():
    # turn on interactive use of pyplot
    plt.ion()

    # read in and process dark file if it's supplied
    dark_frames = None
    if args.dark is not None:
        print('Reading dark frames from ', args.dark)
        filename_dark_flat = utils.get_flat_filename(args.dark, args.tag)
        # process the dark file
        dark_frames = Dark(args.dark, filename_dark_flat, 100)
    else:
        print('No dark file supplied.')

    # read frames one by one, either from file or shared memory
    if args.light is not None:
        print('Read frames from data file ', args.light)

        # create the frame object that holds the frame
        frame = Frame()
        frame.debug = True

        # read the binary file
        idx = 0
        nframes = 0
        with open(args.light, 'rb') as f:
            while True:
                try:
                    print('Read frame from file idx ', idx)
                    # read a frame from the file
                    idx = frame.read_frame_from_file(f, idx)
                    print('frame at index ', frame.index)
                    print('frame nframes ', frame.nframes)
                    print('new idx ', idx)
                    print('raw frame ', frame.raw_frame)
                    print('flat frame ', frame.flat_frame)
                    nframes += frame.nframes
                    print('Read ', nframes, ' so far')
                except IndexError:
                    print(' caught IndexError: found ', nframes, nframes)
                    break
    else:
        print('Read frames from shared mem is not implemented yet')
def __init__(self, rect, font):
    Frame.__init__(self, rect)
    self.active = True
    self.font = font
    self.textcolor = TEXTCOLOR
    self.name = None
    self.lines = []
    # Margins
    self.top = 1
    self.bottom = 1
    self.left = 1
    self.right = 1
def next(self):
    if self._closed:
        raise ValueError('I/O operation on closed file')
    nfi = self._nfi
    if nfi < self._n_csets:
        unitcell = self._nextUnitcell()
        coords = self._nextCoordset()
        if self._ag is None:
            frame = Frame(self, nfi, coords, unitcell)
        else:
            frame = self._frame
            Frame.__init__(frame, self, nfi, None, unitcell)
        return frame
def __init__(self, rect, font):
    Frame.__init__(self, rect)
    self.active = True
    self.font = font
    self.textcolor = TEXTCOLOR
    self.name = None
    self.lines = ['']
    self.char_limit = 40
    self.textframe = None
    # Margins
    self.top = 1
    self.right = 1
    self.left = 1
def wrap_run_method(self, instance, method, *args):
    klass = instance
    frame = Frame(max_locals=1)
    if isinstance(instance, ClassInstance):
        klass = instance._klass
    frame.push(instance)
    if args:
        frame.stack += args
    self.frame_stack.append(frame)
    self.run_method(klass, method)
    self.run_bytecode()
    self.frame_stack.pop()
    if frame.stack:
        return frame.stack.pop()
    return void
def newFrame(self):
    if self.i < 0:
        print "Reading on cache."
        self.bar = ProgressBar().start()
    self.frame = Frame()
    self.i += 1
    self.i2 += 1
def main(argv):
    radius = 200
    if "-h" in argv:
        print('''Usage: python main.py [OPTIONS]
Available options:
-h : show this help
-s : run test suite of algorithms non-interactively without GUI
-r [number] : set the radius of the frame to number''')
    else:
        if "-r" in argv:
            if argv.index("-r") < len(argv) - 1:
                try:
                    radius = float(argv[argv.index("-r") + 1])
                    if not (100 <= radius <= 500):
                        raise ValueError
                except ValueError:
                    print("-r expects a number between 100 and 500, setting default value of 200.")
                    radius = 200
            else:
                print("-r needs an argument (number), setting default value of 200.")

        frame = Frame(radius)
        if "--sim" in argv or "-s" in argv:
            display_welcome_message(radius)
            frame.start()
            compare_algorithms(frame)
            frame.request_end()
        else:
            app = QtGui.QApplication(argv)
            mw = MainWindow(frame)
            frame.start()
            sys.exit(app.exec_())
def read(self, data):
    if not self._frame:
        self._frame = Frame()
    try:
        self._buffer = self._frame.unpack(data)
        return False
    except FrameUnpackVerifyError:
        logging.error("stream connection %s frame verify error", self)
        self.close()
        return False
    except FrameUnpackFinish, e:
        if self._frame.session_id == 0 and self._frame.stream_id == 0 and self._frame.frame_id == 0:
            self.control(self._frame)
        else:
            self.emit("frame", self, self._frame)
        self._frame = Frame()
        self._buffer = e.data
        return True
def __init__(self, assembler, rt, ctx):
    self.assembler = AssemblerWrapper(assembler)
    self.frame = Frame(self.assembler)

    self.rt = rt  # runtime
    self.ctx = ctx

    self.state = {
        'with': False,
        'eval': False
    }
def next(self):
    if self._closed:
        raise ValueError('I/O operation on closed file')
    nfi = self._nfi
    if nfi < self._n_csets:
        traj = self._trajectory
        while traj._nfi == traj._n_csets:
            self._nextFile()
            traj = self._trajectory
        unitcell = traj._nextUnitcell()
        coords = traj._nextCoordset()
        if self._ag is None:
            frame = Frame(self, nfi, coords, unitcell)
        else:
            frame = self._frame
            Frame.__init__(frame, self, nfi, None, unitcell)
            self._ag.setACSLabel(self._title + ' frame ' + str(self._nfi))
        self._nfi += 1
        return frame
def parse_frame(self):
    ''' Parse an HTTP/2 frame '''
    bin_frame_header = self.get_bytes(9)
    frame_header = Frame.parse_header(bin_frame_header)
    frame_body = struct.unpack(
        'c' * frame_header['length'],
        self.get_bytes(frame_header['length'])
    )

    # Create the frame object
    frame = Frame(frame_header, frame_body)

    # Create (and) or get the Stream object
    if not self.streams.has_key(frame_header['stream']):
        self.streams[frame_header['stream']] = Stream(frame_header['stream'])
    stream = self.streams[frame_header['stream']]

    stream.add_frame(frame)

    if self.debug:
        frame.print_info()
def handle(self):
    self.handshake(self.request.recv(1024))
    while True:
        data = self.request.recv(1024)
        if not len(data):
            continue  # empty data (sent by chrome)
        frm = Frame.decode(data)
        frm.unmask()
        if frm.opcode == 0x1:    # text
            msg = 'you said: ' + str(frm)
            reply = Frame(payload=msg)
            self.request.send(reply.build())
        elif frm.opcode == 0x8:  # close
            self.close_connection()
            break
        elif frm.opcode == 0x9:  # ping
            pong = Frame(opcode=0xA, payload=str(frm))
            self.request.send(pong.build())
        elif frm.opcode == 0xA:  # pong
            continue
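# Hedged aside: per RFC 6455 the opcodes handled above are text (0x1), close
# (0x8), ping (0x9) and pong (0xA). A server-initiated close could reuse the
# same Frame API shown in the ping branch (assumption: Frame accepts an
# `opcode` keyword exactly as used above):
#
#   close_frame = Frame(opcode=0x8, payload='')
#   self.request.send(close_frame.build())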
def read_frame_data_gen_internal(
        source: Union[str, IO]
) -> Generator[Frame, None, None]:
    """
    Creates an iterator from source_iterable and yields Frame objects.
    """
    for node in frame_elements(eTree.iterparse(source)):
        # get data
        time = try_get_frame_time_from_node(node)
        size = node.get("pkt_size")
        pict_type = node.get("pict_type")

        # clear node to free parsed data
        node.clear()

        # construct and append frame
        yield Frame(
            time=time if time else 0,
            size=int(size) if size else 0,
            pict_type=pict_type if pict_type else "?"
        )
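# Usage sketch (assumptions: the XML comes from something like
# `ffprobe -show_frames -print_format xml video.mkv > frames.xml`, and the
# helpers referenced above live in this module; the file name is illustrative).
if __name__ == "__main__":
    total_size = 0
    frame_count = 0
    for parsed in read_frame_data_gen_internal("frames.xml"):
        total_size += parsed.size
        frame_count += 1
    print(f"{frame_count} frames, {total_size} bytes total")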
def __init__(self, object, anim_name):
    try:
        conffile = open(data.animpath(object, anim_name))
    except:
        conffile = open(data.animpath("default", "static"))
    tiley = 0
    values = []
    self.frames = []
    self.repeat_times = -1
    for line in conffile.readlines():
        if line.strip() != "":
            values = line.split()
            if values[0] == "repeat_times":
                self.repeat_times = int(values[1])
            if values[0] == "frame":
                self.frames.append(
                    Frame(object, anim_name, len(self.frames), int(values[2])))
    self.reset()
def wait_for_connection(self):
    time_start = time.time()
    time_since_last_try = time.time()
    while time.time() - time_start < transm_global_params.CONNECTION_TIMEOUT:
        if time.time() - time_since_last_try > transm_global_params.CONNECTION_ESTABLISHMENT_INTERVAL:
            if self.VERBOSE:
                print("sender: Trying to connect", get_time_h_m_s())
            frame = Frame(None, transm_global_params.ESTABLISH_CONNECTION_CODE, False, False)
            self.ipc_manager_.send_to_receiver(frame)
            time_since_last_try = time.time()

        ack = self.ipc_manager_.get_from_receiver()
        if ack is None:
            continue
        if ack.seq_num_ == transm_global_params.ESTABLISH_CONNECTION_CODE:
            if self.VERBOSE:
                print("sender: Connection established", get_time_h_m_s())
            return True
    return False
def work(self, input_items, output_items):
    inp = input_items[0]

    for i in range(len(inp)):
        data = []
        for x in inp[i]:
            data.append(x & 0xFF)
            data.append(x >> 8)

        frame = Frame(data, self.frame_prev)
        if not frame:
            print "no frame" + str(frame)
            continue

        if not frame.is_broken():
            self.frame_prev = frame

        self.conf = frame.get(SF_TYPE_CONFIG)
        if self.conf is not None:
            frame_num = self.conf.frame_num
            node_id = self.conf.id
            calibration_num = self.conf.calibration_num
            self.node_id.setText(str(node_id))
            self.frame_num.setText(str(frame_num))
        else:
            frame_num = 0

        self.meas = frame.get(SF_TYPE_MEASUREMENTS)
        if self.meas is not None and self.conf is not None:
            temp = self.meas.temp
            hum = self.meas.hum_down
            pres = self.meas.pressure
            self.plot.update_figure(frame_num, temp, hum, pres)

        if not self.calibrated and self.conf is not None:
            self.calibrated = self.calib.addFragment(self.conf.calibration_num,
                                                     self.conf.calibration_data)
            if self.calibrated:
                print("calibration complete at frame %s" % frame_num)
                calib_data = self.calib.data()
                self.calib = Calibration(calib_data)
                self.calibrated_label.setText("calibrated")
                self.calibrated_label.setStyleSheet("color: green")

        print("frame: %s %s" % (frame_num, not frame.is_broken(), ))

    return len(inp)
def analyze(self, start_time=0, end_time=0, is_test=False):
    """Main analysis process

    Capture frames with given video, retrieve info from each frame. All
    retrieved info in one frame is stored in a Frame object, then the
    Frame obj is pushed into array: self.frames

    Author:
        Appcell

    Args:
        None

    Returns:
        None
    """
    video = VideoLoader(self.video_path)
    step = int(round(video.fps / self.analyzer_fps))
    step_cnt = 0
    self.is_test = is_test
    start_time = start_time if is_test else 0

    # For testing we specify start/end time.
    # But for release version we don't.
    frame_image_index = start_time * video.fps
    frame_image = video.get_frame_image(frame_image_index)

    while frame_image is not None \
            and (frame_image_index < video.frame_number and is_test is False) \
            or (frame_image_index < end_time * video.fps and is_test is True):
        frame = Frame(frame_image,
                      start_time + (1 / float(self.analyzer_fps)) * step_cnt,
                      self)
        self.frames.append(frame)
        frame_image_index += step
        step_cnt += 1
        frame_image = video.get_frame_image(frame_image_index)

    video.close()
    self.clear_all_frames()
    self.output_to_excel()
def find_matches_along_line(f, e, line, descriptor,
                            radius=Parameters.kMaxReprojectionDistanceFrame,
                            overlap_ratio=0.5):
    max_descriptor_distance = 0.5 * Parameters.kMaxDescriptorDistanceSearchEpipolar
    #print('line: ', line[0], ", ", line[1])
    step = radius * (1 - overlap_ratio)
    delta = line[1].ravel() - line[0].ravel()
    length = np.linalg.norm(delta)
    n = max(int(math.ceil(length / step)), 1)
    delta = delta / n
    #print('delta: ', delta)

    best_dist = math.inf
    best_dist2 = math.inf
    best_k_idx = -1
    for i in range(n + 1):
        x = line[0].ravel() + i * delta
        for k_idx in f.kd.query_ball_point(x, radius):
            # if no point associated
            if f.get_point_match(k_idx) is None:
                descriptor_dist = Frame.descriptor_distance(f.des[k_idx], descriptor)
                if descriptor_dist < max_descriptor_distance:
                    if True:
                        return k_idx, descriptor_dist  # stop at the first match
                    if descriptor_dist < best_dist:
                        best_dist2 = best_dist
                        best_dist = descriptor_dist
                        best_k_idx = k_idx
                    else:
                        if descriptor_dist < best_dist2:
                            best_dist2 = descriptor_dist
        if False:
            if best_dist2 < max_descriptor_distance:
                # stop once we have a second to best match
                break
    # N.B.: the search "segment line" can have a large width => it is better
    # to use the match distance ratio test
    if best_dist < best_dist2 * Parameters.kMatchRatioTestEpipolarLine:
        return best_k_idx, best_dist
    else:
        return -1, 0
def _add(self, event):
    self.text_box.text_box.insert('end', '\n')
    text = self.text_box.text_box.get('1.0', 'end')
    text_lines = text.split('\n')
    while text_lines and not text_lines[-1]:
        text_lines.pop()
    item_add = text_lines[-1]

    self.text_box.button_add.button.pack_forget()
    button_frame = Frame(master=self.text_box.master, width=22, height=1,
                         side=tkinter.TOP)
    self.text_box.button_frame_list.append(button_frame)
    idx = len(self.text_box.button_done_list)
    self.text_box.button_done_list.append(
        ButtonDone(id=idx, master=button_frame.frame,
                   text='{}.done'.format(idx + 1),
                   text_box=self.text_box, window=self.window))
    self.text_box.button_cancel_list.append(
        ButtonCancel(id=idx, master=button_frame.frame,
                     text='{}.cancel'.format(idx + 1),
                     text_box=self.text_box, window=self.window))
    self.text_box.button_add = ButtonAdd(master=self.text_box.master,
                                         text_box=self.text_box,
                                         window=self.window)

    with open('dashboard.txt', 'a') as f:
        f.write('{}\n'.format(''.join(deparser([item_add]))))
    self._refresh()
def on_message_arrive(
    self,
    message: bytes,
):
    frame = Frame(message)
    if frame.is_corrupt:
        # warning(f"corrupt frame {frame}")
        warning(f"corrupt frame received.")
        return

    if frame.index == self.expected_index:
        self.buffer = self.buffer + frame.data
        success(f"receive : {frame.data}")
        self.medium.send_ack(to_ack_bytes(self.expected_index))  # self.expected_index
        self.expected_index = (1 + self.expected_index) % self.max_index_mod
        print(f"expected next :{self.expected_index}")
        received = True
    else:
        warning(
            f"unexpected frame index: {frame.index}, expect {self.expected_index}"
        )
def __init__(self, source_id, **kwargs):
    """Accepts filename or device id. Capture arguments passed as kwargs."""
    self.source = source_id
    self.capture = cv.VideoCapture(source_id)
    self.frame = Frame()  # Frame object contains image and frame number, time
    self.total_frames = self.capture.get(cv.CAP_PROP_FRAME_COUNT)
    self.fps = self.capture.get(cv.CAP_PROP_FPS)
    if 'width' in kwargs:
        self.capture.set(cv.CAP_PROP_FRAME_WIDTH, kwargs['width'])
    elif 'height' in kwargs:
        self.capture.set(cv.CAP_PROP_FRAME_HEIGHT, kwargs['height'])
    elif 'fps' in kwargs:
        self.capture.set(cv.CAP_PROP_FPS, kwargs['fps'])
    elif 'start_ms' in kwargs:
        # Only for video files
        self.capture.set(cv.CAP_PROP_POS_MSEC, kwargs['start_ms'])
    elif 'start_frame' in kwargs:
        self.capture.set(cv.CAP_PROP_POS_FRAMES, kwargs['start_frame'])
    elif 'fourcc' in kwargs:
        self.capture.set(cv.CAP_PROP_FOURCC, kwargs['fourcc'])
def packet_to_frames(self, packet):
    frames = []

    byte_array = []
    byte_array += packet.id  # Packet.HEADER_ID_BYTES
    byte_array += packet.size.to_bytes(Packet.HEADER_SIZE_BYTES, byteorder='big')
    byte_array += packet.receiver.to_bytes(Packet.HEADER_RECEIVER_BYTES, byteorder='big')
    byte_array += packet.sender.to_bytes(Packet.HEADER_SENDER_BYTES, byteorder='big')
    byte_array += packet.data

    count = int(math.ceil(len(byte_array) / Frame.PAYLOAD_BYTES))
    for i in range(count):
        start_byte = i * Frame.PAYLOAD_BYTES
        frame = Frame(
            i, count,
            byte_array[start_byte:start_byte + Frame.PAYLOAD_BYTES])
        frames.append(frame)

    return frames
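# Hedged companion sketch: undoing packet_to_frames() on the receiving side.
# Assumption (not shown in the original): Frame keeps its constructor
# arguments as .index, .count and .payload; the real class may use other
# attribute names.
def frames_to_packet_bytes(frames):
    # order fragments by their index and confirm none are missing
    ordered = sorted(frames, key=lambda f: f.index)
    assert [f.index for f in ordered] == list(range(ordered[0].count))
    payload = []
    for f in ordered:
        payload += f.payload
    return payload  # header bytes followed by packet.data, as built above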
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(mapp, img, K)
    if frame.id == 0:
        return

    f1 = mapp.frames[-1]
    f2 = mapp.frames[-2]

    idx1, idx2, Rt = match_frames(f1, f2)
    f1.pose = np.dot(Rt, f2.pose)

    # homogeneous 3-D coords
    pts4d = triangulate(f1.pose, f2.pose, f1.pts[idx1], f2.pts[idx2])
    pts4d /= pts4d[:, 3:]

    # reject pts without enough "parallax" (this right?)
    # reject points behind the camera
    good_pts4d = (np.abs(pts4d[:, 3]) > 0.005) & (pts4d[:, 2] > 0)

    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        pt = Point(mapp, p)
        pt.add_observation(f1, idx1[i])
        pt.add_observation(f2, idx2[i])

    for pt1, pt2 in zip(f1.pts[idx1], f2.pts[idx2]):
        u1, v1 = denormalize(K, pt1)
        u2, v2 = denormalize(K, pt2)
        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))

    # 2-D display
    if disp is not None:
        disp.paint(img)

    # 3-D display
    mapp.display()
def load_images(self):
    # Load the images and the characters' frame (outline) data
    for direction in self.settings.hero_direction.keys():
        self.stay_images[self.settings.hero_direction[direction]] = []
        self.move_images[self.settings.hero_direction[direction]] = []
        self.attack_images[self.settings.hero_direction[direction]] = []
        self.jump_images[self.settings.hero_direction[direction]] = []
        self.jump_attack_images[self.settings.hero_direction[direction]] = []
        self.fire_magic_images[self.settings.hero_direction[direction]] = []
        self.hurt_images[self.settings.hero_direction[direction]] = []

        for weapon in range(0, self.weapon_size):
            png_image_path = 'images/' + str(weapon) + '_' + direction + '_stay.png'
            image = pygame.image.load(png_image_path)
            # image = image.convert_alpha()
            #
            # transparent(image)  # make the background transparent
            # pygame.image.save(image, png_image_path)
            #
            # self.image_to_frame[image] = Frame(image, self.settings)  # trace the character outline
            # pygame.image.save(image, png_image_path)  # save the image
            #
            # frame = Frame(image, self.settings)
            #
            frame_path = "frames/frame/" + str(self.frame_num) + ".txt"
            frame_rect_path = "frames/frame_rect/" + str(self.frame_num) + ".txt"
            self.frame_num += 1
            # with open(frame_path, 'w') as file_obj:        # save the character outline to a json file in advance,
            #     file_obj.write(json.dumps(frame.frame))    # so it does not have to be traced on every startup
            # with open(frame_rect_path, 'w') as file_obj:   # which shortens the startup time
            #     file_obj.write(json.dumps(frame.frame_rect))
            with open(frame_path, 'r') as file_obj:
                frame_frame = json.loads(file_obj.read())
            with open(frame_rect_path, 'r') as file_obj:
                frame_frame_rect = json.loads(file_obj.read())
            self.image_to_frame[image] = Frame(image, self.settings, True,
                                               frame_frame, frame_frame_rect)
            self.stay_images[self.settings.hero_direction[direction]].append(image)

            self.load_image_file(direction, weapon, self.move_images, 'move_images', self.move_size[weapon])
            self.load_image_file(direction, weapon, self.attack_images, 'attack_images', self.attack_size[weapon])
            self.load_image_file(direction, weapon, self.jump_images, 'jump_images', self.jump_size)
            self.load_image_file(direction, weapon, self.jump_attack_images, 'jump_attack_images', self.jump_attack_size[weapon])
            self.load_image_file(direction, weapon, self.fire_magic_images, 'fire_magic_images', self.fire_magic_size[weapon])
            self.load_image_file(direction, weapon, self.hurt_images, 'hurt_images', self.hurt_size)
def Render(self, scene):
    if self.preparation_for_rendering or self.IsRendering():
        return
    self.preparation_for_rendering = True  # TODO: remove when there is no need for visual indication if new rendering is started

    self.frame = Frame(self.width, self.height)

    horizontal_frame_step = 16
    vertical_frame_step = 16
    self.JobsWithSpiralRenderPattern(scene, horizontal_frame_step, vertical_frame_step)
    # self.JobsWithBottomToTopRenderPattern(scene, horizontal_frame_step, vertical_frame_step)

    # create and start worker threads
    self.render_threads = []
    worker_threads_count = 1
    for thread_number in range(0, worker_threads_count):
        render_thread_args = (thread_number, self.render_queue, self.stop_rendering_event)
        render_thread = threading.Thread(target=RenderThread, args=render_thread_args)
        self.render_threads.append(render_thread)
        render_thread.start()

    self.preparation_for_rendering = False
def plotter(frames=[], rest_frame: Frame = Frame()):
    new_vel = [x.relative_velocity(rest_frame) for x in frames]
    slopes = [rest_frame.c / x for x in new_vel if x != 0]
    x = np.arange(-10, 10, 1)
    fig = go.Figure()
    for index, slope in enumerate(slopes):
        fig.add_trace(go.Scatter(x=x, y=slope * x,
                                 mode='lines+markers',
                                 name='{}'.format(index)))
    fig.add_trace(go.Scatter(x=[0 for _ in x], y=x,
                             mode='lines+markers',
                             name='RestFrame'))
    fig.update_layout(title='Minkowski Diagram: frame_vel : {}m/s'.format(rest_frame._velocity),
                      xaxis_title='distance in 3e8 m',
                      yaxis_title='time in 1 second',
                      template='plotly_dark')
    fig.update_xaxes(zeroline=True, zerolinewidth=2, zerolinecolor='Black', range=[-10, 10])
    fig.update_yaxes(zeroline=True, zerolinewidth=2, zerolinecolor='Black', range=[-10, 10])
    fig.show()
async def main():
    s = Sender("Sender 1", 4)
    r = Receiver("Receiver 1", 4)
    c = Channel()
    frames = []
    for _ in range(11):
        f = Frame()
        f.source_address = "Sender 1"
        f.destination_address = "Receiver 1"
        f.packFrame()
        frames.append(f)
    t1 = asyncio.create_task(s.send(frames, c))
    t2 = asyncio.create_task(r.conn(c))
    await t1
    await t2
def run(fp, filename):
    file_contents = ""
    read = os.read(fp, 4096)
    while len(read) > 0:
        file_contents += read
        read = os.read(fp, 4096)
    os.close(fp)

    program = parse(file_contents)
    if len(program) > 0 and program[0][0] == BYTECODE_HEADER:
        del program[0]
    else:
        raise InvalidBytecodeFileException(
            "Cannot execute Aver Bytecode file. Please ensure that the file you're trying to run was generated by the compiler",
            filename)

    i = 0
    functions = [None] * 1024
    while i < len(program):
        ops = program[i]
        opcode = int(ops[0])
        if opcode == OpCode.MAKE_FUNCTION:
            name = int(ops[1])
            params = int(ops[2])
            func, bytecodes_length = make_function(name, params, program, i + 1)
            functions[name] = func
            i += bytecodes_length
        i += 1

    functions = [func for func in functions if func is not None]
    main_func = functions[0]
    init_frame = Frame(None, main_func, main_func.num_locals,
                       main_func.literals, main_func.stack_size)
    vm = VM(functions)
    vm.run(init_frame)
def test_close_frame(self):
    # masked
    dummy_payload_masked = b'Endpoint shutting down'
    objFrame = Frame(1, Frame.cls_frame, dummy_payload_masked, _masked=True)
    self.assertEqual(objFrame.opcode, Frame.cls_frame)
    self.assertEqual(objFrame.FIN, 1)
    self.assertTrue(objFrame.isMasked())
    self.assertEqual(objFrame.payload_len, len(dummy_payload_masked))
    self.assertEqual(objFrame.payload, dummy_payload_masked)

    # not masked
    dummy_payload_unmasked = b'Received frame too large'
    objFrame = Frame(1, Frame.cls_frame, dummy_payload_unmasked, _masked=False)
    self.assertEqual(objFrame.opcode, Frame.cls_frame)
    self.assertEqual(objFrame.FIN, 1)
    self.assertFalse(objFrame.isMasked())
    self.assertEqual(objFrame.payload_len, len(dummy_payload_unmasked))
    self.assertEqual(objFrame.payload, dummy_payload_unmasked)
def format(self, bytes):
    payload = None
    header = None
    if ((type(bytes) == list) or (type(bytes) == bytearray)):
        if (len(bytes) >= 14):
            headerBytes = bytes[3:13]
            header = LocalHeaderFormatter().format(headerBytes)
            if (header == None):
                raise ValueError(
                    "@LocalFrameFormatter: the header is empty!")
            else:
                if (len(bytes) > 14):
                    tempBodyBytes = len(bytes) - 1
                    BodyBytes = bytes[13:tempBodyBytes]
                    payload = self.__getCurrentPayloadObject(
                        header.getParameter("packetID"), BodyBytes)
        else:
            raise ValueError(
                "@LocalFrameFormatter: The length of bytes is not enough!")
    else:
        raise ValueError(
            "@LocalFrameFormatter: Require List object of bytes")

    # At this point the header is not None and the body is either empty or real content.
    return Frame(Packet(header, payload), bytes[1])
def process_frame(img):
    img = cv2.resize(img, (W, H))
    frame = Frame(mapp, img, K)
    if frame.id == 0:
        return

    f1 = mapp.frames[-1]
    f2 = mapp.frames[-2]

    idx1, idx2, Rt = match_frames(f1, f2)
    f1.pose = np.dot(Rt, f2.pose)

    # Find 3-D Coordinates
    pts4d = triangulate(f1.pose, f2.pose, f1.pts[idx1], f2.pts[idx2])
    pts4d /= pts4d[:, 3:]
    print(pts4d)

    good_pts4d = (np.abs(pts4d[:, 3]) > 0.005) & (pts4d[:, 2] > 0)

    for i, p in enumerate(pts4d):
        if not good_pts4d[i]:
            continue
        pt = Point(mapp, p)
        pt.add_observation(f1, idx1[i])
        pt.add_observation(f2, idx2[i])

    print("{} matches".format(len(f1.pts[idx1])))

    for pt1, pt2 in zip(f1.pts[idx1], f2.pts[idx2]):
        u1, v1 = denormalize(K, pt1)
        u2, v2 = denormalize(K, pt2)
        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))

    disp.paint(img)
    mapp.display()
def covariance_matrix_calc(mix_model, motion_patterns, xmin, xmax, ymin, ymax):
    # Takes in the mixture model with its pattern-to-frames assignments and the
    # motion patterns (list with motion pattern info).
    # Returns the covariance matrix calculated for each pattern based on its
    # corresponding frames.
    for i in range(0, len(motion_patterns)):  # for pattern in motion_patterns:
        pattern_num = i
        #print("mixmodel.partition", mix_model.partition)

        # number of frames assigned to the pattern
        pattern_num = np.argmax(np.array(mix_model.partition))
        rate = np.array(mix_model.partition)[i] / mix_model.n
        frame_pattern_ink = mix_model.frame_ink(pattern_num, 0, True)

        # construct mesh frame
        x = np.linspace(xmin, xmax, 31)
        y = np.linspace(ymin, ymax, 31)
        [WX, WY] = np.meshgrid(x, y)
        WX = np.reshape(WX, (-1, 1))
        WY = np.reshape(WY, (-1, 1))
        frame_field = Frame(WX.ravel(), WY.ravel(), np.zeros(len(WX)), np.zeros(len(WX)))

        # get posterior
        ux_pos, uy_pos, covx_pos, covy_pos = mix_model.b[pattern_num].GP_posterior(
            frame_field, frame_pattern_ink, True)

    return [WX, WY, ux_pos, uy_pos]
def analyze(self, start_time=0, end_time=0):
    """Main analysis process

    Capture frames with given video, retrieve info from each frame. All
    retrieved info in one frame is stored in a Frame object, then the
    Frame obj is pushed into array: self.frames

    Author:
        Appcell

    Args:
        None

    Returns:
        None
    """
    video = VideoLoader(self.video_path)
    step = int(round(video.fps / self.analyzer_fps))
    frame_image_index = start_time * video.fps
    frame_image = video.get_frame_image(frame_image_index)
    step_cnt = 0

    # while frame_image is not None and frame_image_index < end_time * video.fps:
    while frame_image is not None and frame_image_index < video.frame_number:
        # cv2.imshow('t', frame_image)
        # cv2.waitKey(0)
        frame = Frame(
            frame_image,
            start_time + (1 / float(self.analyzer_fps)) * step_cnt,
            self)
        self.frames.append(frame)
        frame_image_index += step
        step_cnt += 1
        frame_image = video.get_frame_image(frame_image_index)

    video.close()
    self.clear_all_frames()
    self.output_to_excel()
def __init__(self):
    # This is needed to access the variables, methods, etc. in design.py
    super().__init__()
    self.setupUi(self)  # This initialises our UI design
    self._frames = {}
    self._scheme = Frame.loadFromDb()
    self._params = self._scheme._children_[param_string]
    self._profs = self._scheme._children_[job_string]
    # Load the list of frames
    self.getAll()
    self.get_all_btn.clicked.connect(self.getAll)
    self.frame_table.itemSelectionChanged.connect(self.printSlots)
    self.slot_table.itemDoubleClicked.connect(self.updateSlot)
    self.add_alg_btn.clicked.connect(self.addFrame)
    self.del_alg_btn.clicked.connect(self.removeFrame)
    self.set_slot_btn.clicked.connect(self.editSlot)
    self.search_name_btn.clicked.connect(self.searchName)
    self.search_slot_btn.clicked.connect(self.searchSlot)
def __init__(self):
    self._max_window = 20
    # frames in the sliding window
    self._frames_DB = []
    # mappoints in the sliding window; each element is a dict (descriptor -> Mappoints)
    self._mappoints_DB = {}
    self._state = np.array([])
    self._descriptor2state = {}
    self._frameid2state = {}
    self._jacobi = np.array([])
    self._error = np.array([])
    self._measure = Measure()
    self._prior_matrix = np.array([])
    self._prior_matrixb = np.array([])
    self._lastframe = Frame(0)
    self._coefficient = [[], []]
    self._measure_count = 0
    # draw
    self._esti_pose = [[], []]
    self._f2ftrack = []
    self._f2ftrack_show = [[], []]
    self._slideframes = [[], []]
    self._slidepoints = [[], []]
def getStackTrace(self):
    rawOutputClean = self._callWinDbg('kcn')
    rawOutputExtended = self._callWinDbg('kpn')
    frames = []
    for idx in range(MAX_STACK_DEPTH):
        x = self._getLineAndWarningForStackTrace(idx, rawOutputClean)
        if not x:
            break
        stackLine, warning = x
        moduleAndFunction = stackLine.split()[1]
        if '!' in moduleAndFunction:
            module, function = moduleAndFunction.split('!')
        else:
            module = moduleAndFunction
            function = None

        x = self._getLineAndWarningForStackTrace(idx, rawOutputExtended)
        extendedStackLine, warning = x
        match = re.findall(REGEX_FILE_AND_LINE_FROM_FRAME, extendedStackLine)
        if match:
            sourceFile, line = match[0]
        else:
            sourceFile = None
            line = None

        variables = self._getVariablesForFrame(idx)
        f = Frame(module, idx, function, sourceFile, line, variables=variables,
                  warningAboutCorrectness=warning)
        frames.append(f)

    threadId = self._getThreadId()
    return Stack(frames, threadId)
def deserialize(self, s):
    ret = json.loads(s)
    self.max_frame = ret['max_frame']
    self.max_point = ret['max_point']
    self.points = []
    self.frames = []

    pids = {}
    for p in ret['points']:
        pp = Point(self, p['pt'], p['color'], p['id'])
        self.points.append(pp)
        pids[p['id']] = pp

    for f in ret['frames']:
        ff = Frame(self, None, f['K'], f['pose'], f['id'])
        ff.w, ff.h = f['w'], f['h']
        ff.key_points = np.array(f['kpus'])
        ff.descriptors = np.array(f['descriptors'])
        ff.pts = [None] * len(ff.key_points)
        for i, p in enumerate(f['pts']):
            if p != -1:
                ff.pts[i] = pids[p]
        self.frames.append(ff)
async def recv(self, storage_dict, lock, channel: 'Channel'):
    await asyncio.sleep(1)
    print(self.name, "\t:\tWaiting for frame")
    await lock.acquire()
    try:
        val: Frame = await asyncio.wait_for(
            channel.transmit(storage_dict, self.name), timeout=1)
        await asyncio.sleep(5)
        print("Frame was received! Sending ACK...")
        ack = Frame()
        ack.source_address = self.name
        ack.destination_address = val.source_address
        ack.setData('1')
        print(ack.getData())
        await asyncio.wait_for(
            channel.place(storage_dict, ack.destination_address, ack, lock),
            timeout=1)
        # await asyncio.wait_for(self.send(ack, storage_dict, lock, channel), timeout=0.05)
    except asyncio.TimeoutError as e:
        print(self.name, '\t:\tTimed Out!', e)
    lock.release()
def morph(begin, end, pt, corner, factor):
    """
    Interpolates a new frame between two given frames 'begin' and 'end',
    putting the given 'corner' of the new frame's rectangle at point 'pt'.
    'factor' is the position between begin (0.0) and end (1.0).
    """
    result = Frame()

    # calculate current size
    size = fade(begin.size(), end.size(), factor)

    # calculate current rectangle
    result.rect = [
        pt[X] if corner[X] is L else pt[X] - size[X],
        pt[Y] if corner[Y] is T else pt[Y] - size[Y],
        pt[X] if corner[X] is R else pt[X] + size[X],
        pt[Y] if corner[Y] is B else pt[Y] + size[Y],
    ]

    # calculate current alpha value and cropping
    result.alpha = fade(begin.alpha, end.alpha, factor)
    result.crop = fade(begin.crop, end.crop, factor)

    # copy original size from begin
    result.original_size = begin.original_size

    return result
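# Minimal sketch of the fade() helper that morph() relies on (assumption: the
# real project defines its own version; this one just interpolates linearly by
# `factor` in [0.0, 1.0], element-wise for sequences such as sizes or crop
# boxes and directly for plain numbers such as alpha).
def fade(begin, end, factor):
    try:
        # sequences: interpolate element-wise
        return [b + (e - b) * factor for b, e in zip(begin, end)]
    except TypeError:
        # plain numbers
        return begin + (end - begin) * factor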
def simulator2(RUN_COUNT=100):
    f = Frame()
    FRAME_TIME = 10
    player1 = Player.create(f, 0, 0)
    player2 = Player.create(f, 0, 0)
    gc = Client(f, player1, player2, 5)
    print(player1.id, player2.id)

    def update():
        gc.update()
        print("player1: (%.1f, %.1f), player2: (%.1f, %.1f)" %
              (gc.players[0].x, gc.players[0].y, gc.players[1].x, gc.players[1].y))
        f.cvs.after(FRAME_TIME, update)

    f.cvs.after(RUN_COUNT * FRAME_TIME, f.quit)
    update()
    f.pack(fill=Tk.BOTH, expand=1)
    f.mainloop()
    return player1, player2
def run(self):
    for event in pg.event.get():
        if event.type == pg.VIDEORESIZE:
            pg.display.set_mode(event.size, pg.RESIZABLE)
            break
    g.init()
    pg.font.init()
    self.heading = pg.font.Font(None, 96).render("Tic - Tac - Toe", True, g.WHITE)
    self.reset_text = pg.font.Font(None, 56).render("Reset", True, g.WHITE)
    self.question_text = pg.font.Font(None, 72).render("?", True, g.WHITE)
    self.close_text = pg.font.Font(None, 64).render("X", True, g.WHITE)
    self.help_text = [
        pg.font.Font(None, 36).render(i, True, g.WHITE)
        for i in (
            "Each player takes it in turn to place their X or O",
            "into one of the empty squares in the grid by clicking on it.",
            "To win the game get three of your symbols in a line",
            "horizontally, vertically or diagonally",
        )
    ]
    self.help_pos = pg.Rect(
        (3 * g.WIDTH + g.FRAME_GAP * 3) // 4 - 40,
        (g.HEIGHT * 0.5 - g.FRAME_GAP * 1.5) // 2 - 40,
        80,
        80,
    )
    self.reset_rect = pg.Rect(
        g.WIDTH / 2 - self.reset_text.get_width() / 2,
        g.HEIGHT - self.reset_text.get_height() - 80,
        self.reset_text.get_width(),
        self.reset_text.get_height() + 20,
    )
    self.font = pg.font.Font(None, 72)
    self.cross_ui = Animate(self, color=g.ORANGE).cross(
        ((g.WIDTH - g.FRAME_GAP * 3) / 4, g.HEIGHT / 2 - g.FRAME_GAP / 4), 43, 11
    )
    self.circle_ui = Animate(self, color=g.RED).circle(
        (g.WIDTH - (g.WIDTH - g.FRAME_GAP * 3) / 4, g.HEIGHT / 2 - g.FRAME_GAP / 4),
        40,
        8,
    )
    if self.canvas is not None:
        self.canvas.grab_focus()
    self.frame = Frame(self, (g.WIDTH / 2, g.HEIGHT / 2))
    self.clock = pg.time.Clock()
    while self.running:
        if self.journal:
            # Pump GTK messages.
            while Gtk.events_pending():
                Gtk.main_iteration()
        self.check_events()
        self.draw()
        self.clock.tick(g.FPS)
    pg.display.quit()
    pg.quit()
    sys.exit(0)