def blocks():
    while True:
        piece = self.session.get_piece_request(self.have_pieces)
        LOG.info('[{}] generating blocks for piece {}'.format(self, piece))
        for block in piece.blocks:
            yield block
def main():
    LOG.setVerbose()

    # configure renderer
    log.info('creating renderer')
    renderer = render.Renderer()
    renderer.cpoly = vertex.VertexObject(BUFSIZE)
    renderer.dimension = 500
    renderer.gpu = False

    # create window
    log.info('creating window')
    window = display.Window('Bezier Splines')
    window.renderer = renderer
    window.handler = Handler(window)

    # configure shader
    renderer.shader.vertex = 'shader/std.vert'
    renderer.shader.geometry = 'shader/bezier.geom'
    renderer.shader.fragment = 'shader/std.frag'
    renderer.shader.compile()

    # appearance
    renderer.background = (.2, .2, .2, 0.)
    renderer.cpolyColor = (1., .6, 0., 0.)
    renderer.splineColor = (0., .6, 1., 0.)

    window.show()
def on_block_received(self, piece_index, begin, data):
    '''Writes off pieces after their download is finished.

    Removes the piece from `self.pieces`, verifies the piece hash,
    sets `self.have_pieces[piece_index]` to True if the hash verifies,
    and otherwise reinserts the piece into `self.pieces`.
    '''
    piece = self.pieces[piece_index]
    piece.save_block(begin, data)

    if not piece.is_complete():
        return

    piece_data = piece.data
    res_hash = hashlib.sha1(piece_data).digest()
    exp_hash = self.torrent.get_piece_hash(piece_index)

    self.pieces_in_progress.pop(piece.index)  # I added this

    if res_hash != exp_hash:
        # todo - re-enqueue request for this piece
        LOG.info('Hash check failed for piece {}'.format(piece_index))
        piece.flush()
        return

    LOG.info('Hash for piece {} is valid'.format(piece_index))
    self.received_pieces[piece.index] = piece  # I added this.
    self.received_blocks.put_nowait((piece_index * self.piece_size, data))
def runOS(cmd, debug=DBG):
    """ run os.system(cmd); in debug mode only log it """
    LOG.debug(cmd)
    if not debug:
        os.system(cmd)
def note_on(track, ptr, ev):
    poly_id = ptr.u8()
    note = ev
    velocity = ptr.u8()

    track.insert(NoteOn, poly_id=poly_id, note=note, velocity=velocity)
    track.touched[poly_id] = True

    # Note history...
    if poly_id not in range(1, 8):
        LOG.error('Invalid poly_id at %06X = %02X (%02X v=%02X) %s',
                  track._prev_addr, poly_id, note, velocity, track.note_history)
    else:
        old = track.note_history[poly_id]
        if isinstance(old, int):
            LOG.error('Double note_on at %06X = %02X (%02X v=%02X) %s',
                      track._prev_addr, poly_id, note, velocity, track.note_history)

    track.note_history[poly_id] = note
def get_file_path(self, outdir, torrent):
    name = torrent[b'info'][b'name'].decode()
    file_path = os.path.join(outdir, name)
    if os.path.exists(file_path):
        # TODO: add (num) to file name
        LOG.info('Previous download exists')
    return file_path
def from_ptr(cls, ptr: Pointer):
    d = cls()
    d.tracknum = ptr.u8()
    d.addr = ptr.u24()
    LOG.warning('Track %d', d.tracknum)
    return d
def run(self):
    """ run each of the managers in its own thread """
    for key in self.managers:
        LOG.info('starting SouthBoundExtended for %s' % key)
        tmp_th = threading.Thread(target=self.managers[key].run)
        tmp_th.start()
async def download(self):
    retries = 0
    while retries < 5:
        retries += 1
        try:
            await self._download()
        except asyncio.TimeoutError:
            LOG.warning('Timed out connecting with: {}'.format(self.host))
async def get_peers(self):
    peers_resp = await self.request_peers()

    if b'failure reason' in peers_resp:  # I added this
        LOG.error('Request to tracker for Peers failed with reason: {}'.format(
            peers_resp[b'failure reason']))
        return

    peers = self.parse_peers(peers_resp[b'peers'])
    return peers
def run_process(cmd, debug=DBG):
    """ run the cmd """
    LOG.debug(cmd)
    if not debug:
        p = Popen(cmd, shell=True, stdout=PIPE)
        p.wait()
        return p.stdout.read()
async def start(self):
    while True:
        block = await self.received_blocks_queue.get()
        if not block:
            LOG.info('Received poison pill. Exiting')
            return
        block_abs_location, block_data = block
        os.lseek(self.fd, block_abs_location, os.SEEK_SET)
        os.write(self.fd, block_data)
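# Illustrative, self-contained sketch (not part of the original code) of the
# poison-pill pattern the loop above relies on: the producer enqueues a falsy
# sentinel and the consumer exits when it sees it. The queue name and the
# (offset, data) tuple shape mirror the code above; everything else is made up.
import asyncio


async def _poison_pill_demo():
    received_blocks_queue = asyncio.Queue()
    received_blocks_queue.put_nowait((0, b'block data'))  # a normal block
    received_blocks_queue.put_nowait(None)                # poison pill

    while True:
        block = await received_blocks_queue.get()
        if not block:
            print('poison pill received, exiting')
            return
        offset, data = block
        print('would write %d bytes at offset %d' % (len(data), offset))


asyncio.run(_poison_pill_demo())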
def do_next_timestamp(self, line):
    """ increase the Timestamp """
    # global Timestamp
    if self.Timestamp + 1 < len(self.TIME):
        self.Timestamp += 1
    else:
        LOG.error('No more Timestamps available')
async def download(self):
    retries = 0
    while retries < 5:
        retries = retries + 1
        try:
            await self._download()
        except asyncio.TimeoutError:
            LOG.warning("Timed out connecting with host {}".format(self.host))
def do_dump_database_router(self, router):
    """ Dump the database of the router """
    if router not in self.network.keys():
        LOG.error('Router %s does not exist' % router)
    else:
        Router = self.network[router]
        File = 'database_dump_%s.txt' % router
        Router.cmd('python telnet.py > %s' % File)
def do_tcpdump_router(self, router):
    """ Run a tcpdump on the router """
    if router not in self.network.keys():
        LOG.error('Router %s does not exist' % router)
    else:
        tcpdump = threading.Thread(target=self.tcpdump_router_th,
                                   args=(router, ))
        tcpdump.start()
def is_ok_path_req(req):
    """ checks if path of requirement is ok """
    for i in range(len(req)):
        if i != len(req) - 1:
            if req[i] == '*' and req[i + 1] == '*':
                LOG.error('Cannot have two consecutive * in requirement')
                return False
    return True
async def request_a_piece(self, writer):
    if self.inflight_requests > 1:
        return
    blocks_generator = self.get_blocks_generator()
    block = next(blocks_generator)
    LOG.info('[{}] Request Block: {}'.format(self, block))

    msg = struct.pack('>IbIII', 13, 6, block.piece, block.begin, block.length)
    writer.write(msg)
    self.inflight_requests += 1
    await writer.drain()
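# Quick standalone check (an editor's sketch, not project code) that the
# '>IbIII' pack above produces the BitTorrent 'request' wire format
# <len=0013><id=6><index><begin><length>; the index/begin/length values here
# are hypothetical.
import struct

msg = struct.pack('>IbIII', 13, 6, 2, 16384, 16384)
assert len(msg) == 17  # 4-byte length prefix + 13-byte payload
assert struct.unpack('>IbIII', msg) == (13, 6, 2, 16384, 16384)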
def xiaoyong():
    if flask.request.method == 'GET':
        return verify_request(flask.request, 'get request failed')
    elif flask.request.method == 'POST':
        if not verify_request(flask.request):
            return 'post request failed'
        flask.request.get_data()
        raw_xml = flask.request.data
        LOG.info(raw_xml)
        reqmsg = parse_xml_message(raw_xml)
        rspmsg = process_reqmsg(reqmsg)
        return rspmsg
def get_time(self):
    """ OVERRIDE
    return the time value
    NB: for simulation override this function to fast forward in time
    """
    # global Timestamp
    if self.Timestamp < len(self.TIME):
        return self.TIME[self.Timestamp]
    else:
        LOG.error('TIME out of bound')
        return 0
def _start(self):
    """ for each of the controllers, read config CFG and build
    the corresponding SouthBoundExtended manager """
    for ctrl_name in self.controllers:
        # CFG.read(pathtoREScfg)
        CFG_p = '/tmp/%s.cfg' % ctrl_name
        # CFG.read(CFG_p)
        LOG.info('creating SouthBoundExtended for %s...' % ctrl_name)
        self.managers[ctrl_name] = SouthBoundExtended(cfg=CFG_p)
        time.sleep(10)
def child(track, ptr):
    tracknum = ptr.u8()
    addr = ptr.u24()
    LOG.warning('Track %d', tracknum)

    # Symlink
    track.insert(Child, tracknum=tracknum, addr=addr)
    BmsTrack(track._file, addr, tracknum, None).parse()
def checkRequirementConfig(d, d_req):
    config = d['config']
    fibbing_ctrl = config.get('fibbing-controller')
    hosts_link = config.get('hosts-link')
    links = config.get('links')
    main_controller = config.get('main-controller')
    ospf_links = config.get('ospf-links')
    routers = config.get('routers')

    conf = d_req['config']
    link = conf["link"]
    Error = False

    # Set up the Controller requirements
    for x in link:
        # set up router connected to dest as last one of the path
        rs = x.get('requirement')[-1]
        if rs == '*':
            LOG.error('The Path cannot end with the * router')
            Error = True
            continue
        ds = str(x.get('dest'))

        R = ()
        for r in x.get('requirement'):
            R += (str(r), )

        IS_OK = False
        for dsr in hosts_link:
            host = dsr.get('host')
            router = dsr.get('router')
            # check if dest is indeed connected to router
            # in the topology
            if host.get('name') == ds and router.get('name') == rs:
                IS_OK = True
        if not IS_OK:
            LOG.warning('Destination ' + ds + ' is not connected to ' + rs)

        if str(x.get('status')) == SCHED:
            start_hour = x.get('scheduled').get('start-hour')
            end_hour = x.get('scheduled').get('end-hour')
            tmp = compare(start_hour, end_hour)
            if not tmp:
                Error = True
                LOG.error('start-hour must be smaller than end-hour')

        if str(x.get('status')) == SCHED and\
           (str(x.get('scheduled').get('type')) == BAND or
                str(x.get('scheduled').get('type')) == BACK):
            if not x.get('scheduled').get('link').get('from') or \
               not x.get('scheduled').get('link').get('to'):
                Error = True
                LOG.error('router from and to of link bandwidth or backup not set')

    Error = check_path(links, Error, link)
    return Error
def check_ip(base_net, private_net, ip, Error):
    ip_pfx = str(ip.split('/')[0]) + '/8'
    ip_intf = ipaddress.ip_interface(unicode(ip_pfx))
    if private_net == str(ip_intf.network):
        LOG.error('IP address has the same prefix as private_net: ' + str(ip))
        Error = True

    ip_pfx = str(ip.split('/')[0]) + '/16'
    ip_intf = ipaddress.ip_interface(unicode(ip_pfx))
    if base_net == str(ip_intf.network):
        LOG.error('IP address has the same prefix as base_net: ' + str(ip))
        Error = True

    return Error
def remove_requirement(self, name, prefix):
    """ @API
    :name = unique identifier of a requirement
    :prefix = destination prefix of the requirement
    """
    if not self.simple_req.get(prefix):
        LOG.critical("No requirement for this prefix : " + str(prefix))
    else:
        index = self.get_index_by_name(name, prefix)
        if index != -1:
            del self.simple_req[prefix][index]
            self.change_pfx.append(prefix)
        else:
            LOG.critical("No prefix has this name : " + str(name))
def initialize(args):
    sys.setrecursionlimit(10000)
    lg = rdkit.RDLogger.logger()
    lg.setLevel(rdkit.RDLogger.CRITICAL)
    torch.manual_seed(args.seed)
    torch.cuda.set_device(args.device)

    arg_info = '_%s_LR_%f_HS_%d_RS_%d_AR_%.2f_AI_%d_%s' % (
        args.task_tag, args.lr, args.hidden_size, args.rand_size,
        args.anneal_rate, args.anneal_interval,
        'share_embedding' if args.share_embedding else 'not_share_embedding')

    LOG.init(file_name=current_time + '_' + arg_info)
    logger = logging.getLogger('logger')
    logger.info(args)

    # create the tensorboard log saved folder
    if not os.path.isdir(args.tensorboard_save_dir):
        os.makedirs(args.tensorboard_save_dir)

    # set the tensorboard writer
    train_tb_log_dir = os.path.join(args.tensorboard_save_dir,
                                    current_time + '_' + arg_info + '_train')
    tb_suffix = '_' + arg_info
    train_writer = SummaryWriter(log_dir=train_tb_log_dir, filename_suffix=tb_suffix)

    # create the model saved folder
    if args.model_save_dir is not None:
        # args.model_save_dir = os.path.join(args.model_save_dir, current_time + '_' + arg_info)
        args.model_save_dir = os.path.join(args.model_save_dir, f'{args.task_tag}')
        if not os.path.isdir(args.model_save_dir):
            os.makedirs(args.model_save_dir)

        # save the model config
        with open(os.path.join(args.model_save_dir, 'model_config.json'), 'w') as f:
            json.dump(
                {
                    'vocab': args.vocab,
                    'hidden_size': args.hidden_size,
                    'rand_size': args.rand_size,
                    'share_embedding': args.share_embedding,
                    'use_molatt': args.use_molatt,
                    'depthT': args.depthT,
                    'depthG': args.depthG
                }, f)

    return logger, train_writer
def after(self, track: BmsTrack):
    addr = self.addr
    hist = track._ptr.hist(addr)

    if hist == Visit.MIDDLE:
        raise OverlapError('%s %s - middle of command' % (getname(self), hex(addr)))

    # We are either visiting NONE or BEGIN.
    # Not visited:
    #     Visit target.
    # Visited:
    #     (addr) Check preconditions and apply postconditions.
    #     Note that OldNote compares == None.

    if hist == Visit.NONE:
        track = BmsTrack(track._file, addr, None, track.note_history).parse()
    else:
        assert hist == Visit.BEGIN

        # **** ASSERTION ****
        # We will encounter issues if any touched notes differ from the original call.
        # TODO: We assume we're not calling into the middle of a visited section... :'(
        track = track._file.track_at[addr]

        different = (track.pre_history != track.note_history)
        if any(different & track.touched):
            LOG.error(
                '''Invalid call from %06X to %06X
new:%s
old:%s
out:%s
touched:%s''',
                track._prev_addr, addr,
                track.note_history, track.pre_history,
                track.note_history, track.touched)
    # endif already visited

    # BMS really shouldn't be touching notes, AND leaving running notes untouched...
    if any(track.touched):
        untouched = ~track.touched
        if any(track.pre_history.astype(bool) & untouched):
            LOG.warning('Function leaves running notes untouched %s %s %s',
                        track.touched, track.pre_history, track.note_history)

    # Knowing that all "touched" entries entered identically...
    # We must apply all "touched" entries of the note history.
    track.note_history[track.touched] = track.note_history[track.touched]
def grace_full_shutdown(self):
    """ Shutdown the App, by first closing the controller
    and then closing the Network """
    if self.controllerrunning:
        self.do_stop_controller("Stopping controller...")
        time.sleep(5)   # waiting for shapeshifter
    if self.running:
        self.do_stop_network('Stopping Network...')
    # shutdown the daemon thread
    self.shutdown()
    LOG.info('Stopping the App...')
def control_change(track, ptr, ev) -> ControlChange:
    # u8 cctype, [layout/4] value, [layout%4] duration
    #
    #           duration
    #  val  |  0    ?    u8   u16
    # ------+---- ---- ---- -----
    #  u8   |  94   95   96   97
    #  s8   |  98   99   9a   9b
    #  s16  |  9c   9d   9e   9f
    layout = ev - 0x94

    # READ CCTYPE
    cctype = ptr.u8()

    # READ VALUE
    values = [ptr.u8, ptr.s8, ptr.s16]
    value = values[layout // 4]()

    # READ LENGTH
    durations = [lambda: 0, None, ptr.u8, ptr.u16]
    duration = durations[layout % 4]
    if duration is not None:
        duration = duration()
    else:
        duration = None
        LOG.error('Unknown event 0x%s', b2h(ev))

    # TODO: missing CCTYPEs
    try:
        cctype = BMS2CC[cctype]
    except KeyError:
        LOG.info('[CC %s] Unknown control change %s = %s', b2h(ev), b2h(cctype), value)
        cctype = 'unknown %s' % b2h(cctype)

    # LOG.info('control change - %s', cctype)
    # # ', %02X, %02X', value, duration
    # LOG.info('    value    %s', 'u8 s8 s16'.split()[layout // 4])
    # LOG.info('    duration %s', '0 ? u8 u16'.split()[layout % 4])

    track.insert(ControlChange, cctype=cctype, value=value, length=duration)
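# Standalone sketch mirroring the comment table above (not part of the parser):
# for each control-change opcode 0x94..0x9F, layout // 4 selects the value
# width and layout % 4 selects the duration width.
VALUE_WIDTHS = ['u8', 's8', 's16']
DURATION_WIDTHS = ['none', '?', 'u8', 'u16']

for opcode in range(0x94, 0xA0):
    layout = opcode - 0x94
    print('%02x: value=%-3s duration=%s'
          % (opcode, VALUE_WIDTHS[layout // 4], DURATION_WIDTHS[layout % 4]))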
def checkRequirementConfig(network, requirement):
    """ Check the config received from the ConfD agent concerning the requirements.

    Input: network is the dictionary that contains the network config,
    requirement is the dictionary that contains the requirements config.
    Returns False if no Error was found, True if at least one Error was found.
    """
    Error = False
    conf = requirement['config']
    link = conf["link"]

    for x in link:
        if x.get('requirement')[-1] == '*':
            LOG.error('Path of requirement cannot end with *')
            continue
        rs = x.get('requirement')[-1]
        ds = str(x.get('dest'))

        IS_OK = False
        for lk in network.get('link'):
            src = lk.get('src')
            dest = lk.get('dest')
            # check if dest is indeed connected to router
            # in the topology
            if (src.get('name') == ds and dest.get('name') == rs)\
                    or (src.get('name') == rs and dest.get('name') == ds):
                IS_OK = True
        if not IS_OK:
            LOG.warning('Destination ' + ds + ' is not connected to ' + rs)

        if str(x.get('status')) == SCHED:
            start_hour = x.get('scheduled').get('start-hour')
            end_hour = x.get('scheduled').get('end-hour')
            tmp = compare(start_hour, end_hour)
            if not tmp:
                Error = True
                LOG.error('start-hour must be smaller than end-hour')

        if str(x.get('status')) == SCHED and\
           (str(x.get('scheduled').get('type')) == BAND or
                str(x.get('scheduled').get('type')) == BACK):
            if not x.get('scheduled').get('link').get('from') or \
               not x.get('scheduled').get('link').get('to'):
                Error = True
                LOG.error('router from and to of link bandwidth or backup not set')

    Error = check_path(network.get('link'), Error, link)
    return Error
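# Hypothetical input shapes inferred from the accessors in
# checkRequirementConfig above; every name and value is illustrative only,
# and the SCHED/BAND/BACK constants are represented by placeholder strings.
network_example = {
    'link': [
        {'src': {'name': 'h1'}, 'dest': {'name': 'r3'}},
        {'src': {'name': 'r1'}, 'dest': {'name': 'r2'}},
        {'src': {'name': 'r2'}, 'dest': {'name': 'r3'}},
    ],
}

requirement_example = {
    'config': {
        'link': [
            {
                'dest': 'h1',                        # destination host
                'requirement': ['r1', 'r2', 'r3'],   # path; must not end with '*'
                'status': 'scheduled',               # placeholder for SCHED
                'scheduled': {
                    'type': 'bandwidth',             # placeholder for BAND
                    'start-hour': 8,
                    'end-hour': 18,
                    'link': {'from': 'r2', 'to': 'r3'},
                },
            },
        ],
    },
}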
def check_path(ospf_links, error, requirement):
    edge = []
    for x in ospf_links:
        src = x.get('src')
        dest = x.get('dest')
        edge.append((src.get('name'), dest.get('name')))
        edge.append((dest.get('name'), src.get('name')))

    for req in requirement:
        if '*' not in req.get('requirement'):
            for (s, d) in zip(req.get('requirement')[:-1],
                              req.get('requirement')[1:]):
                if (s, d) not in edge:
                    error = True
                    LOG.error('Invalid path')
    return error
async def request_peers(self):
    async with aiohttp.ClientSession() as session:
        resp = await session.get(self.tracker_url, params=self._get_request_params())
        resp_data = await resp.read()
        LOG.info('Tracker response: {}'.format(resp))
        LOG.info('Tracker response data: {}'.format(resp_data))

        peers = None
        try:
            peers = bencoder.decode(resp_data)
            LOG.info('Tracker response data bdecoded: {}'.format(peers))
        except AssertionError:
            LOG.error('Failed to decode Tracker response: {}'.format(resp_data))
            LOG.error('Tracker request URL: {}'.format(str(resp.url).split('&')))
            raise RuntimeError('Failed to get Peers from Tracker')

        return peers
def __init__(self, file: BmsFile, addr: int, tracknum: Union[int, None],
             note_history: 'Union[array[int], None]'):
    super().__init__()

    # Initialize stuff.
    self._file = file   # type: BmsFile
    self._data = file._data
    self._ptr = Pointer(self._data, addr, file._visited)

    # Insert track into file.
    if tracknum is not None:
        if tracknum in file.tracks:
            LOG.error('Duplicate track %d', tracknum)
        file.tracks[tracknum] = self

    # Insert track at address.
    file.track_at[addr] = self

    # Initialize track.
    self.type = 'track'
    self.tracknum = tracknum
    self.addr = addr
    self.segment = []   # type: List[BmsType]

    # Note history (for note_off commands)
    if note_history is None:
        self.note_history = [None] * 8
    else:
        self.note_history = note_history[:]
    self.note_history = array(self.note_history)

    # Does this track modify note_history?
    # True whenever notes are started/stopped.
    self.touched = array([False] * 8)

    # HISTORY_MODE == 'LAZY'
    self.pre_history = np.copy(self.note_history)
def note_off(track, ptr, ev):
    poly_id = ev % 8

    # magrittr %<>% ?
    # Expression object for track._note_history[poly_id] ?
    # Python sucks.
    note = track.note_history[poly_id]

    track.insert(NoteOff, note=note, poly_id=poly_id)
    track.note_history[poly_id] = OldNote(note)
    track.touched[poly_id] = True

    # Error checking...
    if not isinstance(note, int):
        LOG.error('Double note_off at %06X = %02X %s',
                  track._prev_addr, poly_id, track.note_history)
def commit_change(self):
    """ @API
    Commit the changes and apply the requirements entered
    during the current session.
    """
    for prefix in self.change_pfx:
        tmp = []
        if not self.simple_req[prefix]:
            self.remove_dag_requirement(prefix)
        else:
            for item in self.simple_req[prefix]:
                for s, d in zip(item.path[:-1], item.path[1:]):
                    if (s, d) not in tmp:
                        tmp.append((s, d))
            LOG.debug('add_dag_requirement')
            self.add_dag_requirement(prefix, IGPGraph(tmp))
    del self.change_pfx[:]
    self.refresh_augmented_topo()
async def download(t_file: str, download_loc: str, loop=None):
    '''Entry point for the client: initializes `Peers` and a `DownloadSession`
    according to the configuration in `t_file` (the .torrent file) and saves
    the downloaded file to the `download_loc` directory.'''
    torrent = Torrent(t_file)
    LOG.info('Torrent : {}'.format(torrent))

    torrent_writer = FileSaver(download_loc, torrent)
    session = DownloadSession(torrent, torrent_writer.get_received_blocks_queue())

    tracker = Tracker(torrent)  # implement Tracker class
    peer_info = await tracker.get_peers()

    seen_peers = set()
    peers = [Peer(session, host, port) for host, port in peer_info]
    seen_peers.update([str(p) for p in peers])
    LOG.info('Peers : {}'.format(seen_peers))

    await asyncio.gather(*[peer.download() for peer in peers])
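# Minimal sketch of driving the coroutine above (assumes the surrounding
# module defines Torrent, FileSaver, DownloadSession, Tracker and Peer);
# the .torrent path and output directory are placeholders.
import asyncio

if __name__ == '__main__':
    asyncio.run(download('example.torrent', './downloads'))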
def initialize(args):
    current_time = '{:%Y-%m-%d-%H-%M-%S}'.format(datetime.now())
    sys.setrecursionlimit(10000)
    lg = rdkit.RDLogger.logger()
    lg.setLevel(rdkit.RDLogger.CRITICAL)

    arg_info = '%s_%s_HS_%d_RS_%d' % (
        args.task_tag, args.metric_type, args.hidden_size, args.rand_size)

    LOG.init(file_name=current_time + '_Evaluation' + '_' + arg_info)
    logger = logging.getLogger('logger')
    logger.info(args)

    torch.cuda.set_device(args.device)
    torch.manual_seed(args.seed)

    return logger
def bms_iter(self):
    # TODO: Do we use segment-based or linked-list iteration?
    # Segment-based iteration loses event address information!
    it = iter(self.segment)
    addr = self.addr

    while 1:
        ev = self._file.at[addr]    # type: BmsType
        ev_seg = next(it)
        if ev != ev_seg:
            LOG.error('BmsIter != segment at address %06X', addr)

        yield ev

        if 'next' in ev:
            addr = ev.next
        else:
            break
def instr_change(track, ptr, ev):
    # (ev), ev2, value
    # ev=width, ev2=type
    if ev == 0xA4:
        get = ptr.u8
        LOG.info('instr_change u8')
    elif ev == 0xAC:
        get = ptr.u16
        LOG.info('instr_change u16')
    else:
        assert False

    ev2 = ptr.u8()
    value = get()

    if ev2 == 0x20:
        ev2 = 'bank'
    elif ev2 == 0x21:
        ev2 = 'patch'
    else:
        LOG.warning('[instr %s] Unknown type %02X value=%02X', b2h(ev), ev2, value)
        ev2 = 'unknown %s' % b2h(ev2)

    track.insert(InstrChange, ev2=ev2, value=value)
def Simplify(self, Req):
    """ @API
    Compute the minimum LabelsStack corresponding to :Req,
    based on view of the network
    """
    LOG.debug('Simplifying %s' % str(Req))
    Current_req = self.RewriteReq(Req)
    Finalsrc = Req[0]
    Finaldst = Req[-1]
    src = Finalsrc
    dst = Finaldst
    LabelsStack = list()
    finish = True

    while finish:
        node = self.find_segment(Current_req, src, dst)
        if node != False:
            if node[0] != Finaldst or node[1] == 'adjacency':
                attribute = node[1]
                newDest = node[0]
                dico = {'type': attribute}
                if attribute == 'adjacency':
                    dico['prev'] = src
                LabelsStack.append((newDest, dico))
                src = node[0]
                dst = Finaldst
                src_index = Req.index(src)
                dst_index = Req.index(dst)
                Current_req = Req[src_index:dst_index + 1]
                Current_req = self.RewriteReq(Current_req)
            else:
                finish = False
            if node[0] == Finaldst:
                finish = False
        else:
            Current_req.pop()
            dst = Current_req[-1][-1]

    LOG.debug('output: %s' % str(LabelsStack))
    return LabelsStack
def after(event, file: 'BmsFile', ptr: Pointer):
    addr = event.addr
    hist = ptr.hist(addr)

    if hist == Visit.NONE:
        track = BmsTrack(track._file, addr, None, track.note_history).parse()
    else:
        assert hist == Visit.BEGIN

        # **** ASSERTION ****
        # We will encounter issues if any touched notes differ from the original call.
        # TODO: We assume we're not calling into the middle of a visited section... :'(
        track = track._file.track_at[addr]

        different = (track.pre_history != track.note_history)
        if any(different & track.touched):
            LOG.error(
                '''Invalid call from %06X to %06X
new:%s
old:%s
out:%s
touched:%s''',
                track._prev_addr, addr,
                track.note_history, track.pre_history,
                track.note_history, track.touched)
    # endif already visited

    # BMS really shouldn't be touching notes, AND leaving running notes untouched...
    if any(track.touched):
        untouched = ~track.touched
        if any(track.pre_history.astype(bool) & untouched):
            LOG.warning('Function leaves running notes untouched %s %s %s',
                        track.touched, track.pre_history, track.note_history)

    # Knowing that all "touched" entries entered identically...
    # We must apply all "touched" entries of the note history.
    track.note_history[track.touched] = track.note_history[track.touched]
def main(argv):
    """
    Program entry point. This function blocks.

    :param argv: sys.argv (or some mock)
    :returns: None
    :rtype: None

    """
    delim = '\n' + '- ' * 40
    args = parse_args()

    if args.verbose:
        logger.setVerbose()

    if args.trace:
        logger.setTrace()

    # determine window size
    # list() so the ratio survives being unpacked and reused below
    ratio = list(map(int, args.res.split('x')))

    #
    #   INITIALIZE GLUT
    #
    cwd = os.getcwd()
    glt.glutInit([])
    os.chdir(cwd)

    glt.glutInitDisplayMode(
        glt.GLUT_DEPTH | glt.GLUT_DOUBLE | glt.GLUT_RGB)

    glt.glutInitWindowSize(*ratio)
    glt.glutCreateWindow("Rendering %s" % args.filename.rstrip('.obj'))

    #
    #   CREATE SCENE
    #
    log('initializing scene:%s' % delim)
    scene = render.Scene.Instance()
    scene.setShading(args.shading)
    scene.callback = glt.glutSwapBuffers
    scene.repaint = glt.glutPostRedisplay

    # alter appearance
    scene.background = util.Color().hex(0x33333300)
    light = scene.createLight()
    light.position = 0.5, 2., 0.5

    # create camera
    cam = render.Camera.Instance()
    cam.fow, cam.ratio = args.fow, ratio
    cam.mode, cam.offset = cam.ORTHOGONALLY, 1
    scene.camera = cam

    # create entities
    log('parsing and initializing geometries:%s' % delim)
    for obj in parser.ObjParser(args.filename).objects:
        polyhedron = geometry.Polyhedron(obj)
        ent = scene.addEntity(polyhedron)
        ent.material.ambient = util.Color().rgba(0, 123, 255, 0.5)
        # ent.material.specular = .2, .2, .2, .5
        # ent.material.shininess = 100

    #
    #   BIND HANDLER
    #
    trace('listening:%s' % delim)
    glt.glutMouseFunc(scene.evt.mouseClicked)
    glt.glutMotionFunc(scene.evt.mouseMoved)
    glt.glutKeyboardFunc(scene.evt.keyPressed)
    glt.glutReshapeFunc(scene.evt.reshape)

    # dispatch
    glt.glutDisplayFunc(scene.render)
    glt.glutMainLoop()
def call_jump(track, ev, ptr):
    jumps = {0xC4: Call, 0xC8: Jump}
    evtype = jumps[ev]

    mode = BmsSeekMode(ptr.u8())
    addr = ptr.u24()
    track.insert(evtype, mode=mode, addr=addr)

    # Assert that we aren't jumping into the middle of an instruction.
    hist = ptr.hist(addr)
    if hist == Visit.MIDDLE:
        raise OverlapError('%s %s - middle of command' % (jumps[ev], hex(addr)))

    # We are either visiting NONE or BEGIN.
    # Jump: Visit target (if necessary), then continue.
    # Call
    #     Not visited:
    #         Visit target.
    #     Visited:
    #         (addr) Check preconditions and apply postconditions.
    #         Note that OldNote compares == None.
    # HISTORY_MODE == 'LAZY'

    # **** CALL ****
    if hist == Visit.NONE:
        track = BmsTrack(track._file, addr, None, track.note_history).parse()
    else:
        assert hist == Visit.BEGIN

        # **** ASSERTION ****
        # We will encounter issues if any touched notes differ from the original call.
        # TODO: We assume we're not calling into the middle of a visited section... :'(
        track = track._file.track_at[addr]

        different = (track.pre_history != track.note_history)
        if any(different & track.touched):
            LOG.error(
                '''Invalid call from %06X to %06X
new:%s
old:%s
out:%s
touched:%s''',
                track._prev_addr, addr,
                track.note_history, track.pre_history,
                track.note_history, track.touched)
    # endif already visited

    # BMS really shouldn't be touching notes, AND leaving running notes untouched...
    if any(track.touched):
        untouched = ~track.touched
        if any(track.pre_history.astype(bool) & untouched):
            LOG.warning('Function leaves running notes untouched %s %s %s',
                        track.touched, track.pre_history, track.note_history)

    # Knowing that all "touched" entries entered identically...
    # We must apply all "touched" entries of the note history.
    track.note_history[track.touched] = track.note_history[track.touched]

    # **** JUMP ****
    if hist == Visit.NONE:
        track._ptr.addr = addr
    else:
        assert hist == Visit.BEGIN
def parse(track) -> 'BmsTrack':
    insert = track.insert
    ptr = track._ptr
    stop = False

    track._prev_addr = None     # type: int

    while 1:
        prev_addr = ptr.addr
        track._prev_addr = prev_addr

        ev = ptr.u8(mode=Visit.BEGIN)

        # **** FLOW COMMANDS
        if 0:
            pass

        elif ev == 0xC1:    # new track
            track.child(ptr)

        elif ev == 0xFF:    # end track
            if track.tracknum is None:
                raise SeqError('end_track from function')
            insert(EndTrack, next=False)
            stop = True

        elif ev in [0xC4, 0xC8]:    # call address
            track.call_jump(ev, ptr)

        elif ev == 0xC6:    # pop address
            if track.tracknum is not None:
                raise SeqError('pop from root thread')
            insert(Pop, next=False)
            stop = True

        # **** SPECIAL COMMANDS
        elif ev == 0xE7:    # init track
            unknown = ptr.u16()
            # ASSERT
            if unknown != 0:
                LOG.warning('Track init %04X != 0x0000', unknown)
            insert(InitTrack, unknown=unknown)

        elif ev in [0xA4, 0xAC]:    # instr change
            track.instr_change(ptr, ev)

        elif ev == 0xFE:    # tick rate
            insert(TickRate, value=ptr.u16())

        elif ev == 0xFD:    # tempo
            insert(Tempo, value=ptr.u16())

        elif ev == 0xE6:    # TODO unknown
            unknown = ptr.u16()
            # ASSERT
            if unknown != 0:
                LOG.warning('WriteRemovePool %04X != 0x0000', unknown)
            insert(WriteRemovePool, unknown=unknown)

        # **** NOTES
        elif ev < 0x80:     # note on
            track.note_on(ptr, ev)

        elif ev in range(0x81, 0x88):   # note off
            track.note_off(ptr, ev)

        # **** CONTROL CHANGES
        elif 0x94 <= ev <= 0x9F:
            track.control_change(ptr, ev)

        # **** DELAYS
        elif ev == 0x80:
            insert(Delay, dtime=ptr.u8())
        elif ev == 0x88:
            insert(Delay, dtime=ptr.u16())
        elif ev == 0xEA:
            insert(Delay, dtime=ptr.u24())
        # TODO 0xCF
        elif ev == 0xF0:
            insert(Delay, dtime=ptr.vlq())

        # **** FALLBACK
        else:
            text = 'unknown event %s' % b2h(ev)
            LOG.warning(text)
            insert(text)
            stop = True

        assert prev_addr in track._file.at
        assert track._file.at[prev_addr] is not None

        if stop:
            break

    # HISTORY_MODE == 'LAZY'
    return track
def after(self, track: BmsTrack):
    if self.unknown != 0:
        LOG.warning('Track init %04X != 0x0000', self.unknown)