def cancelEvent(self, job, event_type, event_queue):
    # Rebuild the queue, keeping every event that does not match both the given job and event type.
    new_queue = EventQueue()
    while not event_queue.empty():
        event = event_queue.get()
        if event.job != job or event.typename != event_type:
            new_queue.put(event)
    return new_queue
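A minimal sketch of the queue interface this helper relies on. These are hypothetical stand-ins (not the project's actual EventQueue and Event classes), assuming a plain FIFO queue and events that carry a `job` and a `typename`:

    # Hypothetical stand-ins illustrating the interface cancelEvent assumes.
    from collections import deque

    class EventQueue:
        def __init__(self):
            self.queue = deque()

        def put(self, event):
            self.queue.append(event)

        def get(self):
            return self.queue.popleft()

        def empty(self):
            return len(self.queue) == 0

    class Event:
        def __init__(self, typename, job=None):
            self.typename = typename
            self.job = job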
def test_market_data_generator():
    start_date = pd.to_datetime("2010-01-01")
    end_date = pd.to_datetime("2010-06-01")

    market_data = DailyBarsDataHander(
        path_prices="../../dataset_development/datasets/testing/sep.csv",
        path_snp500="../../dataset_development/datasets/macro/snp500.csv",
        path_interest="../../dataset_development/datasets/macro/t_bill_rate_3m.csv",
        path_corp_actions="../../dataset_development/datasets/sharadar/SHARADAR_EVENTS.csv",
        store_path="../test_bundles",
        start=start_date,
        end=end_date
    )

    event_queue = EventQueue()

    i = 0
    while True:  # This loop generates new "ticks" until the backtest is completed.
        try:
            market_data_event = next(market_data.tick)  # THIS MUST ONLY BE CALLED HERE!
        except Exception as e:  # The generator raises StopIteration when it is exhausted.
            print(e)
            break
        else:
            event_queue.add(market_data_event)

        i += 1
        if i >= 5:
            break

    assert len(event_queue.queue) == 5
    assert isinstance(event_queue.get(), MarketDataEvent)
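For reference, calling next() on an exhausted generator raises StopIteration, which is what the broad except above ends up catching once the data handler runs out of ticks. A minimal, self-contained illustration with a plain generator (names here are illustrative only):

    # Draining a generator with next() until StopIteration.
    def ticks():
        yield "2010-01-01"
        yield "2010-01-04"

    gen = ticks()
    events = []
    while True:
        try:
            events.append(next(gen))
        except StopIteration:  # Raised when the generator is exhausted
            break

    assert events == ["2010-01-01", "2010-01-04"]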
class Backtester(object):
    def __init__(
        self,
        market_data_handler: DataHandler,
        start: pd.datetime,
        end: pd.datetime,
        logger: Logger,
        output_path: str,
        initialize_hook: Callable = None,
        handle_data_hook: Callable = None,
        analyze_hook: Callable = None,
        print_state: bool = False,
    ):
        self.market_data = market_data_handler
        self.start = start
        self.end = end
        self.logger = logger
        self.output_path = output_path

        start_end_index = self.market_data.date_index_to_iterate
        self.perf = pd.DataFrame(index=start_end_index)  # This is where performance metrics will be stored
        self.stats = {}

        self.initialize = initialize_hook
        self.handle_data = handle_data_hook
        self.analyze = analyze_hook
        self.print = print_state

        self._event_queue = EventQueue()

        # Need to be set via setter methods
        self.portfolio = None
        self.broker = None

    def set_broker(self, broker_cls, **kwargs):
        """
        Set broker instance that will execute orders and manage trades.
        """
        if not issubclass(broker_cls, Broker):
            raise TypeError("Must be subclass of Broker")

        self.broker = broker_cls(self.market_data, **kwargs)

    def set_portfolio(self, portfolio_cls, **kwargs):
        """
        Set the portfolio instance to use during the backtest.
        """
        if not issubclass(portfolio_cls, Portfolio):
            raise TypeError("Must be subclass of Portfolio")

        if not isinstance(self.broker, Broker):
            raise ValueError(
                "broker must be set to an instance of Broker before instantiating the portfolio."
            )

        self.portfolio = portfolio_cls(market_data=self.market_data, broker=self.broker, **kwargs)

    def run(self):
        """
        Runs the backtest.
        """
        if self.initialize is not None:
            self.initialize(self)  # self.context, self.time_context, self.perf

        time0 = time.time()

        while True:  # This loop generates new "ticks" until the backtest is completed.
            try:
                market_data_event = next(self.market_data.tick)  # THIS MUST ONLY BE CALLED HERE!
            except Exception as e:  # The generator raises StopIteration when it is exhausted.
                print(e)
                break
            else:
                self._event_queue.add(market_data_event)

            # This is executed until all events for the tick have been processed
            while True:
                try:
                    event = self._event_queue.get()
                except:  # queue is empty
                    break
                else:
                    # Maybe I should account for returned events being None, which should not be added to the event_queue
                    if event.type == 'DAILY_MARKET_DATA':
                        if event.is_business_day:
                            if self.print:
                                print("Generating signals")
                            signals_event = self.portfolio.generate_signals()

                            if signals_event is not None:
                                self._event_queue.add(signals_event)

                    elif event.type == 'SIGNALS':
                        if self.print:
                            print("Generating orders")
                        orders_event = self.portfolio.generate_orders_from_signals(event)

                        if orders_event is not None:
                            self._event_queue.add(orders_event)

                    elif event.type == 'ORDERS':
                        if self.print:
                            print("processing orders")
                        # Might get no fills or cancelled orders
                        trades_event, cancelled_orders_event = self.broker.process_orders(self.portfolio, event)

                        if trades_event is not None:
                            self._event_queue.add(trades_event)
                        if cancelled_orders_event is not None:
                            self._event_queue.add(cancelled_orders_event)

                    elif event.type == 'TRADES':
                        # Here the portfolio's state with regard to active positions and return calculation can be handled
                        self.portfolio.handle_trades_event(event)  # Don't know what this will do yet. Don't know what it will return

                    elif event.type == 'CANCELLED_ORDERS':
                        self.portfolio.handle_cancelled_orders_event(event)

            """
            Here the day is over. Before ending the day and starting a new one, we want to update the margin
            account according to the latest close prices and update the balance and positions if any were
            liquidated throughout the day.
            Here we also process bankruptcies, delistings, dividends, interest payments and interest received.
            """
            if self.market_data.is_business_day():
                if self.print:
                    print("manage active trades")
                margin_account_update_event = self.broker.manage_active_trades(self.portfolio)

                if margin_account_update_event is not None:
                    # NOTE: Now the money from liquidated positions is available to update the margin account
                    if self.print:
                        print("Handle margin account update")
                    self.portfolio.handle_margin_account_update(margin_account_update_event)

                # Process bankruptcies at the end of the day
                if self.print:
                    print("handle corp actions")
                self.broker.handle_corp_actions(self.portfolio)

                # Dividends are paid at the end of the day
                if self.print:
                    print("handle dividends")
                self.broker.handle_dividends(self.portfolio)

                # NOTE: Pay interest
                if self.print:
                    print("handle interest on short positions")
                self.broker.handle_interest_on_short_positions(self.portfolio)

                # NOTE: Receive interest
                if self.print:
                    print("handle interest on cash and margin accounts")
                self.broker.handle_interest_on_cash_and_margin_accounts(self.portfolio)

            if self.handle_data is not None:
                self.handle_data(self)

            # Notice that this is called after the margin account has been updated and liquidation events
            # have updated the balance and the portfolio state
            self.capture_daily_state()

            report_progress(self.market_data.cur_date, self.start, self.end, time0, "Backtest")

        self.calculate_statistics()

        if self.analyze is not None:
            self.analyze(self)

        return self.perf

    def get_info(self):
        """Get the initial settings of the backtest."""
        print("Start: ", self.start)
        print("End: ", self.end)

    def capture_daily_state(self):
        """
        Calculate various backtest statistics. These calculations are split into their own
        function, but the work is centralized here.
        """
        self.portfolio.capture_state()
        self.broker.capture_state()

    def calculate_statistics(self):
        # Cost related:
        self.stats["total_slippage"] = self.portfolio.costs["slippage"].sum()
        self.stats["total_commission"] = self.portfolio.costs["commission"].sum()
        self.stats["total_charged"] = self.portfolio.costs["charged"].sum()
        self.stats["total_margin_interest"] = self.portfolio.costs["margin_interest"].sum()
        self.stats["total_account_interest"] = self.portfolio.costs["account_interest"].sum()
        self.stats["total_short_dividends"] = self.portfolio.costs["short_dividends"].sum()
        self.stats["total_short_losses"] = self.portfolio.costs["short_losses"].sum()

        # Received related:
        self.stats["total_dividends"] = self.portfolio.received["dividends"].sum()
        self.stats["total_interest"] = self.portfolio.received["interest"].sum()
        self.stats["total_proceeds"] = self.portfolio.received["proceeds"].sum()

        # Other metrics
        self.stats["end_value"] = self.portfolio.calculate_portfolio_value()
        self.stats["total_return"] = self.portfolio.calculate_return_over_period(self.start, self.end)
        self.stats["annualized_rate_of_return"] = self.portfolio.calculate_annulized_rate_of_return(self.start, self.end)
        self.stats["annualized_rate_of_index_return"] = self.portfolio.calculate_annulized_rate_of_index_return(self.start, self.end)

        # NOTE: Add all backtest statistic calculations here
        self.stats["normality_test_result"] = self.portfolio.normality_test_on_returns()
        self.stats["sharpe_ratio"] = self.portfolio.calculate_sharpe_ratio()
        self.stats["adjusted_sharpe_ratio"] = self.portfolio.calculate_adjusted_sharpe_ratio()
        self.stats["std_portfolio_returns"] = self.portfolio.calculate_std_of_portfolio_returns()
        self.stats["std_snp500_returns"] = self.portfolio.calculate_std_of_snp500_returns()
        self.stats["correlation_to_underlying"] = self.portfolio.calculate_correlation_of_monthly_returns()
        self.stats["t_test_on_excess_return"] = self.portfolio.calculate_statistical_significance_of_outperformance_single_sample()
        self.stats["time_range"] = [self.start, self.end]
        self.stats["ratio_of_longs"] = self.broker.blotter.calculate_ratio_of_longs()
        self.stats["pnl"] = self.portfolio.portfolio_value["total"].iloc[-1] - self.portfolio.initial_balance
        self.stats["hit_ratio"] = self.broker.blotter.calculate_hit_ratio()
        self.stats["average_aum"] = self.portfolio.calculate_average_aum()
        self.stats["capacity"] = self.broker.blotter.calculate_capacity()
        self.stats["maximum_dollar_position_size"] = self.broker.blotter.calculate_maximum_dollar_position_size()
        self.stats["frequency_of_bets"] = self.broker.blotter.calculate_frequency_of_bets()
        self.stats["average_holding_period"] = self.broker.blotter.calculate_average_holding_period()
        self.stats["pnl_short_positions"] = self.broker.blotter.calculate_pnl_short_positions()
        self.stats["pnl_long_positions"] = self.broker.blotter.calculate_pnl_long_positions()
        self.stats["number_of_unique_stocks"] = self.broker.blotter.count_unique_stocks()
        self.stats["number_of_trades"] = self.broker.blotter.count_trades()
        self.stats["average_return_from_hits"] = self.broker.blotter.calculate_average_return_from_hits()
        self.stats["average_return_from_misses"] = self.broker.blotter.calculate_average_return_from_misses()
        self.stats["highest_return_from_hit"] = self.broker.blotter.calculate_highest_return_from_hit()
        self.stats["lowest_return_from_miss"] = self.broker.blotter.calculate_lowest_return_from_miss()
        self.stats["broker_fees_per_dollar"] = self.broker.blotter.calculate_broker_fees_per_dollar()
        self.stats["broker_fees_per_stock"] = self.broker.blotter.calculate_broker_fees_per_stock()
        self.stats["annualized_turnover"] = self.broker.blotter.calculate_annualized_turnover()
        self.stats["closed_trades_by_cause"] = self.broker.blotter.count_closed_trades_by_cause()

    def save_state_to_disk_and_return(self):
        """
        Save a snapshot of all relevant backtest state to disk as a pickle.
        The output is used to generate a dashboard for the backtest.
        """
        backtest_state = {
            "timestamp": time.time(),
            "settings": {
                "start": self.start,
                "end": self.end,
                "output_path": self.output_path
            },
            "perf": self.perf,
            "stats": self.stats,
            "portfolio": {
                "costs": self.portfolio.costs,
                "received": self.portfolio.received,
                "portfolio_value": self.portfolio.portfolio_value,
                "order_history": self.portfolio.order_history_to_df(),
                "signals": self.portfolio.signals_to_df(),
                "monthly_returns": self.portfolio.get_monthly_returns(),  # use to make histogram and return per time
            },
            "broker": {
                "blotter_history": self.broker.blotter_history_to_df(),
                "all_trades": self.broker.all_trades_to_df(),
                "trade_objects": self.broker.all_trades_as_objects(),
                "cancelled_orders": self.broker.cancelled_orders_to_df()
            }
        }

        timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        pickle_path = self.output_path + "/backtest_state_" + timestamp + ".pickle"
        pickle_out = open(pickle_path, "wb")
        pickle.dump(backtest_state, pickle_out)
        pickle_out.close()

        return backtest_state
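A hedged sketch of how the class above might be wired together, using only methods shown in the snippet. MyBroker, MyPortfolio, market_data, and logger are hypothetical placeholders, not names from the project:

    # Hypothetical wiring sketch; market_data is assumed to be configured as in
    # test_market_data_generator above, and logger is an assumed Logger instance.
    backtester = Backtester(
        market_data_handler=market_data,
        start=pd.to_datetime("2010-01-01"),
        end=pd.to_datetime("2010-06-01"),
        logger=logger,
        output_path="./backtests",
    )
    backtester.set_broker(MyBroker)        # raises TypeError unless a Broker subclass
    backtester.set_portfolio(MyPortfolio)  # broker must be set first
    perf = backtester.run()
    backtest_state = backtester.save_state_to_disk_and_return()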
class Agent(EventSource, EventListener):
    def __init__(self, config):
        from timer import TimerCollection
        from event import EventQueue

        self.state = STOPPED
        self.config = config
        self._info = None
        self.connections = []
        self.event_queue = EventQueue()
        self.timers = TimerCollection()

        EventSource.__init__(self)
        EventListener.__init__(self)

        # Yes, we are a listener to ourselves
        self.addListener(self)

        self.setState(STARTING)

        if self.config.getBindAddress() != None and \
           self.config.getPort() != None:
            log.debug("Initializing server on %s:%d" %
                      (self.config.getBindAddress(), self.config.getPort()))
            srv_sock = create_server_socket(self.config.getBindAddress(),
                                            self.config.getPort())
            self.addConnection(ServerConnection(srv_sock))
            log.debug("Server initialized")
        else:
            log.debug("Initialized non-server agent")

    def getConnections(self):
        return self.connections

    def getConnection(self, name):
        for c in self.connections:
            if c.getName() == name:
                return c
        return None

    def getConnectionByInfo(self, info):
        for c in self.connections:
            if isinstance(c, AgentConnection):
                if c.getAgentInfo() == info:
                    return c
        return None

    def addConnection(self, conn):
        self.connections.append(conn)
        log.debug("Connection Added (%d)" % (len(self.connections)))

    def dropConnection(self, conn):
        self.connections.remove(conn)
        log.debug("Connection Dropped (%d)" % (len(self.connections)))

    def addEvent(self, event):
        self.event_queue.push(event)
        log.debug("Event Added (%d)" % (len(self.event_queue)))

    def addTimer(self, timer):
        self.timers.add(timer)
        log.debug("Timer Added (%d)" % (len(self.timers)))

    def dropTimer(self, timer):
        self.timers.remove(timer)
        log.debug("Timer Removed (%d)" % (len(self.timers)))

    def getConfig(self):
        return self.config

    def getInfo(self):
        if self._info is None:
            self._info = AgentInfo(self.getConfig())
        return self._info

    def getState(self):
        return self.state

    def setState(self, state):
        self.addEvent(StateChangeEvent(self, self.state, state))
        if self.state != state:
            self.state = state

    def isRunning(self):
        return isinstance(self.state, RunningState)

    # Event Handlers

    def handleConnectionReadEvent(self, event):
        log.debug("Handling Read Event")
        obj = event.getSource().read()
        if obj != None:
            self.addEvent(obj)

    def handleConnectionWriteEvent(self, event):
        log.debug("Handling Write Event")
        event.getSource().write()

    def handleConnectionExceptionEvent(self, event):
        log.debug("Handling Connection Exception Event")
        event.getSource().disconnect()
        self.dropConnection(event.getSource())

    def handleConnectEvent(self, event):
        log.debug("Handling connect event")
        self.addConnection(event.getNewConnection())

    def handleMessageRecievedEvent(self, event):
        # All we do on a receive event is log it.
        # It's up to one of our listeners to care about it.
        log.debug("Received a message of type %s" %
                  str(event.getMessage().__class__))
        if isinstance(event.getMessage(), Request):
            log.debug("Request: %s" % str(event.getMessage()))
        if isinstance(event.getMessage(), Response):
            log.debug("Response: %s" % str(event.getMessage()))

    def handleMessageSendEvent(self, event):
        log.debug("Sending a message of type %s" %
                  str(event.getMessage().__class__))
        if isinstance(event.getMessage(), Request):
            log.debug("Request: %s" % event.getMessage().getKey())
        if isinstance(event.getMessage(), Response):
            log.debug("Response: %s" % str(event.getMessage()))
        if isinstance(event.getTarget(), Connection):
            event.getTarget().write(str(event.getMessage()))

    _handlers = {
        ConnectionReadEvent: handleConnectionReadEvent,
        ConnectionWriteEvent: handleConnectionWriteEvent,
        ConnectionExceptionEvent: handleConnectionExceptionEvent,
        MessageSendEvent: handleMessageSendEvent,
        MessageReceivedEvent: handleMessageRecievedEvent,
        ConnectEvent: handleConnectEvent
    }

    def getHandlers(self):
        return Agent._handlers

    def notify(self, evt):
        found = 0
        hndlrs = self.getHandlers()
        for h in hndlrs.keys():
            if isinstance(evt, h):
                hndlrs[h](self, evt)
                found = 1

    def processEvent(self):
        """Process a single event from the event_queue"""
        log.debug("Going to handle an event")
        event = self.event_queue.pop()
        if event != None:
            try:
                log.debug("Handling event %s" % str(event))
                self.notifyListeners(event)
            except Exception as e:
                log.exception("Error handling event")
def main():
    # Check the command-line arguments
    if len(sys.argv) != 4:
        print("Usage: python " + sys.argv[0] + " <start> <end> <file>")
        sys.exit()
    else:
        (_ROOT, _DEPTH, _BREADTH) = range(3)

        cpu = Processor(1, 1)
        memory = Memory(10 * 1024 * 1024, 1024 * 1024)  # 10MB memory

        # Keep the order Printer -> Reader -> Disk when declaring the devices list
        devices = [
            DeviceManagement(DeviceType.Printer, 2 * 1000 * 1000 * 1000, 2),
            DeviceManagement(DeviceType.Reader, 100 * 1000 * 1000, 2),
            DeviceManagement(DeviceType.Disk, 8002 * 1000, 1)
        ]
        disk = Disk(8002 * 1000, 2 * 1000, 500 * 1024 * 1024)

        start = int(sys.argv[1])
        end = int(sys.argv[2])

        event_queue = EventQueue()
        event_queue.put(Event(EventType.BeginSimulation, start))
        event_queue.put(Event(EventType.EndSimulation, end))

        current_time = 0

        while not event_queue.empty():
            current_event = event_queue.get()
            current_job = current_event.job

            if current_event.current_time > current_time:
                current_time = current_event.current_time

            if current_event.typename == EventType.BeginSimulation:
                jobs = readJobs(disk, cpu)
                for i in jobs:
                    event_queue.put(Event(EventType.BeginJob, i.scheduled_time, i))
                current_time = start

            # Job start
            elif current_event.typename == EventType.BeginJob:
                event_queue.put(Event(EventType.RequestMemory, current_time, current_job))

            # Job end
            elif current_event.typename == EventType.EndJob:
                pass

            elif current_event.typename == EventType.RequestMemory:
                memory.request(current_job, current_job.segment_tree.__getitem__(_ROOT),
                               event_queue, current_time)

            elif current_event.typename == EventType.ReleaseMemory:
                print("\nMEMORY CONTENT BEFORE RELEASE")
                memory.printMemory()
                print("")
                memory.release(current_job, event_queue, current_time)
                event_queue.put(Event(EventType.EndJob, current_time, current_job))

            elif current_event.typename == EventType.RequestCPU:
                current_job.active_segment = current_job.next_segment
                cpu.request(current_event.job, event_queue, current_time)

            elif current_event.typename == EventType.BeginTimeSlice:
                cpu.beginTimeslice(event_queue, current_time)
                # Schedule next interruption
                if current_job.nextAction()[0] == JobAction.Nothing or current_job.nextAction()[1] <= 0:
                    # Advance
                    current_job.advanceAction(cpu.TIMESLICE)
                    # Little fix
                    if current_job.missingTime() < current_job.nextAction()[1]:
                        event_queue.put(
                            Event(EventType.ReleaseCPU,
                                  current_time + current_job.missingTime(),
                                  current_job,
                                  current_job.missingTime() - current_job.nextAction()[1]))
                elif current_job.nextAction()[0] != JobAction.Nothing and current_job.nextAction()[1] <= cpu.TIMESLICE:
                    if current_job.nextAction()[0] == JobAction.SegmentReference:
                        event_queue.put(
                            Event(EventType.SegmentReference,
                                  current_time + current_job.nextAction()[1],
                                  current_job))
                    if current_job.nextAction()[0] == JobAction.IO:
                        if current_job.ios[current_job.current_io].typename == DeviceType.Disk:
                            event_queue.put(
                                Event(EventType.RequestFile,
                                      current_time + current_job.nextAction()[1],
                                      current_job))
                        else:
                            event_queue.put(
                                Event(EventType.RequestIO,
                                      current_time + current_job.nextAction()[1],
                                      current_job))
                elif current_job.missingTime() < cpu.TIMESLICE:
                    event_queue.put(
                        Event(EventType.ReleaseCPU,
                              current_time + current_job.missingTime(),
                              current_job,
                              current_job.missingTime()))

            elif current_event.typename == EventType.UseCPU:
                pass

            elif current_event.typename == EventType.SegmentLoaded:
                event_queue.put(Event(EventType.RequestCPU, current_time, current_job))

            elif current_event.typename == EventType.ReleaseCPU:
                event_queue = cpu.release(current_job, event_queue, current_time)
                if current_job.missingTime() == 0:
                    event_queue.put(Event(EventType.ReleaseMemory, current_time, current_job))

            elif current_event.typename == EventType.RequestIO:
                if cpu.running_job == current_job:
                    event_queue.put(Event(EventType.ReleaseCPU, current_time, current_job))
                devices[current_job.ios[current_job.current_io].device_number].request(
                    current_job, event_queue, current_time)

            elif current_event.typename == EventType.UseIO:
                event_queue.put(
                    Event(EventType.ReleaseIO,
                          current_time + devices[current_job.ios[current_job.current_io].device_number].time,
                          current_job))

            elif current_event.typename == EventType.ReleaseIO:
                devices[current_job.ios[current_job.current_io].device_number].release(
                    current_job, event_queue, current_time)
                current_job.advanceIO()
                event_queue.put(Event(EventType.RequestCPU, current_time, current_job))

            elif current_event.typename == EventType.SegmentReference:
                if current_job.next_segment.memory is None:
                    event_queue.put(Event(EventType.SegmentFault, current_time, current_job))
                else:
                    slice_run = current_job.nextAction()[1]
                    current_job.active_segment = current_job.next_segment
                    if current_job.missingTime() <= cpu.TIMESLICE:
                        event_queue.put(
                            Event(EventType.ReleaseCPU,
                                  current_time + current_job.missingTime() - slice_run,
                                  current_job,
                                  current_job.missingTime() - slice_run))

            # The newly selected segment is not in memory
            elif current_event.typename == EventType.SegmentFault:
                if cpu.running_job == current_job:
                    event_queue.put(Event(EventType.ReleaseCPU, current_time, current_job))
                # Request memory
                if not memory.FULL:
                    memory.request(current_job, current_job.next_segment, event_queue, current_time)
                else:
                    memory_full_time_evaluation = 10
                    if current_job.missingTime() > current_job.nextAction()[1]:
                        event_queue.put(
                            Event(EventType.RequestCPU,
                                  current_time + memory_full_time_evaluation,
                                  current_job))

            elif current_event.typename == EventType.EndTimeSlice:
                cpu.endTimeslice(event_queue, current_time)

            elif current_event.typename == EventType.RequestFile:
                if cpu.running_job == current_job:
                    event_queue.put(Event(EventType.ReleaseCPU, current_time, current_job))
                disk.diskRequest(
                    current_job,
                    current_job.ios[current_job.current_io].file.name,
                    current_job.ios[current_job.current_io].operation,
                    current_job.ios[current_job.current_io].number_tracks,
                    event_queue, current_time)

            elif current_event.typename == EventType.UseFile:
                event_queue.put(
                    Event(EventType.ReleaseFile,
                          current_time + disk.useTime(current_job.ios[current_job.current_io].number_tracks),
                          current_job,
                          disk.useTime(current_job.ios[current_job.current_io].number_tracks)))

            elif current_event.typename == EventType.ReleaseFile:
                disk.diskRelease(current_job, event_queue, current_time)
                current_job.advanceIO()
                event_queue.put(Event(EventType.RequestCPU, current_time, current_job))

            elif current_event.typename == EventType.EndSimulation:
                # Drain the remaining events from the queue to finish the simulation
                while not event_queue.empty():
                    event_queue.get()
                    event_queue.task_done()
                current_time = end

            if current_job is not None:
                print("".join(str(current_event.current_time).ljust(15)) +
                      "".join(current_event.typename.ljust(20)) +
                      "".join(current_job.name.ljust(10)) +
                      "".join(current_event.action.ljust(50)) + " " +
                      str(current_job.executed_time) + "ns/" +
                      str(current_job.execution_time) + "ns")
            else:
                print("".join(str(current_event.current_time).ljust(15)) +
                      "".join(current_event.typename.ljust(20)) +
                      "".join(" -".ljust(10)) +
                      "".join(current_event.action.ljust(48)) + " -")

            if current_event.typename == EventType.ReleaseMemory:
                print("\nMEMORY CONTENT AFTER RELEASE")
                memory.printMemory()
                print("")
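The loop above is a discrete-event simulation driven by a time-ordered queue: each handler may schedule future events, and the clock only moves forward as events are popped. A minimal, generic sketch of that pattern using Python's heapq; the names here are illustrative and do not correspond to this simulator's EventQueue/Event classes:

    # Generic discrete-event loop sketch (illustrative names only).
    import heapq
    import itertools

    counter = itertools.count()  # tie-breaker so events with equal times stay FIFO
    queue = []

    def schedule(time, name):
        heapq.heappush(queue, (time, next(counter), name))

    schedule(0, "begin")
    schedule(10, "end")

    current_time = 0
    while queue:
        time_, _, name = heapq.heappop(queue)
        current_time = max(current_time, time_)
        if name == "begin":
            schedule(current_time + 3, "work")  # handlers push future events
        print(current_time, name)               # 0 begin, 3 work, 10 end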
class VoronoiDiagram(object):
    def __init__(self, pts, bounding_box=None, step_by_step=True):
        self.input = pts
        self.bounding_box = bounding_box
        self.edges = []
        self._faces = dict()
        self._compute([Point(*p) for p in pts], bounding_box, step_by_step)

    def _compute(self, points, bounding_box=None, step_by_step=True):
        self.T = AVLBeachLine()
        self.Q = EventQueue(points)
        log('EventQueue: %s' % str(self.Q))

        while not self.Q.is_empty:
            event = self.Q.pop()
            log('Popped %s id=%s' % (event, id(event)))
            if isinstance(event, SiteEvent):
                log('_handle_site_event')
                self._handle_site_event(event)
            else:
                log('_handle_circle_event')
                self._handle_circle_event(event)
            if step_by_step:
                self.animate(event, draw_bottoms=False)
            log('%s' % self.T.T.dumps())
            log('beachline: %s' % self.T)
            log('-----')

        self.animate(SiteEvent((0, -100)), draw_circle_events=False)

    def _handle_site_event(self, evt):
        if self.T.is_empty:
            self.T.insert(evt.site)
        else:
            a = self.T.search(evt.site)
            if a.circle_event is not None:
                log('Deleting false alarm %s id=%s' % (a.circle_event, id(a.circle_event)))
                a.circle_event = None
            log('\tInserting %s into arc %s' % (evt.site, a))
            x = self.T.insert(evt.site, within=a)
            self._create_twins(x.site, a.site)
            # check the triples where this new site is the far left and far right arc.
            self.check_circle(self.T.predecessor(x), evt.site.y)
            self.check_circle(self.T.sucessor(x), evt.site.y)

    def _handle_circle_event(self, evt):
        if evt.is_valid:
            log('\tRemoving arc %s' % evt.arc)
            log('\t\tbefore removal: %s' % self.T)
            pred, suc = self.T.delete(evt.arc)
            log('\t\t after removal: %s' % self.T)
            log('\t\tChecking for deleting arcs involving %s' % evt.arc)
            if pred.circle_event is not None:
                log('\t\t\tGotta delete pred %s' % pred.circle_event)
                pred.circle_event = None
            else:
                log('\t\t\tpred: no circle_event')
            if suc.circle_event is not None:
                log('\t\t\tGotta delete suc %s' % suc.circle_event)
                suc.circle_event = None
            else:
                log('\t\t\tsuc: no circle_event')

            new_half_edge = self._create_twins(suc.site, pred.site)
            new_half_edge._origin = evt.center

            # finish incident hedges!
            log('\t\tUpdate incident edges')
            log('\t\t\tpred/suc %s %s' % (pred, suc))
            for left, right in ((pred.site, evt.arc.site), (evt.arc.site, suc.site)):
                half_edge = None
                log('\t\t\tleft/right %s %s' % (left, right))
                log('\t\t\tneighbours of %s' % self._faces[left])
                for he in self._faces[left]._iter_neighbours():
                    log('\t\t\t\t%s twin: %s' % (he, he._twin))
                    if he._twin._site == right:
                        # half_edge = he
                        he._origin = evt.center
                        log('\t\t\t Chosen to be updated was %s' % he)
                # half_edge._origin = evt.center

            self.check_circle(pred, evt.y)
            self.check_circle(suc, evt.y)
        else:
            log('\t%s was previously deleted' % evt)

    def _create_twins(self, site1, site2):
        half_edge = Hedge(None, None, site1, self._faces.get(site1, None))
        half_edge._twin = Hedge(None, half_edge, site2, self._faces.get(site2, None))
        self.edges.append(half_edge)
        self._faces[site1] = half_edge
        self._faces[site2] = half_edge._twin
        log('\t\tCreated new half-edge: %s' % half_edge)
        log('\t\t\t _faces = %s' % self._faces)
        return half_edge

    def _check_circle(self, predecessor, arc, sucessor, sweep_line_y):
        log('\t\tcheck circle for triple %s %s %s' % (predecessor, arc, sucessor))
        if not predecessor or not sucessor or not arc:
            return
        a, b, c = predecessor.site, arc.site, sucessor.site
        if determinant(b - a, c - b) <= 0:
            circle = circumcircle(a, b, c)
            if circle is not None:
                self.Q.push(CircleEvent(arc, circle))

    def check_circle(self, arc, sweep_line_y):
        predecessor = self.T.predecessor(arc)
        sucessor = self.T.sucessor(arc)
        self._check_circle(predecessor, arc, sucessor, sweep_line_y)
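The determinant and circumcircle helpers used by _check_circle are not shown in this snippet. A hedged sketch of the underlying math only, with hypothetical names operating on plain tuples rather than the project's Point class: the 2D cross product acts as an orientation test, and the circumcenter is the point equidistant from the three sites.

    # Hypothetical helpers illustrating the math behind _check_circle.
    def determinant(u, v):
        # 2D cross product; <= 0 means the turn from u to v is clockwise or collinear.
        return u[0] * v[1] - u[1] * v[0]

    def circumcenter(a, b, c):
        ax, ay = a
        bx, by = b
        cx, cy = c
        d = 2 * (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))
        if d == 0:
            return None  # collinear points have no circumcircle
        ux = ((ax**2 + ay**2) * (by - cy) + (bx**2 + by**2) * (cy - ay) + (cx**2 + cy**2) * (ay - by)) / d
        uy = ((ax**2 + ay**2) * (cx - bx) + (bx**2 + by**2) * (ax - cx) + (cx**2 + cy**2) * (bx - ax)) / d
        return (ux, uy)

    # The circumcenter is equidistant from all three points:
    assert circumcenter((0, 0), (4, 0), (0, 4)) == (2.0, 2.0)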