def main(argv): try: opts, _ = getopt.getopt(argv, "p:", ["port"]) except getopt.GetoptError: sys.exit(1) global HTTP_PORT global devices global configurations global log_db global general_responses for opt, arg in opts: if opt in ("-p", "--port"): try: HTTP_PORT = int(arg) except ValueError: raise ValueError sys.exit(1) ticker = Ticker() try: logging.basicConfig(filename=LOG_FILE, \ level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p') log_db = SqliteHelper() load_general_responses() server = MyServer(('', HTTP_PORT), RequestHandler) ticker.start() print "Http server stated at %d" % HTTP_PORT server.serve_forever() except KeyboardInterrupt: print '^C received, shutting down server' ticker.stop() server.socket.close()
def _one(self, a, b, n=2, d=3):
    """Check Ticker(n, d) invariants over the axis interval [a, b].

    Verifies the chosen step yields enough ticks, that the tick range
    covers [a, b] within one step, that offset/magnitude normalisation
    keeps label digits within the ticker's precision, and finally that
    the rendered labels evaluate back to the tick values.
    """
    # Relative tolerance for the floating-point boundary comparisons below.
    eps = 1e-8
    with self.subTest(a=a, b=b, n=n, d=d):
        t = Ticker(n, d)
        # ticks, prefix, labels = t(a, b)
        # print(a, b, ticks, prefix, labels)
        i = t.step(b - a)
        # The step must produce at least min_ticks intervals across (a, b).
        self.assertGreaterEqual((b - a)/i, t.min_ticks)
        j = t.ticks(a, b)
        # First tick within one step below a; last within one step above b.
        self.assertGreaterEqual(j[0] + i*eps, a)
        self.assertLess(j[0] - i*(1 + eps), a)
        self.assertLessEqual(j[-1] - i*eps, b)
        self.assertGreater(j[-1] + i*(1 + eps), b)
        # Tick count bounded below by min_ticks, above by ceil(min_ticks*5/2).
        self.assertGreaterEqual(len(j), t.min_ticks)
        self.assertLessEqual(len(j), np.ceil(t.min_ticks*5/2))
        o = t.offset(j[0], j[1] - j[0])
        q = j - o
        m = t.magnitude(q[0], q[-1], q[1] - q[0])
        q = q/m
        # After offset/magnitude removal, the leading tick stays within
        # precision+1 digits of the step.
        self.assertLess(abs(q[0]/(q[1] - q[0])), 10**(t.precision + 1))
        if o:
            # not the only reason:
            # self.assertGreater(abs(j[0]/(j[1] - j[0])), 10**t.precision)
            self.assertGreater(q[0] + i*eps/m, 0)
        ticks, prefix, labels = t(a, b)
        # Rendered labels must be unique.
        self.assertEqual(sorted(set(labels)), sorted(labels))
        # Round-trip: evaluate labels (unicode minus/times -> Python ops)
        # and compare against the numeric tick positions.
        v = [eval((prefix + l).replace("−", "-").replace("×", "*"))
             for l in labels]
        np.testing.assert_allclose(ticks, v, atol=2e-14*i)
def transform_images(image_sequence, processors_num=None, use_shared_memory=False):
    """Normalise the loaded image stacks in place and return the sequence.

    Rescales the enhanced frames to double precision, binarises the
    skeleton frames, runs the binary/branching transforms in parallel,
    and — when requested — maps the whole sequence into shared memory.
    """
    progress = Ticker()

    # Enhanced frames: convert to double and rescale intensity.
    enhanced = image_sequence["enhanced"][1].astype(np.double)
    image_sequence["enhanced"] = ('i', rescale_intensity(enhanced))

    # Skeleton frames: binarise to uint8 {0, 1}.
    skeleton_mask = (image_sequence["skeletons"][1] > 0).astype(np.uint8)
    image_sequence["skeletons"] = ('i', skeleton_mask)

    progress.tick("\nTransforming images...")
    mapper = ParallelMap(processors_num)
    image_sequence["binaries"] = (
        'i', mapper.map(transform_binary, image_sequence["binaries"][1]))
    image_sequence["branching"] = (
        'i', mapper.map(transform_distance, image_sequence["branching"][1]))
    progress.tock(" Finished.")

    if not use_shared_memory:
        return image_sequence

    progress.tick("\nMapping to shared memory...")
    shared_sequence = to_shared_memory(image_sequence)
    progress.tock(" Finished.")
    # Return shared memory
    return shared_sequence
def __init_ticker__(self):
    """Create the polling ticker, register its callbacks, and start it."""
    # Ticker(0) when replaying recorded bus-time data from disk,
    # Ticker(None) otherwise.
    interval = 0 if read_bustime_data_from_disk else None
    ticker = Ticker(interval)
    ticker.register(self.check_buses, self.between_checks)
    # TODO: only print new status on non-15-sec ticks if it hasn't changed
    ticker.register(self.broadcast_status, self.between_status_updates)
    ticker.global_error(self.__global_error__)
    ticker.start()
def docMappings(self, connection, indexName=DEFAULT_INDEX):
    """Install the Elasticsearch doc-type mappings on `indexName`.

    Applies one put_mapping call per document type, in the original
    order.  Errors propagate to the caller — identical to the original,
    whose bare `except: raise` re-raised everything (and whose trailing
    `pass` was unreachable), so the try/except was a no-op.
    """
    # (doc_type, mapping body) pairs, applied in order.
    mappings = [
        ("orderbook", Orderbook().orderbookMapping),
        ("ticker", Ticker().tickerMapping),
        ("completed_trades", Trade().completedTradeMapping),
        ("future_ticker", Ticker().futureTickerMapping),
        ("future_price_index", FutureIndex().futurePriceIndexMapping),
        ("kline_candles", KlineCandle().klineMapping),
    ]
    for doc_type, body in mappings:
        connection.indices.put_mapping(
            index=indexName, doc_type=doc_type, body=body)
def run_tracker():
    ''' Runs stock tracker with Submissions and ticker instances

    Scans up to 1000 new pennystocks submissions, tallies ticker mentions
    into the module-level `count` Counter, and prints the five most
    common.  `find_ticker` returns False when no ticker is found.
    '''
    subreddit = Submissions('pennystocks')
    stock = Ticker()
    for submission in subreddit.subred.new(limit=1000):
        title = subreddit.retrieve_submission('title', submission)
        stock_iter = stock.find_ticker(title)
        # Identity check against the False sentinel (idiomatic; the original
        # `!= False` compared by equality).  The redundant `else: continue`
        # has been dropped.
        if stock_iter is not False:
            count[stock_iter] += 1
    print('Most common:')
    for company, cnt in count.most_common(5):
        print('%s: %7d' % (company, cnt))
def relation_US_BA(ticker, ticker_ba):
    """Compute the day-by-day adjusted-close ratio ticker/ticker_ba.

    Downloads historical prices for both symbols (using the module-level
    `begin` date), matches records by formatted date, prints each matched
    pair, and returns the ratios as a numpy array.
    """
    tck_ba = Ticker(ticker_ba, begin=begin)
    tck = Ticker(ticker, begin=begin)
    tck.get_historical_price_data()
    tck_ba.get_historical_price_data()

    # Index each symbol's price records by formatted date.
    by_date = {p['formatted_date']: p for p in tck.historical_data['prices']}
    by_date_ba = {p['formatted_date']: p
                  for p in tck_ba.historical_data['prices']}

    ratios = []
    for day in by_date_ba:
        if day not in by_date:
            continue
        print({'date': day,
               'price_ba': by_date_ba[day]['adjclose'],
               'price': by_date[day]['adjclose']})
        try:
            ratios.append(by_date[day]['adjclose'] / by_date_ba[day]['adjclose'])
        except Exception as e:
            # e.g. missing/None adjclose for that day.
            print(e)
    return np.asarray(ratios)
def on_execute(self):
    """Run the game: init audio/world, then tick and render until stopped.

    Ticks at self.tps and renders at self.fps, printing measured TPS/FPS
    once a second.  Fixes: the sound path is built with portable
    os.path.join components (the original hard-coded Windows backslashes),
    and locals no longer shadow the builtin `map`.
    """
    if self.on_init() == False:
        self._running = False
    pygame.mixer.init()
    sound_path = os.path.join(
        os.path.dirname(__file__), "..", "sound", "Pixelland.wav")
    pygame.mixer.music.load(sound_path)
    pygame.mixer.music.play(-1)

    tmx_loader = TmxLoader()
    raw_map = tmx_loader.load_map("game_map")
    sprite_handler = SpriteHandler()
    entity_handler = EntityHandler(tmx_loader)
    game_map = Map(raw_map)
    player = Player(Location.Spawn, ID.Player, "Engineer",
                    Sprite_ID.Player_d4, game_map)
    inv = Inv(None)
    entity_handler.add_entity(player)
    renderer = Renderer(entity_handler, sprite_handler, self._display_surf)
    ticker = Ticker(entity_handler, self, inv)

    time_next_tick = 0
    time_next_render = 0
    tick = 0
    render = 0
    nextsecond = 0
    while self._running:
        time_now = pygame.time.get_ticks()
        # Fixed-rate simulation step.
        if time_now > time_next_tick:
            ticker.tick()
            time_next_tick = time_now + 1000 / self.tps
            tick += 1
        # Fixed-rate render step.
        if time_now > time_next_render:
            renderer.render(entity_handler, game_map, inv)
            time_next_render = time_now + 1000 / self.fps
            render += 1
        # Once a second, report measured rates and reset the counters.
        if nextsecond < time_now:
            print("TPS: " + str(tick) + " FPS: " + str(render))
            tick, render = 0, 0
            nextsecond = time_now + 1000
    self.on_cleanup()
def __init__(self, zoomFactor=1.05, zoomMargin=.1, dynamicRange=1e9):
    """Initialise the scale widget: ticker, context-menu actions, metrics."""
    QtWidgets.QWidget.__init__(self)
    self.zoomMargin = zoomMargin
    self.dynamicRange = dynamicRange
    self.zoomFactor = zoomFactor
    self.ticker = Ticker()

    self.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
    # Context-menu actions: (title, shortcut, handler).
    for title, shortcut, handler in (
            ("V&iew range", "CTRL+i", self.viewRange),
            ("Sna&p range", "CTRL+p", self.snapRange)):
        action = QtWidgets.QAction(title, self)
        action.setShortcut(QtGui.QKeySequence(shortcut))
        action.setShortcutContext(QtCore.Qt.WidgetShortcut)
        action.triggered.connect(handler)
        self.addAction(action)

    # Reserve label space: ticker precision plus room for sign/point/exponent.
    qfm = QtGui.QFontMetrics(self.font())
    self._labelSize = QtCore.QSize(
        (self.ticker.precision + 5) * qfm.averageCharWidth(),
        qfm.lineSpacing())

    # View and interaction state, populated later.
    self._start, self._stop, self._num = None, None, None
    self._axisView = None
    self._offset, self._drag, self._rubber = None, None, None
def process(sub, sort, num):
    """Count ticker mentions and crude keyword sentiment in a subreddit feed.

    Returns an OrderedDict of symbol -> Ticker, sorted by mention count
    descending.  Relies on module-level `symbols` (searched with
    binarySearch, so presumably sorted — confirm), `keypos`/`keyneg`
    keyword lists, and `main.readSub`.
    """
    curr = main.readSub(sub, sort, num)
    dictionary = {}
    count = 0  # NOTE(review): dead — immediately reset per element below.
    for element in curr:
        tokenize = word_tokenize(element)
        count = 0      # net sentiment score for this element
        mentions = []  # symbols seen in this element (receive the sentiment)
        for item in tokenize:
            if(len(item) > 1 and binarySearch(symbols, item, 0, None) > 0):
                if(item in dictionary):
                    dictionary[item].mentions += 1
                    if(item not in mentions):
                        mentions.append(item)
                else:
                    # First sighting: Ticker(symbol, mentions=1, sentiment=0).
                    # NOTE(review): the symbol is NOT appended to `mentions`
                    # here, so this element's sentiment never reaches a
                    # first-time ticker — confirm whether that is intended.
                    dictionary[item] = Ticker(item, 1, 0)
            elif item in keypos:
                count += 1
            elif item in keyneg:
                count -= 1
        # Apply this element's net sentiment to every symbol it mentioned.
        for stuff in mentions:
            if(count > 0):
                dictionary[stuff].sentiment += 1
            elif(count < 0):
                dictionary[stuff].sentiment -= 1
    dictionarySort = OrderedDict(sorted(dictionary.items(),
                                        key=lambda i: i[1].mentions,
                                        reverse=True))
    return dictionarySort
def findCollision(originalDigest, fileName, zeros=9, bytesQty=1):
    """Search random numbers for a partial digest collision and report it.

    Draws random integers in [0, getZeroQty(zeros)] until buildData()
    yields a digest that collides with `originalDigest` on `bytesQty`
    bytes, then prints a report.  Returns (attempts, elapsed-seconds,
    attempts-per-second), rounded.

    Cleanup vs original: the unused `originalFragment` local and the
    redundant `found` flag / `end = 0` scaffolding were removed — the
    loop only exits after a collision, so `end` is always set first.
    """
    topNumber = getZeroQty(zeros)
    tick = Ticker()  # stopwatch: calling it returns elapsed seconds
    attempts = 0
    while True:
        attempts += 1
        number = random.randint(0, topNumber)
        byteNum, digest, fragment = buildData(number)
        if qtyBytesOfCollition(originalDigest, digest, bytesQty):
            end = tick()
            bFile = struct.pack('q', 0)
            bNumber = struct.pack('q', number)
            printInfo(originalDigest, bFile, fileName,
                      'FRAGMENTO A ENCONTRAR', False)
            printInfo(digest, bNumber, number, 'COLISION PARCIAL')
            print('Colisión de', bytesQty, 'byte(s)')
            print("\n> Intentos:", attempts)
            print('> Tiempo: %s seg.' % round(end, 4))
            print('> Collision rate: %s c/s' % round(attempts / end, 4))
            break
    return attempts, round(end, 4), round(attempts / end, 4)
def collect_data_again(batch_id, faults):
    """Retry scraping for tickers that previously faulted.

    For each faulted ticker, builds a retry map restricted to the first
    two fault categories, re-runs the scrape, stores the fresh fault dict
    back into `faults`, and logs per-ticker outcome plus batch progress.
    Returns the updated `faults` mapping.
    """
    for idx, symbol in enumerate(faults):
        try:
            # NOTE(review): the [:2] slice limits retries to 'analysis' and
            # 'keystats', silently skipping 'ohlc'/'options' — confirm intended.
            retryable = ['analysis', 'keystats', 'ohlc', 'options'][:2]
            retries = {key: key in faults[symbol] for key in retryable}
            ticker_obj = Ticker(symbol, logger, batch_id, retries, faults[symbol])
            faults[symbol] = ticker_obj.fault_dict
            time.sleep(SLEEP)
            logger.info(f"{symbol},{batch_id},Re-Ticker,Success,")
        except Exception as e:
            logger.warning(f"{symbol},{batch_id},Re-Ticker,Failure,{e}")
        # Progress as a rounded percentage of the batch.
        pct = (idx + 1) / len(faults)
        pct = np.round(100 * pct, 4)
        logger.info(f"SCRAPER,{batch_id},RE-PROGRESS,{pct}%,")
    return faults
def __init__(self, config, core):
    """Set up the LCD, the optional BME280 sensor, and the refresh ticker."""
    super(LCDFrontend, self).__init__()
    self.lcd = RPi_I2C_driver.lcd()

    # Temperature display is optional; only open the I2C bus when enabled.
    if config['lcd']['display_temperature']:
        i2c_bus = SMBus(config['lcd']['bme280_i2c_bus'])
        self.bme280 = BME280(i2c_dev=i2c_bus)
    else:
        self.bme280 = None

    self.ticker = Ticker(self.actor_ref, LCDFrontend.TICK_INTERVAL)
    self.ticker.start()

    # Display state (current stream title, scroll position, etc.).
    self.stream_title = ""
    self.st_pos = 0
    self.lifetime = 0
    self.lcd_on = True
    self.volume = 0
def track_individual(input):
    """Track one contour through the global image sequence and save the result.

    `input` is (index, (filename, initial_polyline)).  Reparametrizes the
    polyline, runs the Tracker, and writes the final snake to a ZIP in
    Output_Folder.  NOTE(review): `tracking_result` is initialised to None
    and never reassigned, so this function always returns None even on
    success — confirm whether it should return `result` instead.
    """
    # Initialize global variables
    global Global_Sequence
    global Global_Parameters
    global Global_Lock
    global Output_Folder
    # Initialize ticker
    ticker = Ticker(Global_Lock)
    # Unpack data
    index, data = input
    filename, initial_polyline = data
    ticker.tick(" Started tracking {0}.".format(filename))
    # Reparametrize contour
    new_x, new_y, new_step = reparametrize(initial_polyline[:, 0],
                                           initial_polyline[:, 1],
                                           Global_Parameters.delta, 'linear')
    initial_points = np.dstack([new_x, new_y])[0]
    tracking_result = None
    try:
        # Create tracker
        tracker = Tracker(Global_Sequence, Global_Parameters)
        # Initialize log
        log = ""
        result = tracker.track(initial_points, log)
        ticker.tock(" Finished: " + filename)
        if result is not None:
            # Serialise writes to the output folder across workers.
            if Global_Lock is not None:
                Global_Lock.acquire()
            try:
                zip_path = os.path.join(Output_Folder,
                                        '{0}.zip'.format(filename))
                # Save ZIP
                zip_csv(zip_path, result["snake_trajectory"][:, -1])
            except:
                # NOTE(review): silently swallows any failure while saving
                # the ZIP — consider logging the exception.
                pass
            if Global_Lock is not None:
                Global_Lock.release()
    except Exception as e:
        print e
        traceback.print_exc()
        ticker.tock(" Failed: " + filename)
    return tracking_result
def addTicker(self, ticker, bought=0, price=0):
    """Validate `ticker` against Yahoo Finance and append it to the list."""
    quote = yf.download(ticker, period="1d", group_by='ticker')
    # yfinance returns an empty frame for unknown symbols.
    if quote.empty:
        print("bad ticker")
        return
    self.tickers.append(Ticker(ticker,
                               price=round(quote['Close'][0], 2),
                               bought=bought,
                               boughtPrice=price))
def __init__(self):
    """Build the Tk quote window: price/percent row plus range rows per symbol.

    Fetches an initial quote via Ticker; aborts construction early (leaving
    the instance only partially initialised) if that first update fails.
    """
    self.ticker = Ticker()
    # Bail out if the initial quote download fails.
    if self.ticker.update_quote() == False:
        return
    self.rootWin = Tk()
    self.rootWin.protocol("WM_DELETE_WINDOW", self.tk_delete)
    self.quit = False
    self.displayDict = {}
    self.entry_count = 0
    self.timer_interval = 10000  # in ms
    rowVal = 0
    for elem in self.ticker.ticker_dict_list:
        # Row: "SYMBOL: last price" (col 0) and percent change (col 1).
        labelStr = elem["Symbol"] + ": " + elem["LastTradePriceOnly"]
        price = StringVar()
        symbolWidget = Label(self.rootWin, textvariable=price)
        symbolWidget.grid(row=rowVal, column=0)
        price.set(labelStr)
        labelStr = elem["ChangeinPercent"]
        percentChange = StringVar()
        percentWidget = Label(self.rootWin, textvariable=percentChange)
        percentWidget.grid(row=rowVal, column=1)
        percentChange.set(labelStr)
        rowVal += 1
        # Colour the two labels according to the percent-change string.
        self.color_code(labelStr, symbolWidget, percentWidget)
        # Row: day range.
        labelStr = "day: " + elem["DaysRange"]
        daysRange = StringVar()
        Label(self.rootWin, textvariable=daysRange).grid(row=rowVal)
        daysRange.set(labelStr)
        rowVal += 1
        # Row: 52-week range.
        labelStr = "52week: " + elem["YearRange"]
        yearRange = StringVar()
        Label(self.rootWin, textvariable=yearRange).grid(row=rowVal)
        yearRange.set(labelStr)
        rowVal += 1
        # Blank spacer row between symbols.
        Label(self.rootWin, text="").grid(row=rowVal)
        rowVal += 1
        # Keep the StringVars/widgets so later updates can refresh in place.
        self.displayDict[elem["Symbol"]] = \
            (price, percentChange, daysRange, yearRange,
             symbolWidget, percentWidget)
def selectChannel(name):
    """Join a Twitch chat channel over the IRC websocket and stream messages.

    Shows a "joining chat" splash, optionally starts the scrolling ticker,
    logs in anonymously (justinfan123), consumes chat until button B is
    pressed, then tears the ticker down and returns to the main menu.
    """
    global ticker
    # Imported lazily — presumably to save memory until chat is opened.
    import uwebsockets.client as uwc
    lcd.clear()
    lcd.text(lcd.CENTER, 40, 'joining')
    lcd.text(lcd.CENTER, 60, 'chat')
    lcd.font(lcd.FONT_UNICODE)
    if tcfg['tickers']:
        ticker = Ticker([''] * 9, 0xffffff, rotation=0, sliding=False,
                        speed=16, delay=10, x=2, multiline=True)
    exitChat = False
    with uwc.connect('wss://irc-ws.chat.twitch.tv') as ws:
        ws.send('NICK justinfan123')  # anonymous read-only login
        # ws.send('CAP REQ :twitch.tv/tags')
        ws.send('JOIN #' + name)
        msg = ws.recv()
        i = 0  # NOTE(review): never used after this — dead variable?
        # Drain the server greeting until the /NAMES end marker appears.
        while msg.find('/NAMES') == -1:
            msg = ws.recv()
        msg = ws.recv()
        while not exitChat:
            if msg != '':
                # gc.collect()
                parseChatMsg(msg)
            msg = ws.recv()
            # Pressing button B exits the chat view.
            while btnB.isPressed():
                exitChat = True
    if tcfg['tickers']:
        ticker.stop()
        del ticker
    return mainMenu
def __init__(self):
    """
    Set up the IOManager thread and start it.
    """
    super(_IOManager, self).__init__()
    # Poller over all registered file descriptors; request edge-triggered
    # mode (returns whether the underlying mechanism actually supports it).
    self.__poll = iopoll.IOPoll()
    self.__et = self.__poll.set_edge_triggered(True)
    # fd -> wrapper map of connected wrappers, plus wrappers still awaiting
    # connection attempts.
    self.__wrappers = {}
    self.__disconnected_wrappers = []
    self.__running = True
    # Separate locks: one guards the wrapper maps, one guards the poller.
    self.__wrapper_lock = threading.RLock()
    self.__poll_lock = threading.RLock()
    self.__logger = logging.getLogger('Borg.Brain.Util.IOManager')
    self.__logger.addHandler(nullhandler.NullHandler())
    # Auto-stop deadline used when no wrappers are registered.
    self.__empty_time = time.time() + 10
    self.__next_tick = time.time() + 0.1
    # Remember the spawning thread so the manager can stop when it exits.
    self.__parent_thread = threading.current_thread()
    # Edge-triggered ready sets: fds currently readable / writable.
    self.__read_list = set([])
    self.__write_list = set([])
    self.__ticker = Ticker(10)
    self.__saved_time = 0

    # Set up wake up pipe in non-blocking mode
    self.__wakeup_read, self.__wakeup_write = os.pipe()
    fl = fcntl.fcntl(self.__wakeup_read, fcntl.F_GETFL)
    fcntl.fcntl(self.__wakeup_read, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    fl = fcntl.fcntl(self.__wakeup_write, fcntl.F_GETFL)
    fcntl.fcntl(self.__wakeup_write, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    self.__poll.register(self.__wakeup_read, True, False, True)

    # Start IOManager thread
    self.start()
def test_load_config(self, mock_get_config, mock_viewer, mock_crypto):
    """load_config stores the config, and shows static errors on failure."""
    # Don't want to wait on sleep in tests
    time.sleep = mock.MagicMock()
    app = Ticker()

    # Happy path: the fetched config is stored on the instance.
    mock_get_config.return_value = "CONFIG!"
    app.load_config()
    self.assertEqual(app.config, 'CONFIG!')

    # Any generic failure surfaces a static "could not load" message.
    mock_get_config.side_effect = Exception()
    app.load_config()
    app.viewer.display_message.assert_called_with(
        "Could not load config :(", MessageType.STATIC)

    # A missing config file gets its own message.
    mock_get_config.side_effect = FileNotFoundError()
    app.load_config()
    app.viewer.display_message.assert_called_with(
        "Config file not found.", MessageType.STATIC)
def collect_data(batch_id, tickers):
    """Scrape each ticker in the batch, logging outcome and progress.

    Constructing Ticker(...) appears to perform the scrape (the instance
    is discarded); a SLEEP pause throttles requests between tickers.
    """
    total = len(tickers)
    for idx, symbol in enumerate(tickers):
        try:
            Ticker(symbol, logger, batch_id)
            time.sleep(SLEEP)
            logger.info(f"{symbol},{batch_id},Ticker,Success,")
        except Exception as e:
            logger.warning(f"{symbol},{batch_id},Ticker,Failure,{e}")
        # Progress as a rounded percentage of the batch.
        pct = (idx + 1) / total
        pct = np.round(100 * pct, 4)
        logger.info(f"SCRAPER,{batch_id},PROGRESS,{pct}%,")
def get_ticker_cotation(self, coin: str) -> Ticker:
    """Fetch the current ticker quotation for `coin` from the REST API.

    Builds <base_url>/<request_path>/<coin>/ticker, GETs it, and returns
    a Ticker built from the response's 'ticker' payload.  Any non-2xx
    status or request/parsing failure is logged and re-raised.
    """
    try:
        url = "/".join((self.base_url, self.request_path, coin, 'ticker'))
        response = requests.get(url)
        # 2xx check.  The original used `not in range(200, 299)`, which
        # wrongly rejected status 299 (range excludes its upper bound).
        if not 200 <= response.status_code < 300:
            self.logger.error(
                "Ticker request not in range between 200 and 299")
            raise Exception(
                "Ticker request not in range between 200 and 299")
        results = response.json()['ticker']
        return Ticker(results)
    except Exception as e:
        self.logger.error(e)
        raise
def main():
    """Start the Discord bot with the cog set chosen by BENSONBOT_ENV_TYPE."""
    bot = commands.Bot(command_prefix=commands.when_mentioned_or("!!"))
    # os.getenv returns None when the variable is unset; default to "" so
    # .upper() cannot raise AttributeError and the unknown-environment
    # branch below is actually reachable.
    env = os.getenv("BENSONBOT_ENV_TYPE") or ""
    if env.upper() == "MAIN":
        from duty_manager import DutyManager
        from memeail import ProblemOfTheDay
        from shitposter import Shitposter
        from ticker import Ticker
        from timers import CSE107Timer, CSE113Timer
        from message_scheduler import MessageScheduler
        bot.add_cog(CSE113Timer(bot))
        bot.add_cog(CSE107Timer(bot))
        bot.add_cog(DutyManager(bot))
        bot.add_cog(MessageScheduler(bot))
        bot.add_cog(ProblemOfTheDay(bot))
        bot.add_cog(Shitposter(bot))
        bot.add_cog(Ticker(bot))
    elif env.upper() == "PLAYGROUND":
        from playground import Playground
        bot.add_cog(Playground(bot))
    elif env.upper() == "TESTING":
        from scoreboard import Scoreboard
        from shitposter import Shitposter
        bot.add_cog(Scoreboard(bot))
        bot.add_cog(Shitposter(bot))
    else:
        print("Unknown environment ({})".format(env))
        return
    print("Starting with env {}".format(env))
    bot.run(os.getenv("BENSONBOT_DISCORD_TOKEN"))
def test_setup_connection(self, setup_wifi, mock_has_internet_connection, mock_viewer):
    """setup_connection skips Wi-Fi setup when online and retries on failure.

    Covers three paths: already connected (no setup attempted), successful
    setup (SETUP + success messages shown), and a failed first attempt
    followed by a successful retry (SETUP + error messages shown).
    """
    # Already online: Wi-Fi setup must not be triggered.
    mock_has_internet_connection.return_value = True
    t = Ticker()
    t.setup_connection()
    setup_wifi.assert_not_called()
    # Offline: setup runs and the connected network name is announced.
    mock_has_internet_connection.return_value = False
    setup_wifi.return_value = "DID IT BOYS"
    t.setup_connection()
    setup_call = mock.call("SETUP", MessageType.STATIC)
    connect_call = mock.call("Successfully connected to DID IT BOYS",
                             MessageType.SCROLLING)
    t.viewer.display_message.assert_has_calls([setup_call, connect_call])
    setup_wifi.assert_called()
    # First attempt raises, second succeeds: the error message is shown.
    setup_wifi.side_effect = [Exception(), "DID IT BOYS"]
    t.setup_connection()
    error_call = mock.call("Error connecting to Wi-Fi. Please try again.",
                           MessageType.SCROLLING)
    t.viewer.display_message.assert_has_calls([setup_call, error_call])
def __init__(self):
    """Initialise pygame, fetch an initial quote, and prepare display state."""
    self.ticker = Ticker()
    # Prime the ticker with a first quote before the window opens.
    self.ticker.update_quote()
    pygame.init()
    # Set the width and height of the screen [width, height]
    self.size = (SCREEN_WIDTH, SCREEN_HEIGHT)
    self.screen = pygame.display.set_mode(self.size)
    pygame.display.set_caption("Ticker LLC")
    self.setup_font()
    # Loop until the user clicks the close button.
    self.done = False
    # Used to manage how fast the screen updates
    self.clock = pygame.time.Clock()
    self.time_elapsed = pygame.time.get_ticks()
    # Items queued for rendering.
    self.displayList = []
def __init__(self):
    """
    Set up the IOManager thread and start it.
    """
    super(_IOManager, self).__init__()
    # Poller over all registered file descriptors; request edge-triggered
    # mode (returns whether the underlying mechanism actually supports it).
    self.__poll = iopoll.IOPoll()
    self.__et = self.__poll.set_edge_triggered(True)
    # fd -> wrapper map of connected wrappers, plus wrappers still awaiting
    # connection attempts.
    self.__wrappers = {}
    self.__disconnected_wrappers = []
    self.__running = True
    # Separate locks: one guards the wrapper maps, one guards the poller.
    self.__wrapper_lock = threading.RLock()
    self.__poll_lock = threading.RLock()
    self.__logger = logging.getLogger("Borg.Brain.Util.IOManager")
    self.__logger.addHandler(nullhandler.NullHandler())
    # Auto-stop deadline used when no wrappers are registered.
    self.__empty_time = time.time() + 10
    self.__next_tick = time.time() + 0.1
    # Remember the spawning thread so the manager can stop when it exits.
    self.__parent_thread = threading.current_thread()
    # Edge-triggered ready sets: fds currently readable / writable.
    self.__read_list = set([])
    self.__write_list = set([])
    self.__ticker = Ticker(10)
    self.__saved_time = 0

    # Set up wake up pipe in non-blocking mode
    self.__wakeup_read, self.__wakeup_write = os.pipe()
    fl = fcntl.fcntl(self.__wakeup_read, fcntl.F_GETFL)
    fcntl.fcntl(self.__wakeup_read, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    fl = fcntl.fcntl(self.__wakeup_write, fcntl.F_GETFL)
    fcntl.fcntl(self.__wakeup_write, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    self.__poll.register(self.__wakeup_read, True, False, True)

    # Start IOManager thread
    self.start()
def upload_sequence(path, processors_num=None, use_shared_memory=False):
    """Load the pre-processed image stacks from `path` and transform them."""
    # Stack files expected in the sequence directory.
    stack_names = [
        'enhanced.tif',
        'skeletons.tif',
        'binaries.tif',
        'branching.tif',  # check if we really need this one
        'gvf_magnitude.tif',
        'gvf_angle.tif',
        'branching_coords.zip',
    ]
    progress = Ticker()
    progress.tick("\nLoading image sequence...")
    sequence = ImageSequence.load(path, stack_names)
    progress.tock(" Finished.")
    return transform_images(sequence, processors_num, use_shared_memory)
from SimpleCV import *
from ticker import Ticker
import IPython

# Continuously grab frames from the first camera, circle detected dark
# blobs in green, and report the frame rate through the ticker.
cam = Camera(0)
fps = Ticker()
while True:
    frame = cam.getImage()
    # Threshold at 200 and invert so dark regions become blobs.
    frame = frame.binarize(200).invert()
    blobs = frame.findBlobs()
    if blobs:
        for blob in blobs:
            frame.drawCircle(blob.centroid(), 10, color=Color.GREEN)
    fps.tick()
    frame.show()
class _IOManager(threading.Thread): """ ############################################################################ ## The IOManager ## ############################################################################ The _IOManager class is a class that is used to manage objects that wrap file descriptors in some way, such as network sockets, pipes, pty's and files. The _IOManager class should never be instantiated directly but should always be accessed through the use of the IOManager() function that makes it sort-of singleton. The reason the __new__ method is not used instead is that that occasionally gives problems when registering new objects, due to the threaded nature of the object. The _IOManager runs in a loop, polling all the registered file descriptors. For each file descriptor, functions are called to notify them that there is data to read or write. """ __instance = None __signal_handlers = {} profile = None ####################################################################### # Class Methods ####################################################################### # The following methods will register and unregister signal handlers # and handle the signals themselves. ####################################################################### @classmethod def signal_handler(cls, signum, frame): """ This method can be registered as a signal handler and will terminate the IOManager thread on the signal and call the original signal handler. """ global _iom_instance # Find name of signal signame = str(signum) for key in signal.__dict__.keys(): if key.startswith("SIG") and getattr(signal, key) == signum: signame = key break try: logger = _iom_instance._IOManager__logger logger.warning("Caught signal %s. Terminating IOManager" % signame) except: print "Caught signal %s. 
Terminating IOManager" % signame original_handler = None if signal in cls.__signal_handlers: original_handler = cls.__signal_handlers[signal] clear_IOM() if original_handler: original_handler(signal, frame) else: sys.exit(1) @classmethod def set_signal_handlers(cls, signals): """ This method is called whenever a IOManager is instantiated. It sets the signal_handler to be the handler for a set of signals to stop the thread when they arrive. """ for sig in signals: try: original_handler = signal.getsignal(sig) if original_handler == cls.signal_handler: continue signal.signal(sig, cls.signal_handler) cls.__signal_handlers[sig] = original_handler except Exception as e: pass @classmethod def restore_signal_handlers(cls): """ This method is called when a IOManager thread ends to restore the original behavior. """ signals = cls.__signal_handlers.keys() for sig in signals: try: signal.signal(sig, cls.__signal_handlers[sig]) except Exception as e: pass cls.__signal_handlers = {} ####################################################################### # Constructor and destructor ####################################################################### def __del__(self): """ Stop the IOManager thread when the object is deleted. """ self.stop() def __init__(self): """ Set up the IOManager thread and start it. 
""" super(_IOManager, self).__init__() self.__poll = iopoll.IOPoll() self.__et = self.__poll.set_edge_triggered(True) self.__wrappers = {} self.__disconnected_wrappers = [] self.__running = True self.__wrapper_lock = threading.RLock() self.__poll_lock = threading.RLock() self.__logger = logging.getLogger('Borg.Brain.Util.IOManager') self.__logger.addHandler(nullhandler.NullHandler()) self.__empty_time = time.time() + 10 self.__next_tick = time.time() + 0.1 self.__parent_thread = threading.current_thread() self.__read_list = set([]) self.__write_list = set([]) self.__ticker = Ticker(10) self.__saved_time = 0 # Set up wake up pipe in non-blocking mode self.__wakeup_read, self.__wakeup_write = os.pipe() fl = fcntl.fcntl(self.__wakeup_read, fcntl.F_GETFL) fcntl.fcntl(self.__wakeup_read, fcntl.F_SETFL, fl | os.O_NONBLOCK) fl = fcntl.fcntl(self.__wakeup_write, fcntl.F_GETFL) fcntl.fcntl(self.__wakeup_write, fcntl.F_SETFL, fl | os.O_NONBLOCK) self.__poll.register(self.__wakeup_read, True, False, True) # Start IOManager thread self.start() ############################################################################ ## IOManager thread ## ############################################################################ def run(self): """ This method will be called when the thread starts. It wraps the _run method to make it possible to profile the IOManager. """ if not self.__class__.profile is None: import cProfile cminstance = self cProfile.runctx('self._run()', globals(), locals(), self.__class__.profile) else: self._run() def _run(self): """ This method is the main thread of the IOManager. It polls all active IOWrappers and calls their read and write handlers when data can be read or written. It will also try to connect any unconnected IOWrappers that have been registered. 
""" global _iom_instance self.__logger.info("IOManager started") timeout = 0 while self.__running: # Poll for events if self.__et: self.__poll_et() else: self.__poll_lt() # Perform IOWrapper checking on the actual # frequency of the ticker, not each time # there is data to read or write if time.time() >= self.__next_tick: self.__next_tick += self.__ticker.get_ticktime() # Call the handle loop function of each connected wrapper self.__handle_loop() # Attempt to connect all IOWrappers self.__attempt_wrappers() # Check if IOManager should keep running self.__check_stop() # Allow for control to be interrupted if other threads require # attention. Do not waste time on this so use a very short sleep time.sleep(0.0001) self.__cleanup() clear_IOM() self.__logger.debug("Savings of wakeup pipe: %f seconds" % self.__saved_time) self.__logger.info("IOManager stopped") ############################################################################ ## Helper methods ## ############################################################################ ## The following methods are (private) helper methods for the IOManager, ## ## handling events and validating wrappers. ## ############################################################################ def __poll_lt(self): """ Helper method for the main thread. Process all events from the poller: call the respective read and write handlers. This version of the method is for Level Triggered behavior, such as kqueue and poll. It will call the read/write handlers of each FD that is ready once. 
""" timeout = self.__ticker.end_tick(False) self.__last_tick_end = time.time() + timeout with self.__poll_lock: events = self.__poll.poll(timeout * 1000) self.__ticker.start_tick() # Process events on the IOWrappers for fd, event in events: # If this was the wake-up pipe, skip if self.__check_wakeup(fd, len(events)): continue wrapper = self.__find_wrapper(fd) if not wrapper: continue if event & iopoll.IO_ERROR: self.__wrap_function(wrapper, "close") if event & iopoll.IO_READ: self.__wrap_function_io(wrapper, "_handle_read") if event & iopoll.IO_WRITE: self.__wrap_function_io(wrapper, "_handle_write") def __poll_et(self): """ Helper method for the main thread. Process all events from the poller: call the respective read and write handlers. This version of the method is for Edge Triggered behavior, when using epoll. It will maintain a set of file descriptors ready for reading and writing. On each loop, it will check if those sets are empty. If they are, a call to the poll function is made with the timeout returned by the ticker, to obtain the set frequency. If the sets are not empty, the lists are quickly updated by calling poll with a timeout of 0. The ready lists are updated with the events returned by the call to poll. """ if self.__read_list or self.__write_list: timeout = 0 else: timeout = self.__ticker.end_tick(False) self.__last_tick_end = time.time() + timeout with self.__poll_lock: events = self.__poll.poll(timeout * 1000) self.__ticker.start_tick() # Update the lists for reading and writing, and # remove/close the file descriptors with an # error state. 
error_list = set([]) for fd, event in events: if fd in error_list: continue if self.__check_wakeup(fd, len(events)): continue if event & iopoll.IO_ERROR: self.__read_list.discard(fd) self.__write_list.discard(fd) error_list.add(fd) wrapper = self.__find_wrapper(fd) if wrapper: self.__wrap_function(wrapper, "close") continue if event & iopoll.IO_READ: self.__read_list.add(fd) if event & iopoll.IO_WRITE: self.__write_list.add(fd) # Perform IO self.__perform_io_et(self.__ticker.end_tick(False) * 0.9) def __perform_io_et(self, max_time): """ After polling and updating of the ready lists has completed the function iterates over the file descriptors ready for reading and writing. The _handle_read and _handle_write of each file descriptor wrapper will be called round-robin in a loop, for as long as the current tick lasts. If the file descriptors become non-readable or non-writable before this happens, the function simply exits and on will continue in the next loop. """ # Perform IO on ready file descriptors start = time.time() while time.time() < start + max_time: if not self.__read_list and not self.__write_list: break # Handle file descriptors ready for reading read_fds = sorted(self.__read_list) for fd in read_fds: wrapper = self.__find_wrapper(fd) if not wrapper: self.__read_list.discard(fd) continue try: self.__wrap_function_io(wrapper, '_handle_read') except IOWrapperEnd: self.__read_list.discard(fd) # Handle file descriptors ready for writing write_fds = sorted(self.__write_list) for fd in write_fds: wrapper = self.__find_wrapper(fd) if not wrapper: self.__write_list.discard(fd) continue try: self.__wrap_function_io(wrapper, '_handle_write') except IOWrapperEnd: self.__write_list.discard(fd) def __check_wakeup(self, fd, num): """ This method checks if the fd is the wakeup pipe, and if so, reads the data from that pipe. """ if fd == self.__wakeup_read: if num == 1: # Was woken up, count savings. 
self.__last_tick_end # if the time the poll would have ended if it was # not awoken by the wakeup pipe. self.__saved_time += self.__last_tick_end - time.time() try: # Empty pipe while True: os.read(self.__wakeup_read, 1) except OSError as err: if err.errno == errno.EAGAIN: # Resource Temporarily Unavailable, occurs when there is # no more data to read. Normal behavior. return True # Other error, something else is wrong. Should not happen. raise return False def __handle_loop(self): """ This method calls the handle_loop function of each wrapper, once every true tick. This should be at the frequency of the ticker, about 10 times per second. """ with self.__wrapper_lock: for wrapper in self.__wrappers.values(): self.__wrap_function(wrapper, "_handle_loop") def __wrap_function(self, wrapper, funcname): """ This method calls a function on the wrapper in a try/except block. This is to make sure the IOManager will keep on running even when exceptions occur in one of the wrappers. This will prevent from the complete system to collapse when one IOWrapper fails. """ try: function = getattr(wrapper, funcname) return function() except Exception as err: self.__handle_exception(wrapper, funcname, err) return None def __wrap_function_io(self, wrapper, funcname): """ This method calls a function on the wrapper in a try/except block. This is to make sure the IOManager will keep on running even when exceptions occur in one of the wrappers. This will prevent from the complete system to collapse when one IOWrapper fails. The difference with this function and __wrap_function is that this one is aimed at the _handle_read and _handle_write methods. It will catch the IOWrapperEnd exception and raise it again, to indicate that the fd can no longer be read or written. 
""" try: function = getattr(wrapper, funcname) return function() except IOWrapperEnd: # Expected exception when no more data can be read or written if self.__et: # poll_et needs this to update ready list raise else: # poll_lt doesn't need this return False except Exception as err: self.__handle_exception(wrapper, funcname, err) return None def __handle_exception(self, wrapper, method, exception): """ This method is called whenever an exception has occured in any of the wrappers functions. It outputs the message and removes the wrapper from the IOManager. """ # First print the normal exception output #traceback.print_exc() self.__logger.critical("Exception raised in %s in IOWrapper %s. " \ "Removing IOWrapper from IOManager. The " \ "exception is: %s" \ % (repr(wrapper), method, repr(exception))) fd = None # Get fd from object if method != "fileno": try: fd = wrapper.fileno() except: pass if not fd: # Look through the list of wrappers to find the wrapper for cur_fd, fd_wrapper in self.__wrappers.iteritems(): if wrapper is fd_wrapper: fd = cur_fd break # Remove the wrapper from the lists with self.__wrapper_lock: if fd: if fd in self.__wrappers: del self.__wrappers[fd] if wrapper in self.__disconnected_wrappers: self.__disconnected_wrappers.remove(wrapper) # If the exception was not raised in the close method, # try to gracefully close the file descriptor. Otherwise, # let garbage collection clean it up. if method != "close": try: wrapper.close() except: pass def __attempt_wrappers(self): """ Helper method for the main thread. Attempt to connect unconnected wrappers """ with self.__wrapper_lock: sock_list = self.__disconnected_wrappers for wrapper in sock_list: if not self.__wrap_function(wrapper, "connected"): self.__wrap_function(wrapper, "_handle_attempt") def __check_stop(self): """ Check if there are any registered IOWrappers, and if not, stop running after 30 seconds. 
Also stop if the parent thread ended to make sure the interpreter does not keep running just for the IOManager. """ if not self.__parent_thread.is_alive(): global _iom_shutdown self.__logger.info("Parent thread ended. Stopping IOManager.") _iom_shutdown = True self.__running = False if not self.__wrappers and not self.__disconnected_wrappers and time.time( ) > self.__empty_time: self.__logger.info("No IOWrappers registered. Stopping IOManager") self.__running = False elif self.__wrappers or self.__disconnected_wrappers: self.__empty_time = time.time() + 30 def __cleanup(self): """ This method will close all open file descriptor wrappers, and clean up the lists of the IOManager. """ wrappers = copy.copy(self.__wrappers) num = len(wrappers) for fd in wrappers: wrappers[fd].close() self.__wrappers = {} self.__disconnected_wrappers = [] self.__logger.info("Closed %d IOWrappers" % num) os.close(self.__wakeup_read) os.close(self.__wakeup_write) def __find_wrapper(self, fd): """ Return the file descriptor wrapper object belonging to the specified file descriptor """ with self.__wrapper_lock: if fd in self.__wrappers: return self.__wrappers[fd] with self.__poll_lock: if self.__poll.is_known(fd): self.__logger.warning("Cannot find wrapper object belonging to " \ "file descriptor %d. Unregistering." % fd) # Try to unregister with poller self.__poll.unregister(fd) return None def __validate_wrapper(self, wrapper): """ This method validates the wrapper to be an instance of the IOWrapper base class, making sure it implements the required methods. It will raise an exception if the wrapper is invalid. """ if not isinstance(wrapper, IOWrapper): raise Exception( "Only subclasses of IOWrapper should be registered in the IOManager" ) def __wakeup(self): """ This method wakes up the current call to poll by writing a zero byte to the wake-up pipe. This will wake the poller and increase the speed at which the lock is released. 
""" os.write(self.__wakeup_write, "\x00") ############################################################################ ## Public API ## ############################################################################ ## Functions that can and should be called by IOWrapper subclasses ## ## to register and unregister themselves with the IOManager ## ############################################################################ def register(self, wrapper): """ Register a connected file descriptor wrapper """ self.__validate_wrapper(wrapper) fd = self.__wrap_function(wrapper, "fileno") if not type(fd) is type(0): self.__logger.error( "Cannot register IOWrapper with file descriptor %s" % repr(fd)) return False with self.__wrapper_lock: if fd in self.__wrappers: self.__logger.error( "File descriptor %d already registered in IOManager" % fd) return False self.__logger.debug( "Registering IOWrapper with file descriptor %d" % fd) if wrapper in self.__disconnected_wrappers: self.__disconnected_wrappers.remove(wrapper) self.__wrappers[fd] = wrapper self.__wakeup() with self.__poll_lock: self.__poll.register(fd, True, False, True) return True def register_unconnected(self, wrapper): """ Register a disconnected file descriptor wrapper that needs its _attempt method to be called at a regular interval. """ self.__validate_wrapper(wrapper) with self.__wrapper_lock: self.__logger.debug("Registering disconnected IOWrapper") self.__disconnected_wrappers.append(wrapper) return True def unregister(self, wrapper): """ Unregister a file descriptor wrapper, usually because it has been disconnected or closed. 
""" self.__validate_wrapper(wrapper) found = False fd = self.__wrap_function(wrapper, "fileno") if not type(fd) is type(0): self.__logger.error( "Cannot unregister IOWrapper with file descriptor %s" % repr(fd)) return False with self.__wrapper_lock: self.__wakeup() self.__read_list.discard(fd) self.__write_list.discard(fd) with self.__poll_lock: if fd in self.__wrappers: found = True self.__logger.debug( "Unregistering IOWrapper with file descriptor %d" % fd) del self.__wrappers[fd] if found: self.__poll.unregister(fd) return found def unregister_unconnected(self, wrapper): """ Unregister an unconnected file descriptor wrapper, usually because it has succesfully opened/connected or because it is no longer needed. """ self.__validate_wrapper(wrapper) with self.__wrapper_lock: if wrapper in self.__disconnected_wrappers: self.__disconnected_wrappers.remove(wrapper) return True else: self.__logger.warning("Cannot unregister unregistered IOWrapper " \ "%s" % repr(wrapper)) return False def set_writable(self, wrapper, writable): """ Marks the file descriptor as writable in the poller """ self.__validate_wrapper(wrapper) fd = self.__wrap_function(wrapper, "fileno") if type(fd) is type(0): self.__wakeup() with self.__poll_lock: try: self.__poll.modify(fd, True, writable, True) except IOError as e: if e.errno == errno.EBADF: self.__logger.warning("Invalid File Descriptor %d in " \ "%s. Closing IOWrapper." \ % (fd, str(wrapper))) self.__wrap_function(wrapper, "close") else: raise return True else: self.__logger.error( "Cannot modify IOWrapper with file descriptor %s" % fd) return False def stop(self): """ Stop the IOManager. This will set the running flag to false, which should terminate the IOManager thread in a short timespan. """ self.__running = False def running(self): return self.__running
class _IOManager(threading.Thread): """ ############################################################################ ## The IOManager ## ############################################################################ The _IOManager class is a class that is used to manage objects that wrap file descriptors in some way, such as network sockets, pipes, pty's and files. The _IOManager class should never be instantiated directly but should always be accessed through the use of the IOManager() function that makes it sort-of singleton. The reason the __new__ method is not used instead is that that occasionally gives problems when registering new objects, due to the threaded nature of the object. The _IOManager runs in a loop, polling all the registered file descriptors. For each file descriptor, functions are called to notify them that there is data to read or write. """ __instance = None __signal_handlers = {} profile = None ####################################################################### # Class Methods ####################################################################### # The following methods will register and unregister signal handlers # and handle the signals themselves. ####################################################################### @classmethod def signal_handler(cls, signum, frame): """ This method can be registered as a signal handler and will terminate the IOManager thread on the signal and call the original signal handler. """ global _iom_instance # Find name of signal signame = str(signum) for key in signal.__dict__.keys(): if key.startswith("SIG") and getattr(signal, key) == signum: signame = key break try: logger = _iom_instance._IOManager__logger logger.warning("Caught signal %s. Terminating IOManager" % signame) except: print "Caught signal %s. 
Terminating IOManager" % signame original_handler = None if signal in cls.__signal_handlers: original_handler = cls.__signal_handlers[signal] clear_IOM() if original_handler: original_handler(signal, frame) else: sys.exit(1) @classmethod def set_signal_handlers(cls, signals): """ This method is called whenever a IOManager is instantiated. It sets the signal_handler to be the handler for a set of signals to stop the thread when they arrive. """ for sig in signals: try: original_handler = signal.getsignal(sig) if original_handler == cls.signal_handler: continue signal.signal(sig, cls.signal_handler) cls.__signal_handlers[sig] = original_handler except Exception as e: pass @classmethod def restore_signal_handlers(cls): """ This method is called when a IOManager thread ends to restore the original behavior. """ signals = cls.__signal_handlers.keys() for sig in signals: try: signal.signal(sig, cls.__signal_handlers[sig]) except Exception as e: pass cls.__signal_handlers = {} ####################################################################### # Constructor and destructor ####################################################################### def __del__(self): """ Stop the IOManager thread when the object is deleted. """ self.stop() def __init__(self): """ Set up the IOManager thread and start it. 
""" super(_IOManager, self).__init__() self.__poll = iopoll.IOPoll() self.__et = self.__poll.set_edge_triggered(True) self.__wrappers = {} self.__disconnected_wrappers = [] self.__running = True self.__wrapper_lock = threading.RLock() self.__poll_lock = threading.RLock() self.__logger = logging.getLogger("Borg.Brain.Util.IOManager") self.__logger.addHandler(nullhandler.NullHandler()) self.__empty_time = time.time() + 10 self.__next_tick = time.time() + 0.1 self.__parent_thread = threading.current_thread() self.__read_list = set([]) self.__write_list = set([]) self.__ticker = Ticker(10) self.__saved_time = 0 # Set up wake up pipe in non-blocking mode self.__wakeup_read, self.__wakeup_write = os.pipe() fl = fcntl.fcntl(self.__wakeup_read, fcntl.F_GETFL) fcntl.fcntl(self.__wakeup_read, fcntl.F_SETFL, fl | os.O_NONBLOCK) fl = fcntl.fcntl(self.__wakeup_write, fcntl.F_GETFL) fcntl.fcntl(self.__wakeup_write, fcntl.F_SETFL, fl | os.O_NONBLOCK) self.__poll.register(self.__wakeup_read, True, False, True) # Start IOManager thread self.start() ############################################################################ ## IOManager thread ## ############################################################################ def run(self): """ This method will be called when the thread starts. It wraps the _run method to make it possible to profile the IOManager. """ if not self.__class__.profile is None: import cProfile cminstance = self cProfile.runctx("self._run()", globals(), locals(), self.__class__.profile) else: self._run() def _run(self): """ This method is the main thread of the IOManager. It polls all active IOWrappers and calls their read and write handlers when data can be read or written. It will also try to connect any unconnected IOWrappers that have been registered. 
""" global _iom_instance self.__logger.info("IOManager started") timeout = 0 while self.__running: # Poll for events if self.__et: self.__poll_et() else: self.__poll_lt() # Perform IOWrapper checking on the actual # frequency of the ticker, not each time # there is data to read or write if time.time() >= self.__next_tick: self.__next_tick += self.__ticker.get_ticktime() # Call the handle loop function of each connected wrapper self.__handle_loop() # Attempt to connect all IOWrappers self.__attempt_wrappers() # Check if IOManager should keep running self.__check_stop() # Allow for control to be interrupted if other threads require # attention. Do not waste time on this so use a very short sleep time.sleep(0.0001) self.__cleanup() clear_IOM() self.__logger.debug("Savings of wakeup pipe: %f seconds" % self.__saved_time) self.__logger.info("IOManager stopped") ############################################################################ ## Helper methods ## ############################################################################ ## The following methods are (private) helper methods for the IOManager, ## ## handling events and validating wrappers. ## ############################################################################ def __poll_lt(self): """ Helper method for the main thread. Process all events from the poller: call the respective read and write handlers. This version of the method is for Level Triggered behavior, such as kqueue and poll. It will call the read/write handlers of each FD that is ready once. 
""" timeout = self.__ticker.end_tick(False) self.__last_tick_end = time.time() + timeout with self.__poll_lock: events = self.__poll.poll(timeout * 1000) self.__ticker.start_tick() # Process events on the IOWrappers for fd, event in events: # If this was the wake-up pipe, skip if self.__check_wakeup(fd, len(events)): continue wrapper = self.__find_wrapper(fd) if not wrapper: continue if event & iopoll.IO_ERROR: self.__wrap_function(wrapper, "close") if event & iopoll.IO_READ: self.__wrap_function_io(wrapper, "_handle_read") if event & iopoll.IO_WRITE: self.__wrap_function_io(wrapper, "_handle_write") def __poll_et(self): """ Helper method for the main thread. Process all events from the poller: call the respective read and write handlers. This version of the method is for Edge Triggered behavior, when using epoll. It will maintain a set of file descriptors ready for reading and writing. On each loop, it will check if those sets are empty. If they are, a call to the poll function is made with the timeout returned by the ticker, to obtain the set frequency. If the sets are not empty, the lists are quickly updated by calling poll with a timeout of 0. The ready lists are updated with the events returned by the call to poll. """ if self.__read_list or self.__write_list: timeout = 0 else: timeout = self.__ticker.end_tick(False) self.__last_tick_end = time.time() + timeout with self.__poll_lock: events = self.__poll.poll(timeout * 1000) self.__ticker.start_tick() # Update the lists for reading and writing, and # remove/close the file descriptors with an # error state. 
error_list = set([]) for fd, event in events: if fd in error_list: continue if self.__check_wakeup(fd, len(events)): continue if event & iopoll.IO_ERROR: self.__read_list.discard(fd) self.__write_list.discard(fd) error_list.add(fd) wrapper = self.__find_wrapper(fd) if wrapper: self.__wrap_function(wrapper, "close") continue if event & iopoll.IO_READ: self.__read_list.add(fd) if event & iopoll.IO_WRITE: self.__write_list.add(fd) # Perform IO self.__perform_io_et(self.__ticker.end_tick(False) * 0.9) def __perform_io_et(self, max_time): """ After polling and updating of the ready lists has completed the function iterates over the file descriptors ready for reading and writing. The _handle_read and _handle_write of each file descriptor wrapper will be called round-robin in a loop, for as long as the current tick lasts. If the file descriptors become non-readable or non-writable before this happens, the function simply exits and on will continue in the next loop. """ # Perform IO on ready file descriptors start = time.time() while time.time() < start + max_time: if not self.__read_list and not self.__write_list: break # Handle file descriptors ready for reading read_fds = sorted(self.__read_list) for fd in read_fds: wrapper = self.__find_wrapper(fd) if not wrapper: self.__read_list.discard(fd) continue try: self.__wrap_function_io(wrapper, "_handle_read") except IOWrapperEnd: self.__read_list.discard(fd) # Handle file descriptors ready for writing write_fds = sorted(self.__write_list) for fd in write_fds: wrapper = self.__find_wrapper(fd) if not wrapper: self.__write_list.discard(fd) continue try: self.__wrap_function_io(wrapper, "_handle_write") except IOWrapperEnd: self.__write_list.discard(fd) def __check_wakeup(self, fd, num): """ This method checks if the fd is the wakeup pipe, and if so, reads the data from that pipe. """ if fd == self.__wakeup_read: if num == 1: # Was woken up, count savings. 
self.__last_tick_end # if the time the poll would have ended if it was # not awoken by the wakeup pipe. self.__saved_time += self.__last_tick_end - time.time() try: # Empty pipe while True: os.read(self.__wakeup_read, 1) except OSError as err: if err.errno == errno.EAGAIN: # Resource Temporarily Unavailable, occurs when there is # no more data to read. Normal behavior. return True # Other error, something else is wrong. Should not happen. raise return False def __handle_loop(self): """ This method calls the handle_loop function of each wrapper, once every true tick. This should be at the frequency of the ticker, about 10 times per second. """ with self.__wrapper_lock: for wrapper in self.__wrappers.values(): self.__wrap_function(wrapper, "_handle_loop") def __wrap_function(self, wrapper, funcname): """ This method calls a function on the wrapper in a try/except block. This is to make sure the IOManager will keep on running even when exceptions occur in one of the wrappers. This will prevent from the complete system to collapse when one IOWrapper fails. """ try: function = getattr(wrapper, funcname) return function() except Exception as err: self.__handle_exception(wrapper, funcname, err) return None def __wrap_function_io(self, wrapper, funcname): """ This method calls a function on the wrapper in a try/except block. This is to make sure the IOManager will keep on running even when exceptions occur in one of the wrappers. This will prevent from the complete system to collapse when one IOWrapper fails. The difference with this function and __wrap_function is that this one is aimed at the _handle_read and _handle_write methods. It will catch the IOWrapperEnd exception and raise it again, to indicate that the fd can no longer be read or written. 
""" try: function = getattr(wrapper, funcname) return function() except IOWrapperEnd: # Expected exception when no more data can be read or written if self.__et: # poll_et needs this to update ready list raise else: # poll_lt doesn't need this return False except Exception as err: self.__handle_exception(wrapper, funcname, err) return None def __handle_exception(self, wrapper, method, exception): """ This method is called whenever an exception has occured in any of the wrappers functions. It outputs the message and removes the wrapper from the IOManager. """ # First print the normal exception output # traceback.print_exc() self.__logger.critical( "Exception raised in %s in IOWrapper %s. " "Removing IOWrapper from IOManager. The " "exception is: %s" % (repr(wrapper), method, repr(exception)) ) fd = None # Get fd from object if method != "fileno": try: fd = wrapper.fileno() except: pass if not fd: # Look through the list of wrappers to find the wrapper for cur_fd, fd_wrapper in self.__wrappers.iteritems(): if wrapper is fd_wrapper: fd = cur_fd break # Remove the wrapper from the lists with self.__wrapper_lock: if fd: if fd in self.__wrappers: del self.__wrappers[fd] if wrapper in self.__disconnected_wrappers: self.__disconnected_wrappers.remove(wrapper) # If the exception was not raised in the close method, # try to gracefully close the file descriptor. Otherwise, # let garbage collection clean it up. if method != "close": try: wrapper.close() except: pass def __attempt_wrappers(self): """ Helper method for the main thread. Attempt to connect unconnected wrappers """ with self.__wrapper_lock: sock_list = self.__disconnected_wrappers for wrapper in sock_list: if not self.__wrap_function(wrapper, "connected"): self.__wrap_function(wrapper, "_handle_attempt") def __check_stop(self): """ Check if there are any registered IOWrappers, and if not, stop running after 30 seconds. 
Also stop if the parent thread ended to make sure the interpreter does not keep running just for the IOManager. """ if not self.__parent_thread.is_alive(): global _iom_shutdown self.__logger.info("Parent thread ended. Stopping IOManager.") _iom_shutdown = True self.__running = False if not self.__wrappers and not self.__disconnected_wrappers and time.time() > self.__empty_time: self.__logger.info("No IOWrappers registered. Stopping IOManager") self.__running = False elif self.__wrappers or self.__disconnected_wrappers: self.__empty_time = time.time() + 30 def __cleanup(self): """ This method will close all open file descriptor wrappers, and clean up the lists of the IOManager. """ wrappers = copy.copy(self.__wrappers) num = len(wrappers) for fd in wrappers: wrappers[fd].close() self.__wrappers = {} self.__disconnected_wrappers = [] self.__logger.info("Closed %d IOWrappers" % num) os.close(self.__wakeup_read) os.close(self.__wakeup_write) def __find_wrapper(self, fd): """ Return the file descriptor wrapper object belonging to the specified file descriptor """ with self.__wrapper_lock: if fd in self.__wrappers: return self.__wrappers[fd] with self.__poll_lock: if self.__poll.is_known(fd): self.__logger.warning( "Cannot find wrapper object belonging to " "file descriptor %d. Unregistering." % fd ) # Try to unregister with poller self.__poll.unregister(fd) return None def __validate_wrapper(self, wrapper): """ This method validates the wrapper to be an instance of the IOWrapper base class, making sure it implements the required methods. It will raise an exception if the wrapper is invalid. """ if not isinstance(wrapper, IOWrapper): raise Exception("Only subclasses of IOWrapper should be registered in the IOManager") def __wakeup(self): """ This method wakes up the current call to poll by writing a zero byte to the wake-up pipe. This will wake the poller and increase the speed at which the lock is released. 
""" os.write(self.__wakeup_write, "\x00") ############################################################################ ## Public API ## ############################################################################ ## Functions that can and should be called by IOWrapper subclasses ## ## to register and unregister themselves with the IOManager ## ############################################################################ def register(self, wrapper): """ Register a connected file descriptor wrapper """ self.__validate_wrapper(wrapper) fd = self.__wrap_function(wrapper, "fileno") if not type(fd) is type(0): self.__logger.error("Cannot register IOWrapper with file descriptor %s" % repr(fd)) return False with self.__wrapper_lock: if fd in self.__wrappers: self.__logger.error("File descriptor %d already registered in IOManager" % fd) return False self.__logger.debug("Registering IOWrapper with file descriptor %d" % fd) if wrapper in self.__disconnected_wrappers: self.__disconnected_wrappers.remove(wrapper) self.__wrappers[fd] = wrapper self.__wakeup() with self.__poll_lock: self.__poll.register(fd, True, False, True) return True def register_unconnected(self, wrapper): """ Register a disconnected file descriptor wrapper that needs its _attempt method to be called at a regular interval. """ self.__validate_wrapper(wrapper) with self.__wrapper_lock: self.__logger.debug("Registering disconnected IOWrapper") self.__disconnected_wrappers.append(wrapper) return True def unregister(self, wrapper): """ Unregister a file descriptor wrapper, usually because it has been disconnected or closed. 
""" self.__validate_wrapper(wrapper) found = False fd = self.__wrap_function(wrapper, "fileno") if not type(fd) is type(0): self.__logger.error("Cannot unregister IOWrapper with file descriptor %s" % repr(fd)) return False with self.__wrapper_lock: self.__wakeup() self.__read_list.discard(fd) self.__write_list.discard(fd) with self.__poll_lock: if fd in self.__wrappers: found = True self.__logger.debug("Unregistering IOWrapper with file descriptor %d" % fd) del self.__wrappers[fd] if found: self.__poll.unregister(fd) return found def unregister_unconnected(self, wrapper): """ Unregister an unconnected file descriptor wrapper, usually because it has succesfully opened/connected or because it is no longer needed. """ self.__validate_wrapper(wrapper) with self.__wrapper_lock: if wrapper in self.__disconnected_wrappers: self.__disconnected_wrappers.remove(wrapper) return True else: self.__logger.warning("Cannot unregister unregistered IOWrapper " "%s" % repr(wrapper)) return False def set_writable(self, wrapper, writable): """ Marks the file descriptor as writable in the poller """ self.__validate_wrapper(wrapper) fd = self.__wrap_function(wrapper, "fileno") if type(fd) is type(0): self.__wakeup() with self.__poll_lock: try: self.__poll.modify(fd, True, writable, True) except IOError as e: if e.errno == errno.EBADF: self.__logger.warning( "Invalid File Descriptor %d in " "%s. Closing IOWrapper." % (fd, str(wrapper)) ) self.__wrap_function(wrapper, "close") else: raise return True else: self.__logger.error("Cannot modify IOWrapper with file descriptor %s" % fd) return False def stop(self): """ Stop the IOManager. This will set the running flag to false, which should terminate the IOManager thread in a short timespan. """ self.__running = False def running(self): return self.__running
import director import database import numpy from ticker import Ticker import trader import time sortedDict = director.process("stocks", "hot", 50) database.uploadData(sortedDict, "stocks", "10-20") tickers = [] symbols = list(sortedDict.keys()) mentions = [a.mentions for a in sortedDict.values()] sentiment = [a.sentiment for a in sortedDict.values()] for i in range(len(symbols)): tickers.append(Ticker(str(symbols[i]), mentions[i], sentiment[i])) stdMentions = numpy.std(mentions) stdSentiment = numpy.std(sentiment) sum = 0 for i in mentions: sum += i avg1 = sum / len(mentions) sum = 0 for i in sentiment: sum += i avg2 = sum / len(sentiment) for tick in tickers: z1 = (tick.mentions - avg1) / stdMentions z2 = (tick.sentiment - avg2) / stdSentiment tick.setZScore((z1 + z2) / 2) tickers.sort(key=lambda x: x.zscore, reverse=True) staged = [] for i in range(len(symbols)):
def ticker(self, conn):
    """Create a Ticker for *conn* and start it running."""
    worker = Ticker(conn)
    worker.start()
def initializeData(self, fileName): """Loads and initializes data from a csv file. Args: filename: The csv data file """ fileCopy = [] # Printing out each row in order to show what is there with open(fileName, 'rU') as csvfile: reader = csv.reader(csvfile, delimiter=',', dialect=csv.excel_tab) for row in reader: fileCopy.append(row) csvfile.close() tickerNamesCounter = 0 # Isolate the ticker names into the tickerNames list for x in fileCopy[0]: if x == '': pass else: self.tickerNames.append(x) print "The tickers that you have listed within your file include:" for x in self.tickerNames: print x # Associate performance data with each ticker via a dictionary # Dictionary will map datetime objects (for the date) to floats (for performance) # enumerate gives you an index value for each ticker in tickerNames, so we can know both # the index and name of the ticker for index, ticker in enumerate(self.tickerNames): # a temporary map to hold a ticker's date/performance data performance_data_dict = {} # For each ticker, we go through the file checking the appropriate columns # You'll notice that corresponding columns of data for each ticker in the tickerNames list # is just # Date: 3*(index of ticker name in tickerNames) # Performance: 3*(index of ticker name in tickerNames) + 1 # # Math is a beautiful thing for line in fileCopy[2:]: date_string = line[3 * index] if date_string == '': continue #create a datetime object out of the datestring in the csv file date = datetime.strptime(date_string, '%m/%d/%y') performance = float(line[3 * index + 1]) # add the date and performance to the dictionary performance_data_dict[date] = performance # create a new ticker object, and store it in the tickerObjects dictionary which maps # ticker names (which are strings) to ticker objects (which are instances of the Ticker class) ticker_obj = Ticker(ticker, performance_data_dict) self.tickerObjects[ticker] = ticker_obj
class Server(asyncore.dispatcher):
    """Game lobby server built on asyncore.

    Accepts client connections, tracks clients and games, and keeps the
    server registered with a metaserver at a regular interval.
    """

    def __init__(self, parameters, is_standalone):
        self.parameters = parameters
        self.is_standalone = is_standalone
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind(("", config.port))
        self.listen(5)
        self.login = config.login
        self.clients = []  # connected, non-anonymous clients
        self.games = []
        # "admin_only" mode restricts capacity to essentially one game
        if "admin_only" in parameters:
            self.nb_games_max = 1
            self.nb_clients_max = 20
        else:
            self.nb_games_max = 10
            self.nb_clients_max = 40

    # Class-level counter; becomes an instance attribute on first increment.
    next_id = 0

    def get_next_id(self, increment=True):
        # Return the next identifier; with increment=False, peek without
        # consuming it.
        if increment:
            self.next_id += 1
            return self.next_id
        else:
            return self.next_id + 1

    def handle_connect(self):
        pass

    def handle_read(self):
        pass

    def handle_accept(self):
        # Hand the accepted socket off to a per-client dispatcher.
        ConnectionToClient(self, self.accept())

    def _cleanup(self):
        # Drop clients whose dispatcher is no longer in the asyncore map
        # (iterate over a copy since we mutate the list).
        for c in self.clients[:]:
            if c not in asyncore.socket_map.values():
                self.clients.remove(c)
        if self.games and not self.clients:
            self.games = []

    def log_status(self):
        self._cleanup()
        info("%s players (%s not playing), %s games",
             len(self.clients),
             len(self.players_not_playing()),
             len([g for g in self.games if g.started]))

    def _is_admin(self, client):
        # Admin = local connection using the server's own login.
        return client.address[0] == "127.0.0.1" and client.login == self.login

    def remove_client(self, client):
        info("disconnect: %s" % client.login)
        client.is_disconnected = True
        if client in self.clients:  # not anonymous
            self.clients.remove(client)
            for c in self.players_not_playing():
                c.send_msg([client.login, 4259])  # ... has just disconnected
            self.update_menus()
        if isinstance(client.state, Playing):
            client.cmd_abort_game([])
        if self._is_admin(client) and not self.is_standalone:
            info("the admin has disconnected => close the server")
            sys.exit()
        self.log_status()

    def handle_write(self):
        pass

    def handle_close(self):
        # Best-effort logging; never let it block the shutdown.
        try:
            debug("Server.handle_close")
        except:
            pass
        sys.exit()

    def handle_error(self):
        try:
            debug("Server.handle_error %s", sys.exc_info()[0])
        except:
            pass
        # Re-raise termination requests as a clean exit; log everything else.
        if sys.exc_info()[0] in [SystemExit, KeyboardInterrupt]:
            sys.exit()
        else:
            try:
                exception("Server.handle_error")
            except:
                pass

    def can_create(self, client):
        # Whether this client may create a new game.
        if "admin_only" in self.parameters:
            return self._is_admin(client)
        else:
            return len([g for g in self.games if g.started]) < self.nb_games_max

    def unregister(self):
        try:
            info("unregistering server...")
            s = urllib.urlopen(UNREGISTER_URL + "?ip=" + self.ip).read()
        except:
            s = "couldn't access to the metaserver"
        # An empty response means success; anything else is an error text.
        if s:
            warning("couldn't unregister from the metaserver (%s)", s[:80])

    ip = ""

    def _get_ip_address(self):
        # Ask an external service for our public IP; blank it on any failure
        # or if the reply doesn't look like an address.
        try:
            self.ip = urllib.urlopen(WHATISMYIP_URL).read().strip()
            if not re.match("^[0-9.]{7,40}$", self.ip):
                self.ip = ""
        except:
            self.ip = ""
        if not self.ip:
            warning("could not get my IP address from %s", WHATISMYIP_URL)

    _first_registration = True

    def _register(self):
        try:
            s = urllib.urlopen(REGISTER_URL + "?version=%s&login=%s&ip=%s&port=%s"
                               % (compatibility_version(), self.login, self.ip,
                                  config.port)).read()
        except:
            s = "couldn't access to the metaserver"
        # An empty response means success; anything else is an error text.
        if s:
            warning("couldn't register to the metaserver (%s)", s[:80])
        else:
            info("server registered")

    def register(self):
        # Resolve our public IP only once, on the first registration.
        if self._first_registration:
            self._get_ip_address()
            self._first_registration = False
        self._register()

    def _start_registering(self):
        # Re-register with the metaserver periodically.
        self.ticker = Ticker(REGISTER_INTERVAL, self.register)
        self.ticker.start()

    def startup(self):
        if "no_metaserver" not in self.parameters:
            self._start_registering()
        info("server started")
        asyncore.loop()

    def update_menus(self):
        for c in self.clients:
            c.send_menu()

    def available_players(self):
        # Clients idling in the lobby.
        return [x for x in self.clients if isinstance(x.state, InTheLobby)]

    def game_admins(self):
        # Clients currently setting up a game.
        return [x for x in self.clients if isinstance(x.state, OrganizingAGame)]

    def players_not_playing(self):
        return [x for x in self.clients if not isinstance(x.state, Playing)]

    def get_client_by_login(self, login):
        # Returns None implicitly when no client matches.
        for c in self.clients:
            if c.login == login:
                return c

    def get_game_by_id(self, ident):
        # Returns None implicitly when no game matches.
        ident = int(ident)
        for o in self.games:
            if o.id == ident:
                return o
def _start_registering(self):
    """Kick off the periodic metaserver registration ticker."""
    register_ticker = Ticker(REGISTER_INTERVAL, self.register)
    self.ticker = register_ticker
    register_ticker.start()
# consumer: parse one OKCoin websocket payload (JSON list of data points) and
# index each point into Elasticsearch through self.postDto.
# Channel names matching "ok_sub_(spotusd|futureusd)_(b|l)tc_<suffix>" are
# routed by suffix/keyword: "index" -> future_price_index; "depth" ->
# orderbook (contract_type/depth parsed from the suffix for futures, "spot"
# otherwise); "ticker" -> ticker / future_ticker; "trade" ->
# completed_trades; "kline" -> kline_candles.  Every 100 processed points a
# progress line is printed (self.count).
# NOTE(review): statements are collapsed onto two physical lines in this
# chunk, so only comments are added.
# NOTE(review): search.group(...) raises AttributeError for channels that do
# not match the regex, and the final "except: raise" re-raises everything --
# presumably the caller handles/propagates errors; confirm before changing.
def consumer(self, marketData): connection = elasticsearch.Elasticsearch(self.esHost) self.ensure(connection) self.docMappings(connection) dataSet = json.loads(marketData) item = {} for infoPoint in dataSet: try: channel = str(infoPoint["channel"]) regex = "ok_sub_(spotusd|futureusd)_(b|l)tc_(.[A-Za-z0-9_]+)" search = re.search(regex, channel) if search.group(1) == "futureusd": isFuture = True else: isFuture = False currencyPair = str(search.group(2)) + "tc_usd" self.count = self.count + 1 if self.count % 100 == 0: print ("PROCESSED " + str(self.count) + " DATA POINTS SO FAR...") if search.group(3) == "index": myindex = FutureIndex() dto = myindex.getFutureIndexDto(infoPoint, currencyPair) dto["exchange"] = "OKCOIN" self.postDto(dto, connection, "future_price_index") elif "depth" in channel: mybook = Orderbook() dto = mybook.getDepthDtoList(infoPoint, currencyPair, isFuture) for item in dto: item["websocket_name"] = channel item["is_future"] = isFuture if isFuture == True: check = re.search("depth_(this_week|next_week|quarter)_(20|60)", search.group(3).strip()) item["contract_type"] = str(check.group(1)) item["depth"] = str(check.group(2)) else: item["contract_type"] = "spot" depthSearch = re.search("depth_(20|60)", search.group(3).strip()) item["depth"] = depthSearch.group(1) item["exchange"] = "OKCOIN" self.postDto(item, connection, "orderbook") elif "ticker" in channel and "data" in infoPoint: myticker = Ticker() if isFuture == False: dto = myticker.getTickerDto(infoPoint, currencyPair) self.postDto(dto, connection, "ticker") elif isFuture == True: dto = myticker.getFutureTickerDto(infoPoint, channel, currencyPair) dto["exchange"] = "OKCOIN" self.postDto(dto, connection, "future_ticker") elif "trade" in channel: mytrade = Trade() if "data" in infoPoint: dtoList = mytrade.getCompletedTradeDtoList(infoPoint, currencyPair) for item in dtoList: item["is_future"] = "futureusd" in channel item["websocket_name"] = channel item["exchange"] = "OKCOIN" self.postDto(item, 
connection, "completed_trades") elif "kline" in channel: myklein = KlineCandle() if "data" in infoPoint: if len(infoPoint["data"]) > 1: for klineData in infoPoint["data"]: if type(klineData) is list: klineDto = myklein.getKlineDto(klineData, currencyPair, channel) klineDto["exchange"] = "OKCOIN" klineDto["is_future"] = isFuture klineDto["websocket_name"] = channel else: klineDto = myklein.getKlineDto(infoPoint["data"], currencyPair, channel) self.postDto(klineDto, connection, "kline_candles") except: raise
# NOTE(review): fragment -- the "try:"/config-loading code that pairs with
# this "except ValueError:" is outside the chunk, so only comments are added.
# Tail of a payment-terminal startup script: an unparsable config prints a
# warning; if no config exists, the default `settings` dict is written out as
# indented JSON.  It then wires together NFCBroadcast, Controller,
# BluetoothReceiver, TxMonitor and an exchange-rate Ticker (source/currency/
# url/fields/interval from settings['exchange_rate_ticker']), starts their
# threads, runs the controller's main loop, and finally shuts down the
# external NFC broadcast process.
except ValueError: print "Warning: Unable to parse configuration file." else: # looks like no configuration exists -> create default one with open(conffile, 'w') as f: f.write(json.dumps(settings, sort_keys=True, indent=4) + "\n") nfc_broadcast = NFCBroadcast() controller = Controller(settings, nfc_broadcast) bluetooth_receiver = BluetoothReceiver( controller.bluetooth_available, controller.new_transaction_via_bluetooth) tx_monitor = TxMonitor(controller.new_transaction_received) ticker_settings = settings['exchange_rate_ticker'] ticker = Ticker(ticker_settings['source'], ticker_settings['currency'], ticker_settings['url'], ticker_settings['fields'], ticker_settings['interval'], controller.exchange_rate_updated) # start various threads nfc_broadcast.start() bluetooth_receiver.start() tx_monitor.start() ticker.start() # enter main event loop controller.run() # clean up - NFCBroadcast uses an external process, # which we need to terminate nfc_broadcast.shutdown()
def test_strategy(definition, strategy): sapp = datetime.datetime.now() # initialize tickers t = Ticker(**definition) t.strategy = strategy tickers = {1: t} # fill the tickers with 'historical data' start = datetime.datetime(2003, 6, 16) end = datetime.datetime(2003, 6, 23) ticks = tick_list("ES", start, end) first_day = ticks[0][0].day index = 0 for tick in ticks: if not tick[0].day == first_day: break else: t.ticks.append(tick) index += 1 # start 'engines' tick_queue = LinkedBlockingQueue() response_handler = IBResponseHandler(tick_queue) connection = SimConnection(response_handler) client = IBClient(connection, tickers) # initialize trader trader = TradingEngine(tick_queue, tickers, client) # initialize client client.start(trader) # fill the queue and wait for the queue to be empty for tick in ticks[index:]: qtick = 1, tick[0], tick[1] tick_queue.put(qtick) # time.sleep(0.002) # to ease a little on the CPU usage # wait unitl the queue is consumed while tick_queue.size(): time.sleep(1) # now show some statistics report = {} order_map = client.order_map[1] # ticker_id = 1 # check order validity, entry, exit, entry, exit, etc. 
previous_signal = None invalid_sequence = 0 for order_id in order_map: order_entry = client.account["_orders"][order_id] signal = order_entry["signal"] if previous_signal: if previous_signal.startswith("entry") and not signal.startswith("exit"): invalid_sequence += 1 if previous_signal.startswith("exit") and not signal.startswith("entry"): invalid_sequence += 1 previous_signal = signal if invalid_sequence: report["valid"] = True else: report["valid"] = False # check number of long and short and order result if not invalid_sequence: entry_long = 0 entry_short = 0 long_result = [] short_result = [] all_result = [] ratio = 50 rt_comm = 4 # round trip commission for i in range(len(order_map) - 1): if i < len(order_map) - 1: entry_order_id = order_map[i] exit_order_id = order_map[i + 1] entry_order = client.account["_orders"][entry_order_id] exit_order = client.account["_orders"][exit_order_id] entry_value = entry_order["fill_value"] exit_value = exit_order["fill_value"] signal = entry_order["signal"] if signal.startswith("entry_long"): entry_long += 1 value = exit_value - entry_value long_result.append(value) all_result.append(value) if signal.startswith("entry_short"): entry_short += 1 value = entry_value - exit_value short_result.append(value) all_result.append(value) long_result = [r * ratio for r in long_result] report["long_results"] = long_result long_comm = [r - rt_comm for r in long_result] report["long_results_with_commission"] = long_comm short_result = [r * ratio for r in short_result] report["short_results"] = short_result short_comm = [r - rt_comm for r in short_result] report["short_results_with_commission"] = short_comm all_result = [r * ratio for r in all_result] report["all_results"] = all_result all_comm = [r - rt_comm for r in all_result] report["all_results_with_commission"] = all_comm report["long_trades"] = entry_long report["short_trades"] = entry_short report["sum_all_results"] = sum(all_result) report["sum_all_results_with_commission"] = 
sum(all_comm) report["sum_long_results"] = sum(long_result) report["sum_long_results_with_commission"] = sum(long_comm) report["sum_short_results"] = sum(short_result) report["sum_short_results_with_commission"] = sum(short_comm) if all_result: avg_all_res = sum(all_result) / len(all_result) avg_all_res_comm = sum(all_comm) / len(all_comm) report["average_all_results"] = avg_all_res report["average_all_results_with_commission"] = avg_all_res_comm else: report["average_all_results"] = None report["average_all_results_with_commission"] = None if long_result: avg_long_res = sum(long_result) / len(long_result) avg_long_res_comm = sum(long_comm) / len(long_comm) report["average_long_results"] = avg_long_res report["average_long_results_with_commission"] = avg_long_res_comm else: report["average_long_results"] = None report["average_long_results_with_commission"] = None if short_result: avg_short_res = sum(short_result) / len(short_result) avg_short_res_comm = sum(short_comm) / len(short_comm) report["average_short_results"] = avg_short_res report["average_short_results_with_commission"] = avg_short_res_comm else: report["average_short_results"] = None report["average_short_results_with_commission"] = None # calculate total capacity capacity = 0 previous_tick_value = 0 for tick in ticks[index:]: tick_value = tick[1] if previous_tick_value: cap = abs(tick_value - previous_tick_value) capacity += cap previous_tick_value = tick_value total_capacity = capacity * ratio report["total_capacity"] = total_capacity res_for_cap = sum(all_comm) * 100 / total_capacity report["result_for_capacity_percentage"] = res_for_cap eapp = datetime.datetime.now() report["analysis_time"] = eapp - sapp return report
# Display: pygame front-end for stock quotes supplied by Ticker.
# __init__ sets up the window (SCREEN_WIDTH x SCREEN_HEIGHT), fonts and clock
# and pulls an initial quote; elapsed() refreshes quotes when more than 10 s
# have passed (pygame.time.get_ticks delta) and rebuilds displayList as
# (Name, Symbol, LastTradePriceOnly) tuples; text_render() draws each entry
# as a big name line plus a small "symbol price" line, stacking vertically;
# display_loop() is the standard pygame loop -- handle pygame.QUIT, clear to
# WHITE, render, tick at 60 fps -- and calls pygame.quit() on exit.
# NOTE(review): Python 2 (print statements); statements are collapsed onto
# two physical lines in this chunk, so only comments are added.
class Display(object): def __init__(self): self.ticker = Ticker() self.ticker.update_quote() pygame.init() # Set the width and height of the screen [width, height] self.size = (SCREEN_WIDTH, SCREEN_HEIGHT) self.screen = pygame.display.set_mode(self.size) pygame.display.set_caption("Ticker LLC") self.setup_font() # Loop until the user clicks the close button. self.done = False # Used to manage how fast the screen updates self.clock = pygame.time.Clock() self.time_elapsed = pygame.time.get_ticks() self.displayList = [] def setup_font(self): if pygame.font: self.fontBigSize = 34 self.fontSmallSize = 24 self.fontBig = pygame.font.Font(None, self.fontBigSize) self.fontSmall = pygame.font.Font(None, self.fontSmallSize) #self.bigText = fontBig.render("big font", 1, (10,10,10)) #self.smallText = fontSmall.render("small font", 1, (10,10,10)) def elapsed(self): current_time = pygame.time.get_ticks() if current_time - self.time_elapsed > 10000 : print "10sec elapsed" self.time_elapsed = current_time self.ticker.update_quote() self.displayList = [] for elem in self.ticker.ticker_dict_list : name = elem["Name"] symbol = elem["Symbol"] price = elem["LastTradePriceOnly"] self.displayList.append((elem["Name"], elem["Symbol"], elem["LastTradePriceOnly"])) print symbol + ": " + price def text_render(self): initX = 50 initY = 50 offsetX = 0 offsetY = 0 ticker_list = self.ticker.ticker_dict_list for (name, symbol, price) in self.displayList: bigText = self.fontBig.render(name, 1, (10,10,10)) self.screen.blit(bigText, (initX+offsetX, initY+offsetY)) offsetX += 0 offsetY += self.fontBigSize smallStr = symbol + " " + price smallText = self.fontSmall.render(smallStr, 1, (10,10,10)) self.screen.blit(smallText, (initX+offsetX, initY+offsetY)) offsetX += 0 offsetY += self.fontSmallSize + self.fontBigSize def display_loop(self): # -------- Main Program Loop ----------- while not self.done: # --- Main event loop for event in pygame.event.get(): # User did something if event.type == 
pygame.QUIT: # If user clicked close self.done = True # Flag that we are done so we exit this loop #print "done" # --- Game logic should go here # --- Drawing code should go here # First, clear the screen to white. Don't put other drawing commands # above this, or they will be erased with this command. self.screen.fill(WHITE) self.text_render() self.elapsed() # --- Go ahead and update the screen with what we've drawn. pygame.display.flip() # --- Limit to 60 frames per second self.clock.tick(60) # Close the window and quit. # If you forget this line, the program will 'hang' # on exit if running from IDLE. pygame.quit()
# main: filament-tracking entry point.  Loads the common and tracker configs,
# uploads the preprocessed image sequence (into shared memory when
# 'Parallel Computing'/'Enabled_b' is set), resolves the initial filaments
# archive -- the configured 'Initialization'/'Path', falling back to
# "filaments.zip" in the latest ../../output/generator run, returning early if
# neither exists -- then runs track_all into a timestamped folder under
# ../../output/tracking/<output_folder>, copying colors.csv alongside when
# available.
# NOTE(review): the bare "except:" blocks implement best-effort fallbacks
# (empty/missing config path -> latest generator folder; mkdir/copy failures
# ignored); the collapsed single-line layout makes their exact nesting
# ambiguous, so the code is left untouched and only comments are added.
def main(): # Read configuration common_config = Config(os.path.join('..', '..', 'config', 'common.config')) # Read image preprocessing configuration tracker_config = Config( os.path.join('..', '..', 'config', 'tracker.config')) # Get output folder output_folder = common_config['Output']['Folder'] # Use multiprocessing is_parallel = tracker_config['Parallel Computing']['Enabled_b'] # Read preprocessed data image_sequence = upload_sequence(os.path.join('..', '..', 'output', 'preprocessing', output_folder), use_shared_memory=is_parallel) try: init_path = tracker_config['Initialization']['Path'] if init_path == '': raise Exception("Empty Path") except: sequence_path = os.path.join('../../output/generator', output_folder) try: input_folder = get_latest_folder(sequence_path) init_path = os.path.join(sequence_path, input_folder, "filaments.zip") except: return ticker = Ticker() ticker.tick("\nReading initial filaments...") initialization = upload_initialization(init_path) ticker.tock(" Finished.") sequence_output = os.path.join("../../output/tracking", output_folder) try: os.mkdir(sequence_output) except: pass run_output = os.path.join(sequence_output, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) try: os.mkdir(run_output) except: pass try: shutil.copy2(os.path.join(sequence_path, input_folder, 'colors.csv'), run_output) except: pass # Track all filaments ticker.tick("\nStarting tracking...") track_all(initialization, image_sequence, tracker_config, run_output) ticker.tock(" Tracking completed!")
import configparser
import logging
import sys
import time

import requests
from poloniex import Poloniex

from ticker import Ticker

# Path to the poloniex credentials / settings file.
config_path = '../../config/config.ini'

logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

if __name__ == '__main__':
    # FIX(review): `configparser` and `logging` were used below but never
    # imported in this module's visible header; imports added above.
    config = configparser.ConfigParser()
    config.read(config_path)
    # NOTE(review): api/secret are read but unused here -- presumably kept for
    # a later authenticated Poloniex session; confirm before removing.
    api = config['poloniex']['api']
    secret = config['poloniex']['secret']
    polo = Poloniex()
    # Local Ticker service backed by MongoDB; compare its BTC_STR last price
    # against Poloniex's live ticker once per second.
    ticker = Ticker('mongodb://192.168.1.179:27017/')
    while True:
        print(
            ticker('BTC_STR')['last'],
            polo.returnTicker()['BTC_STR']['last'])
        time.sleep(1)
# NOTE(review): fragment -- `host`, `port` and `server` are defined before
# this chunk, so only comments are added.  Loads every RobotLab_1 *.jpg under
# $BORG/brain/data/models via cv.LoadImage, then opens a BinarySocket
# (compression enabled on the client side, level 9, 256 KiB buffer) and loops
# at Ticker(5) rate, (re)establishing the client connection as needed:
# wait_connect(0.5) while unconnected, reset to None on server-side
# disconnect.  Python 2 (print statements).  The same code appears again
# later in this file.
path = os.path.join(os.environ['BORG'], "brain", "data", "models", "RobotLab_1", "*.jpg") path = os.path.expanduser(path) image_names = glob.glob(path) images = [] for filename in image_names: print "Loading %s..." % filename img = cv.LoadImage(filename) images.append(img) print "Loaded %d images" % len(images) # Start the BinarySocket comp = True if not server else False sock = BinarySocket(host, port, server=server, bufsize=1024*256, compress=comp, compress_level=9) client = sock if not server else None ticker = Ticker(5) # Keep running until user interrupts while True: ticker.tick() if not client: client = sock.wait_connect(0.5) if not client: continue if not client.connected() and server: # If a client disconnected, wait for a new connection client = None continue
# Demo script: exercise the local Ticker class on symbol 'YPF' -- Bollinger
# bands, close/volume series, volatility (mean printed), moving-average
# status -- then bar+line plot the volume oscillator and plot the linear MACD
# series from element 30 onward (figure aspect per the linked pyhogs note).
# NOTE(review): collapsed single-line layout, so only comments are added.
# The a/b/c values unpacked from get_boll()/get_lineal_macd() are only
# partially used (a[30:] plotted, b and c unused).
import sys import os import numpy as np dir_base = os.path.abspath('../') sys.path.append(dir_base) from ticker import Ticker from matplotlib import pyplot as plt if __name__ == '__main__': tck = Ticker('YPF') a, b, c = tck.get_boll() tck.get_close() tck.get_volume() vol = tck.get_volatility() tck.get_ma_close() print(vol.mean()) print(tck.get_status_ma()) vol_osc = tck.get_vol_osc() # http://pyhogs.github.io/plot-aspect-ratio.html plt.figure(figsize=(12.5, 3)) plt.bar(np.arange(vol_osc.size), vol_osc) plt.plot(vol_osc) plt.show() a, b, c = tck.get_lineal_macd() plt.figure(figsize=(12.5, 3)) plt.plot(a[30:])
# test_01: smoke-test Ticker over a basket of symbols.  The first `tickers`
# list (Buenos Aires symbols) is dead code -- it is immediately overwritten by
# the NYSE/ADR list; with BA=False the plain symbol form is used, otherwise
# '.BA' is appended.  For each ticker it pulls Bollinger/MA/volume data,
# prints the mean 20-period volatility and MA status, and plots a volatility
# series.
# NOTE(review): `begin` is not defined in this chunk (module-level
# elsewhere), and the collapsed layout leaves the plt.plot/plt.show placement
# ambiguous (also, `vol` may be unbound if the first symbol errors), so the
# code is left untouched and only comments are added.
def test_01(): tickers = ['^MERV', 'ALUA', 'BMA', 'BBAR', 'BYMA', 'CVH', 'CEPU', 'CRES', 'EDN', 'GGAL', 'VALO', 'SUPV', 'MIRG', 'PAMP', 'COME', 'TECO2', 'TXAR', 'TRAN', 'TGNO4', 'TGSUD2', 'YPFD', 'TS', 'APBR'] tickers = ['TEO', 'DESP', 'MELI', 'GLOB', 'BMA', 'BBAR', 'SUPV', 'GGAL', 'TS', 'TX', 'PAM', 'EDN', 'CEPU', 'YPF', 'PBR', 'TGS', 'LOMA', 'IRS', 'CRESY'] BA = False for ticker in tickers: try: if BA == True: tck = Ticker(ticker + '.BA', begin=begin) else: tck = Ticker(ticker, begin=begin) a, b, c = tck.get_boll() tck.get_ma_close() tck.get_volume() vol = tck.get_volatility(len_ma=20) print(ticker, vol.mean()) print(tck.get_status_ma()) except Exception as e: print(ticker, 'ERROR', e) plt.plot(vol) plt.show()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Stand-alone demo: show a scrolling Ticker widget in a Qt window."""
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from ticker import Ticker
import sys

if __name__ == "__main__":
    app = QApplication(sys.argv)
    # Build the widget, give it a (translatable) title and marquee text.
    widget = Ticker()
    widget.setWindowTitle(widget.tr("Ticker"))
    widget.setText(widget.tr("How long it lasted was impossible to say ++ "))
    widget.show()
    # Hand control to the Qt event loop and propagate its exit status.
    sys.exit(app.exec_())
def main():
    """Generate candidate filaments from preprocessed images and save them.

    Reads the common and generator configs, runs generator.generate on the
    preprocessing output, writes the filtered filaments (with random colors
    and a normalised first-frame background) into a timestamped folder under
    ../../output/generator/<folder_name>, and, when __PLOT_OVERLAY__ is set,
    plots the original vs filtered filaments.
    """
    ticker = Ticker()
    ticker.tick("Started filament generation...")
    input_dir = os.path.join("..", "..", "output", "preprocessing")
    output_dir = os.path.join("..", "..", "output", "generator")
    # Read configuration
    common_config = Config(os.path.join("..", "..", "config", "common.config"))
    # Read filaments generator configuration
    generator_config = Config(
        os.path.join("..", "..", "config", "generator.config"))
    folder_name = common_config["Output"]["Folder"]
    sequence_path = os.path.join(common_config["Image Sequence"]["Path"],
                                 common_config["Image Sequence"]["Filename"])
    # Generate filaments
    filaments_original, filaments_filtered = generator.generate(
        input_dir, folder_name, generator_config)
    directory = os.path.join(output_dir, folder_name)
    # Make folder.  FIX(review): the original wrapped os.mkdir/shutil in bare
    # "except: pass", which also swallows SystemExit/KeyboardInterrupt; only
    # the expected OS-level errors (e.g. directory already exists) are
    # ignored now.
    try:
        os.mkdir(directory)
    except OSError:
        pass
    # Output path: timestamped folder name also encoding the filament count.
    current_date = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    output_path = os.path.join(
        directory, "__".join([current_date, str(len(filaments_filtered))]))
    try:
        os.mkdir(output_path)
    except OSError:
        pass
    # Get colors
    colors = get_random_colors(len(filaments_filtered))
    # Get background image: first frame, normalised to [0, 1].
    image_sequence = imread(sequence_path)
    background = np.squeeze(image_sequence)[0].astype(np.float32)
    background /= background.max()
    # Save filaments
    save_filaments(filaments_filtered, output_path, background, colors)
    # Plot sequence
    if __PLOT_OVERLAY__:
        plot_filaments(filaments_original, background, None,
                       "Original filaments")
        plot_filaments(filaments_filtered, background, colors,
                       "Filtered filaments", True)
    ticker.tock(" Generation finished.")
# NOTE(review): fragment, and a near-duplicate of the image-loading /
# BinarySocket code earlier in this file -- the `for filename in ...` header
# and the `host`/`port`/`server` definitions are outside this chunk, so only
# comments are added.  Loads images with cv.LoadImage, then polls a
# compressed BinarySocket (level 9, 256 KiB buffer) at Ticker(5) rate,
# (re)connecting via wait_connect(0.5) and resetting on server-side
# disconnect.  Python 2 (print statements).
print "Loading %s..." % filename img = cv.LoadImage(filename) images.append(img) print "Loaded %d images" % len(images) # Start the BinarySocket comp = True if not server else False sock = BinarySocket(host, port, server=server, bufsize=1024 * 256, compress=comp, compress_level=9) client = sock if not server else None ticker = Ticker(5) # Keep running until user interrupts while True: ticker.tick() if not client: client = sock.wait_connect(0.5) if not client: continue if not client.connected() and server: # If a client disconnected, wait for a new connection client = None continue
# DisplayTk: Tkinter front-end for Ticker stock quotes.  __init__ bails out
# if the first update_quote() fails; otherwise it builds, per symbol, rows of
# Labels backed by StringVars -- "SYMBOL: price" plus percent change
# (colour-coded via color_code: blue when ChangeinPercent starts with '+',
# red otherwise), then day and 52-week ranges -- and stores the vars/widgets
# in displayDict keyed by symbol.  timer_callback re-fetches quotes and
# updates the StringVars, rescheduling itself every timer_interval ms with
# rootWin.after; display_loop schedules the first callback and enters
# mainloop().  tk_delete handles window close by setting self.quit and
# quitting the root window.
# NOTE(review): Python 2 (print statements); statements are collapsed onto
# two physical lines in this chunk, so only comments are added.
class DisplayTk(object): def __init__(self): self.ticker = Ticker() if self.ticker.update_quote() == False : return self.rootWin = Tk() self.rootWin.protocol("WM_DELETE_WINDOW", self.tk_delete) self.quit = False self.displayDict = {} self.entry_count = 0 self.timer_interval = 10000 #in ms rowVal = 0 for elem in self.ticker.ticker_dict_list : labelStr = elem["Symbol"] + ": " + elem["LastTradePriceOnly"] price = StringVar() symbolWidget = Label(self.rootWin, textvariable=price) symbolWidget.grid(row=rowVal, column=0) price.set(labelStr) labelStr = elem["ChangeinPercent"] percentChange = StringVar() percentWidget = Label(self.rootWin, textvariable=percentChange) percentWidget.grid(row=rowVal, column=1) percentChange.set(labelStr) rowVal += 1 self.color_code(labelStr, symbolWidget, percentWidget) labelStr = "day: " + elem["DaysRange"] daysRange = StringVar() Label(self.rootWin, textvariable=daysRange).grid(row=rowVal) daysRange.set(labelStr) rowVal += 1 labelStr = "52week: " + elem["YearRange"] yearRange = StringVar() Label(self.rootWin, textvariable=yearRange).grid(row=rowVal) yearRange.set(labelStr) rowVal += 1 Label(self.rootWin, text="").grid(row=rowVal) rowVal += 1 self.displayDict[elem["Symbol"]] = \ (price, percentChange, daysRange, yearRange, symbolWidget, percentWidget) #threading.Timer(self.timer_interval, self.timer_callback).start() def tk_delete(self): print "deletion" self.quit = True self.rootWin.quit() def color_code(self, labelStr, symbolWidget, percentWidget): if labelStr[0] == "+" : symbolWidget.config(fg = "blue") percentWidget.config(fg = "blue") else : symbolWidget.config(fg = "red") percentWidget.config(fg = "red") def timer_callback(self): if self.ticker.update_quote() == False : self.rootWin.after(self.timer_interval, self.timer_callback) return if self.quit == True : print "quit" return for elem in self.ticker.ticker_dict_list : entry = elem["Symbol"] (price, percentChange, daysRange, yearRange, symbolWidget, percentWidget) \ = 
self.displayDict[entry] labelStr = elem["Symbol"] + ": " + elem["LastTradePriceOnly"] price.set(labelStr) labelStr = elem["ChangeinPercent"] percentChange.set(labelStr) self.color_code(labelStr, symbolWidget, percentWidget) labelStr = "day: " + elem["DaysRange"] daysRange.set(labelStr) labelStr = "52week: " + elem["YearRange"] yearRange.set(labelStr) #print "timer callback" #threading.Timer(self.timer_interval, self.timer_callback).start() self.rootWin.after(self.timer_interval, self.timer_callback) def display_loop(self): #print self.timer_interval self.rootWin.after(self.timer_interval, self.timer_callback) mainloop()