Example #1
    def driver(cls):

        cnx = sqlite3.connect('./api/api.db')
        df = pd.read_sql_query("SELECT * FROM binance_data", cnx)

        df['Open_time'] = pd.to_datetime(df['Open_time'])
        df['Close_time'] = pd.to_datetime(df['Close_time'])
        df.rename(columns={'Open_time': 'Date'}, inplace=True)
        df = df.drop(columns=['Quote_asset_volume', 'Buy_base_asset', 'Buy_quote_asset', 'Ignore'])
        Trading.counter = 0

        start_time = df['Date'].iloc[0]
        last_index = df.shape[0] - 1
        end_time = df['Date'].iloc[last_index]

        tl = Timeloop()

        @tl.job(interval=timedelta(seconds=2))
        def paper_trade():
            sliced_df = Trading.get_next_db(df)
            last_date = sliced_df['Date'].iloc[sliced_df.shape[0] - 1]
            temp = MAOMA.maomasig(sliced_df, start_time, end_time, 'Close', 5, 15)
            net = Portfolio.pf_manage(temp, 'Close')
            print("portfolio value: ", net)
            print("last date is : ", last_date)

        tl.start()
        while True:
            try:
                time.sleep(1)
            except KeyboardInterrupt:
                tl.stop()
                break
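
Note: the manual sleep loop above mirrors what Timeloop itself does in blocking mode; Timeloop installs SIGINT/SIGTERM handlers and stops its jobs on exit, so a minimal equivalent sketch is:

tl.start(block=True)  # blocks the main thread and stops jobs cleanly on Ctrl-C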
Example #2
def main(hparams):

    metagraph = Metagraph(hparams)
    neuron = Neuron(hparams, metagraph)

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    bittensor.proto.bittensor_pb2_grpc.add_BittensorServicer_to_server(neuron, server)
    server.add_insecure_port(hparams.bind_address + ":" + hparams.port)
    server.start()

    tl = Timeloop()
    set_timed_loops(tl, hparams, neuron, metagraph)
    tl.start(block=False)
    logger.info('Started Timers.')

    try:
        logger.info('Begin wait on main...')
        while True:
            logger.debug('heartbeat')
            time.sleep(100)

    except KeyboardInterrupt:
        logger.debug('Neuron stopped with keyboard interrupt.')
        server.stop(0)  # grpc's Server.stop() requires a grace period argument
        del neuron
        del metagraph

    except Exception as e:
        logger.error('Neuron stopped with interrupt on error: ' + str(e))
        server.stop(0)
        del neuron
        del metagraph
Example #3
def wait_until():
    tl = Timeloop()

    thread = MyThread()

    @tl.job(interval=timedelta(seconds=2))
    def sample_job_every_2s():
        fibonacci(20)
        print("2s job current time : {}".format(time.ctime()))

    @tl.job(interval=timedelta(seconds=5))
    def sample_job_every_5s():
        fibonacci(1)
        print("5s job current time : {}".format(time.ctime()))

    @tl.job(interval=timedelta(seconds=10))
    def sample_job_every_10s():
        fibonacci(20)
        print("10s job current time : {}".format(time.ctime()))

    tl.start()
    while thread.is_alive():
        print('+++do something')
        time.sleep(1)  # avoid busy-spinning while waiting on the thread
    tl.stop()
    return True
Example #4
class Server(object):
    def __init__(self):

        self._tl = Timeloop()

        self._jobs = dict()

        self.api = TinaAPI(self)

    def run_api(self):
        self.api.start()

    @property
    def jobs(self):
        return self._jobs

    def register_job(self, job: Job) -> None:
        """
        Raises:
              ValueError: If job has already been registered
        """

        if self.job_exists(job):
            raise ValueError("The job {} has already been registered.".format(
                job.name))
        else:
            self._jobs[job.name] = JobContainer(
                job=job,
                job_start_timestamp=time.ctime(),
                job_interval=job.interval)

        interval = job.interval

        @self._tl.job(interval=interval)
        def wrapper() -> None:
            job.last_execution = time.ctime()
            job.run()

    def run(self) -> None:
        self._tl.start()

    def stop(self) -> None:
        self._tl.stop()

    def job_exists(self, job: Job) -> bool:
        # jobs are registered under job.name, so look that key up
        return job.name in self._jobs

    def manual_trigger(self, job_name: str) -> bool:
        # blocking
        if job_name not in self.jobs:
            return False
        else:
            self.jobs[job_name].job.run()
            return True
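
A hypothetical usage sketch for this registry (HeartbeatJob is an assumption; it only needs the name, interval, last_execution and run() members that register_job touches, and TinaAPI/JobContainer must be importable for Server to construct):

from datetime import timedelta
import time

class HeartbeatJob:  # hypothetical Job implementation
    name = "heartbeat"
    interval = timedelta(seconds=30)
    last_execution = None

    def run(self) -> None:
        print("heartbeat at", time.ctime())

server = Server()
server.register_job(HeartbeatJob())  # registering the same name again raises ValueError
server.run()                         # starts the underlying Timeloop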
Example #5
    def _run_eviction(self):
        tl = Timeloop()

        @tl.job(interval=self.eviction_interval)
        def eviction():
            for shard in self.shards:
                shard.run_eviction()

        tl.start()
        self.tl = tl
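
Keeping the Timeloop instance on self.tl implies a matching shutdown hook elsewhere in the class; a hypothetical sketch:

    def _stop_eviction(self):
        # stop the eviction timer started by _run_eviction
        self.tl.stop()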
Example #6
def main(config):
    address = "[::]:8888"
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    peerstore = Peerstore(config)
    pstore_grpc.add_PeerstoreServicer_to_server(peerstore, server)
    server.add_insecure_port(address)

    tl = Timeloop()
    set_timed_loops(tl, config, peerstore)
    tl.start(block=False)
    logger.info('started timers')

    server.start()
    logger.info('peerstore server {} ...', address)
    server.wait_for_termination()
Example #7
class ExtractorJob():
    def __init__(self, periodicity, text_filter, source):
        self.periodicity = timedelta(seconds=periodicity)
        self.source = source
        self.text_filter = [text_filter]
        self.t1 = Timeloop()
        self.setup_logger()

    def test_function(self):
        self.logger.info('Begin {source_name} test'.format(
            source_name=self.source.__name__))
        print('In test function')

    def target_function(self):
        print('Begin target function')
        # Begin first test
        self.logger.info('Begin {source_name} test'.format(
            source_name=self.source.__name__))
        extractor = self.source()
        # Get the urls
        extractor.get_news_urls(datetime.today())
        # Extract text from news
        extractor.extract_text_from_news()
        # Filter by keywords
        extractor.filter_news_by_keywords(self.text_filter)
        # Close the extractor
        del extractor

    def setup_logger(self):
        # Configure logger: oddcrawler needs to be the top logger
        self.logger = getLogger('oddcrawler')
        self.logger.setLevel(DEBUG)
        # create file handler
        fh = FileHandler('extractor_test.log')
        fh.setLevel(DEBUG)
        # create console handler
        ch = StreamHandler()
        ch.setLevel(ERROR)
        # create formatter and add it to handlers
        formatter = Formatter('%(levelname)s %(asctime)-15s %(message)s')
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        self.logger.addHandler(fh)
        self.logger.addHandler(ch)

    def run(self):
        self.t1._add_job(self.target_function, interval=self.periodicity)
        self.t1.start(block=True)
Example #8
def sync(key_file, user, remote_out_dir, local_sync_dir, every):

    if every == 0:
        # Sync only one time.
        sync_once(key_file, user, remote_out_dir, local_sync_dir)
    else:
        # Set up timer to sync regularly.
        tl = Timeloop()
        tl._add_job(
            sync_once,
            timedelta(seconds=every),
            key_file,
            user,
            remote_out_dir,
            local_sync_dir,
        )
        tl.start(block=True)
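
Note: _add_job is Timeloop's underscore-prefixed internal method (it forwards extra positional arguments to the job function). A sketch of the same schedule through the public decorator API, capturing the sync parameters in a closure:

tl = Timeloop()

@tl.job(interval=timedelta(seconds=every))
def sync_job():
    # the closure captures the parameters instead of routing them
    # through the private _add_job(func, interval, *args) method
    sync_once(key_file, user, remote_out_dir, local_sync_dir)

tl.start(block=True)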
Example #9

class FPSCounter:
    def __init__(self, in_q, out_q, terminating, avg_len=5):
        self.in_q = in_q
        self.out_q = out_q
        self.avg_len = avg_len
        self.terminating = terminating
        self.process_thd = threading.Thread(target=self.thd_entry,
                                            name='fps_thd')
        self.fps = FPS()
        self.fps.start()
        self.fps.stop()
        self.last_fps = []
        self.tl = Timeloop()
        _deco = self.tl.job(interval=timedelta(seconds=1))
        _deco(self.fps_job)

    def start_thread(self):
        self.tl.start(block=False)
        self.process_thd.start()
        return self.process_thd

    def thd_entry(self):
        print("fps thread started")
        while not self.terminating.is_set():
            ret = self.in_q.get()
            self.fps.update()
            try:
                self.out_q.put_nowait(ret)
            except queue.Full:
                pass

        self.tl.stop()
        print("fps thread terminated")

    def get_last_fps(self):
        return sum(self.last_fps) / len(self.last_fps)

    def fps_job(self):
        self.fps.stop()
        self.last_fps.append(self.fps.fps())
        self.fps.start()
        if len(self.last_fps) > self.avg_len:
            self.last_fps.pop(0)
        print(
            f"fps: curr={self.last_fps[-1]:3.2f}, min={min(self.last_fps):3.2f}, avg={self.get_last_fps():3.2f}, max={max(self.last_fps):3.2f}"
        )
Example #10
    def __init__(self, game_object=None):
        super().__init__()
        if not game_object:
            print("No game object found, probably debug mode")
        self.game_object = game_object
        self.layout = QVBoxLayout()
        # TODO: set widget size dynamically, depending on image size
        button_icon = Image.open("./src/button_icon.jpg")
        image_height = button_icon.height
        image_width = button_icon.width
        button_stylesheet = """ 
            QWidget{
                color: white; 
                height: %s;
                width: %d;
                background-image: url("./src/button_icon.jpg"); 
                background-repeat: no-repeat; 
                background-position: center;
                border: none;
            }
        """ % (image_height, image_width)
        self.credit_button = QPushButton('Click')
        self.credit_button.setStyleSheet(button_stylesheet)
        self.layout.addWidget(self.credit_button)
        self.credit_button.clicked.connect(self.on_credit_click)
        self.score_label = QLabel(str(self.game_object.credits_ects))
        self.score_label.setAlignment(QtCore.Qt.AlignCenter)
        self.professor_label = QLabel(
            str(self.game_object.professor) + " profs!")
        self.professor_label.setAlignment(QtCore.Qt.AlignCenter)
        self.buy_professor_button = QPushButton("Adopte un prof!")
        self.buy_professor_button.clicked.connect(self.on_buy_professor_click)
        self.layout.addWidget(self.buy_professor_button)
        self.layout.addWidget(self.score_label)
        self.layout.addWidget(self.professor_label)
        self.setLayout(self.layout)
        tl = Timeloop()

        @tl.job(interval=timedelta(seconds=10))
        def sample_job_every_10s():
            self.earn_upgrade_credits()

        tl.start()

        self.show()
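
Note: tl is local to __init__, so the widget has no handle to stop the job later; a sketch that keeps a reference and stops it in Qt's standard closeEvent hook (hypothetical addition):

        self.tl = tl  # keep a handle on the loop for later cleanup

    def closeEvent(self, event):
        # stop the timeloop when the widget closes
        self.tl.stop()
        event.accept()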
Example #11
    def on_init_complete(self):
        logger.info("App Init - completed")

        # Run all scoring methods
        if self._auto_update_scoring:
            self.async_scoring(None)

        # Run Cleanup Jobs
        def cleanup_sessions(instance):
            instance.cleanup_sessions()

        cleanup_sessions(self)
        time_loop = Timeloop()
        schedule.every(5).minutes.do(cleanup_sessions, self)

        @time_loop.job(interval=timedelta(seconds=30))
        def run_scheduler():
            schedule.run_pending()

        time_loop.start(block=False)
Example #12
def main():

    config = Config()

    metagraph = Metagraph(config)

    dendrite = Dendrite(config, metagraph)

    nucleus = Nucleus(config)

    neuron = Neuron(config, dendrite, nucleus, metagraph)

    neuron.serve()

    # Start timed calls.
    tl = Timeloop()
    set_timed_loops(tl, config, neuron, metagraph)
    tl.start(block=False)
    logger.info('Started Timers.')

    def tear_down(_config, _neuron, _dendrite, _nucleus, _metagraph):
        logger.debug('tear down.')
        del _neuron
        del _dendrite
        del _nucleus
        del _metagraph
        del _config

    try:
        logger.info('Begin wait on main...')
        while True:
            logger.debug('heartbeat')
            time.sleep(100)

    except KeyboardInterrupt:
        logger.debug('Neuron stopped with keyboard interrupt.')
        tear_down(config, neuron, dendrite, nucleus, metagraph)

    except Exception as e:
        logger.error('Neuron stopped with interrupt on error: ' + str(e))
        tear_down(config, neuron, dendrite, nucleus, metagraph)
Example #13
def register_virtual_device(socketio):
    """
    Register the virtual device callback to get invoked periodically.
    :param socketio:
    :return:
    """
    tl = Timeloop()
    # Timeloop's private _add_job forwards extra positional args to the job function
    tl._add_job(send_event, timedelta(seconds=1), socketio)
    # @tl.job(interval=timedelta(seconds=1))
    # def on_e_bike_event_received():
    #     return send_event(socketio)
    return tl.start()
Example #14

def main(hparams):

    logger.info("Establishing Metagraph Component...")
    metagraph = Metagraph(hparams)

    logger.info("Building Transformer Components...")

    logger.info("Transforming Dataset...")
    lines = download_and_read_file(hparams.dataset)

    logger.info("Building Transformer...")
    nucleus = Transformer(hparams, lines)

    neuron = Neuron(hparams, nucleus, metagraph)
    neuron.serve()

    tl = Timeloop()
    set_timed_loops(tl, hparams, neuron, metagraph)
    tl.start(block=False)
    logger.info("Started timers...")

    def tear_down(_hparams, _neuron, _nucleus, _metagraph):
        logger.debug("Tear down...")
        del _neuron
        del _nucleus
        del _metagraph
        del _hparams

    try:
        logger.info("Begin wait on main...")
        while True:
            logger.debug('heartbeat')
            time.sleep(100)
    except KeyboardInterrupt:
        logger.debug("Neuron stopped with keyboard interrupt.")
        tear_down(hparams, neuron, nucleus, metagraph)

    except Exception as e:
        logger.error("Neuron stopped with interrupt on error: {}".format(e))
        tear_down(hparams, neuron, nucleus, metagraph)
Example #15
class AsyncAgentPrototype():
    #both: 0
    #main: 1
    #self_check: 2
    def __init__(self,t1=None,t2=None):
        self.__t_main = Timeloop()
        self.__t_selfcheck = Timeloop()

        self._intv_main=t1
        self._intv_check=t2
   
    
    # @self.__t_main.job(interval=timedelta(seconds=_intv_main))
    # def sample_job_every_2s(self):
    #     print("2s job current time : {}".format(time.ctime()))

    # @self.__t_selfcheck.job(interval=timedelta(seconds=5))
    # def run_t(self):
    #         print("5s job current time : {}".format(time.ctime()))
    
    @staticmethod
    def __sec2int(sec):
        return timedelta(seconds=sec)

    def start(self,sel=0):
        if not sel==2: self.__t_main.start()
        if not sel==1: self.__t_selfcheck.start()

    def stop(self,sel=0):
        if not sel==2: self.__t_main.stop()
        if not sel==1: self.__t_selfcheck.stop()



    # assumption: these event loops always hold one job. R: get job id on attach
    def set_interval(self, t, sel=0):
        # TODO: clean this up by parameterizing loop.job
        if (not sel == 2) and self.__t_main.jobs:
            self.__t_main.jobs[0].interval = self.__sec2int(t)
        if (not sel == 1) and self.__t_selfcheck.jobs:
            self.__t_selfcheck.jobs[0].interval = self.__sec2int(t)
Example #16
def main(hparams):
    metagraph = Metagraph(hparams)
    modelfn = Modelfn(hparams)
    nucleus = Nucleus(hparams, modelfn)
    dendrite = Dendrite(hparams, metagraph)
    dataset = Dataset(hparams)
    neuron = Neuron(hparams, nucleus, dendrite, dataset)
    synapse = Synapse(hparams, neuron, metagraph)

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    bittensor_grpc.add_BittensorServicer_to_server(synapse, server)
    server.add_insecure_port(hparams.bind_address + ":" + hparams.port)
    server.start()

    neuron.start_training()

    tl = Timeloop()
    set_timed_loops(tl, hparams, neuron)
    tl.start(block=False)
    logger.info('Started Timers.')

    try:
        logger.info('Begin wait on main...')
        while True:
            logger.debug('heartbeat')
            time.sleep(100)

    except KeyboardInterrupt:
        logger.debug('Neuron stopped with keyboard interrupt.')
        server.stop(2)
        del neuron
        del metagraph
        del synapse

    except Exception as e:
        logger.error('Neuron stopped with interrupt on error: ' + str(e))
        server.stop(2)
        del neuron
        del metagraph
        del synapse
Example #17
class Reactor:
    """ Handles the waiting and firing of events to periodically anchor the the root hash.

    Assumes:
    - The chain/py_anchor_root.js file has all the correct variables defined
    """
    def __init__(self, tree, interval=60):
        """sets up the async event loop
        Parameters:
        tree - the same merkle tree instance being anchored
        interval - the optional parameter for setting the time interval of the anchor.

        Returns: an object that tracks the current phase, spawns subtasks
        """
        self.tree = tree
        self.loop = Timeloop()
        self.chain = Chain()
        self.interval = interval
        logger.info("The reactor was started at time: {time}",
                    time=datetime.timestamp(datetime.now()))

    def start(self):

        # Kick off the anchoring event every self.interval seconds (60 by default)
        @self.loop.job(interval=timedelta(seconds=self.interval))
        def anchor_tree():
            root = self.tree.get_current_root().decode('utf-8')
            self.chain.anchor(root)
            logger.info("anchored root: {} \t at  time: {} \t block: {}", root,
                        datetime.timestamp(datetime.now()), self.chain.block)

        self.loop.start(block=True)

    def stop(self):
        """always ensure the loop has been manually stopped"""
        logger.info("timeloop stopping at time: {} \t block:{}",
                    datetime.timestamp(datetime.now()), self.chain.block)
        self.loop.stop()
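
A minimal usage sketch, assuming a merkle tree object exposing the get_current_root() method used above:

reactor = Reactor(tree, interval=30)  # anchor the root every 30 seconds
reactor.start()   # blocks; Timeloop stops its jobs on SIGINT/SIGTERM
reactor.stop()    # per the docstring, always ensure the loop is stopped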
Example #18
logging.basicConfig(
    filename="logs/{}.log".format(
        datetime.datetime.now().strftime("%b-%d-%I%M%p-%G")
    ),
    level=logging.INFO,
    format=format_template,
)

formatter = logging.Formatter(format_template)
console = logging.StreamHandler()
console.setFormatter(formatter)
console.setLevel(logging.INFO)

logger = logging.getLogger("autopilot")
logger.addHandler(console)


timeloop = Timeloop()
agent = AgentFactory.create_from_base_configuration()


@timeloop.job(interval=timedelta(seconds=2))
def clock():
    agent.run()
    logger.info("agent.state = {}".format(agent.state))


if __name__ == "__main__":
    timeloop.start(block=True)
Example #19
    def __init__(
            self,
            training_candles: ndarray, testing_candles: ndarray,
            optimal_total: int, cpu_cores: int,
            csv: bool,
            export_json: bool,
            start_date: str, finish_date: str,
            charset: str = r'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvw',
            fitness_goal: float = 1,
    ) -> None:
        if len(router.routes) != 1:
            raise NotImplementedError('optimize_mode only supports one route at the moment')

        self.strategy_name = router.routes[0].strategy_name
        self.exchange = router.routes[0].exchange
        self.symbol = router.routes[0].symbol
        self.timeframe = router.routes[0].timeframe
        strategy_class = jh.get_strategy_class(self.strategy_name)
        self.strategy_hp = strategy_class.hyperparameters(None)
        solution_len = len(self.strategy_hp)

        if solution_len == 0:
            raise exceptions.InvalidStrategy('Targeted strategy does not implement a valid hyperparameters() method.')

        self.started_index = 0
        self.start_time = jh.now_to_timestamp()
        self.population = []
        self.iterations = 2000 * solution_len
        self.population_size = solution_len * 100
        self.solution_len = solution_len
        self.charset = charset
        self.fitness_goal = fitness_goal
        self.cpu_cores = 0
        self.optimal_total = optimal_total
        self.training_candles = training_candles
        self.testing_candles = testing_candles
        self.average_execution_seconds = 0

        # check for termination event once per second
        tl_0 = Timeloop()
        @tl_0.job(interval=timedelta(seconds=1))
        def check_for_termination():
            if process_status() != 'started':
                raise exceptions.Termination
        tl_0.start()

        options = {
            'strategy_name': self.strategy_name,
            'exchange': self.exchange,
            'symbol': self.symbol,
            'timeframe': self.timeframe,
            'strategy_hp': self.strategy_hp,
            'csv': csv,
            'json': export_json,
            'start_date': start_date,
            'finish_date': finish_date,
        }

        self.options = {} if options is None else options
        os.makedirs('./storage/temp/optimize', exist_ok=True)
        self.temp_path = f"./storage/temp/optimize/{self.options['strategy_name']}-{self.options['exchange']}-{self.options['symbol']}-{self.options['timeframe']}-{self.options['start_date']}-{self.options['finish_date']}.pickle"

        if fitness_goal > 1 or fitness_goal < 0:
            raise ValueError('fitness scores must be between 0 and 1')

        if not optimal_total > 0:
            raise ValueError('optimal_total must be bigger than 0')

        # # if temp file exists, load data to resume previous session
        # if jh.file_exists(self.temp_path) and click.confirm(
        #         'Previous session detected. Do you want to resume?', default=True
        # ):
        #     self.load_progress()

        if cpu_cores > cpu_count():
            raise ValueError(f'Entered CPU cores number is higher than the {cpu_count()} cores available on this machine')
        elif cpu_cores == 0:
            self.cpu_cores = cpu_count()
        else:
            self.cpu_cores = cpu_cores
Example #20
        CommandHandler("start",
                       start,
                       pass_job_queue=True,
                       pass_chat_data=True))
    dp.add_handler(CommandHandler("stop", stop))
    dp.add_handler(CommandHandler("rate", rate))
    dp.add_handler(MessageHandler(Filters.regex('ping'), ping))

    dp.bot.send_animation(
        chat_id=371439949,
        caption='Bot started..',
        duration=5,
        animation=
        'https://i.pinimg.com/originals/eb/24/ac/eb24ac9ceb8b614128ed5945a385206a.gif'
    )

    updater.start_polling()
    updater.idle()


if __name__ == '__main__':
    repeater.start(block=False)
    main()
    while True:
        try:
            time.sleep(1000)
        except KeyboardInterrupt:
            repeater.stop()
            updater.stop()
            break
            if "red" in mail_content:
                print(f'Red found!')
                leds.fill((255, 0, 0))
            elif "green" in mail_content:
                print(f'Green found!')
                leds.fill((0, 255, 0))
            elif "orange" in mail_content:
                print(f'Orange found!')
                leds.fill((255, 50, 0))
            elif "yellow" in mail_content:
                print(f'Yellow found!')
                leds.fill((255, 255, 0))
            date_comp = datetime.datetime.strptime(mail_date,
                                                   '%a, %d %b %Y %H:%M:%S %z')
            date_string = date_comp.strftime("%d-%b-%Y")
            print(f'comp date string: {date_string}')
            if date_string > checkdate:
                checkdate = date_string
                print(f'New date: {checkdate}')
    leds.show()


tl.start()

while True:
    try:
        time.sleep(1)
    except KeyboardInterrupt:
        tl.stop()
        break
Example #22
import time
from timeloop import Timeloop
from datetime import timedelta

t = Timeloop()


@t.job(interval=timedelta(seconds=3))
def show():
    print('text')


t.start()
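
One caveat with this minimal form: start() without block=True runs the job on a daemon thread, so the script can exit before show() ever fires. A blocking variant:

t.start(block=True)  # keeps the process alive; Ctrl-C stops the job threads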
Example #23
def serve():

    # TODO(const) Use Hparams and FLAGS like in ARC nets.
    config = Config()
    logger.debug(config)

    # The metagraph manages the global network state.
    metagraph = Metagraph(config)

    # The dendrite manages our connections to 'upstream' nodes.
    dendrite = Dendrite(config, metagraph)

    # The nucleus trains the NN object.
    nucleus = Nucleus(config, metagraph, dendrite)

    # The synapse manages our connection to downstream nodes.
    synapse = BoltServicer(config, metagraph)
    logger.info('Started Synapse.')

    # Start the Nucleus.
    nucleus.start()
    logger.info('Started Nucleus.')

    # Start timed calls.
    tl = Timeloop()
    set_timed_loops(tl, metagraph, nucleus, synapse, dendrite)
    tl.start(block=False)
    logger.info('Started Timers.')

    # Serve the synapse on a grpc server.
    server_address = config.bind_address + ":" + config.port
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    bittensor.proto.bittensor_pb2_grpc.add_BittensorServicer_to_server(
        synapse, grpc_server)
    grpc_server.add_insecure_port(server_address)
    logger.debug('Served synapse on: {}.', server_address)
    grpc_server.start()

    def tear_down(_server, _nucleus, _metagraph, _dendrite, _synapse):
        _server.stop(0)
        _nucleus.stop()
        del _metagraph
        del _dendrite
        del _nucleus
        del _synapse

    try:
        logger.info('Begin wait on main...')
        while True:

            # NOTE(const): Matplotib must run in the main thread.
            image_buffer = visualization.generate_edge_weight_buffer(
                metagraph.nodes)
            nucleus.update_metagraph_summary(image_buffer)
            logger.info('Updated metagraph image.')
            time.sleep(30)

    except KeyboardInterrupt:
        logger.debug('keyboard interrupt.')
        tear_down(grpc_server, nucleus, metagraph, dendrite, synapse)

    except:
        logger.error('unknown interrupt.')
        tear_down(grpc_server, nucleus, metagraph, dendrite, synapse)
Example #24
def train_gtfs_func(data):
    feed = gtfs_realtime_pb2.FeedMessage()
    feed.ParseFromString(data)
    decoded_data = json.loads(json_format.MessageToJson(feed))
    return decoded_data


@t1.job(interval=timedelta(seconds=TIME_TO_WAIT))
def produce():
    [x.produce(1) for x in producers]


if __name__ == "__main__":
    train_position_producer = CustomProducer('train_positions',
                                             KAFKA['start_utc'],
                                             train_pos_func, 'train_positions')

    train_gtfs_producer = CustomProducer('train_gtfs', KAFKA['start_utc'],
                                         train_gtfs_func,
                                         'train_positions_gtfs')

    producers = [train_position_producer, train_gtfs_producer]

    # TODO: Should this be run by default or in debug?
    train_gtfs_producer.produce(produce_all=True)
    train_position_producer.produce(produce_all=True)
    # [x.produce(250) for x in producers]

    t1.start(block=True)
Example #25

            self.notifier_windows.show_toast(title="", msg=f"Connection Status: {self.connection_status[index][0]}", duration=self.duration, icon_path=self.ICON_PATH)

        elif self.platform == "linux":
            self.n.update(f"Connection Status: {self.connection_status[index][0]}", icon=self.ICON_PATH)
            self.n.show()


router = DLink2730URouterStatus()

# Todo: Start timeloop
timeloop = Timeloop()

@timeloop.job(interval=timedelta(seconds=router.duration))
def status_notifier():
    status = router.get_connection_status()

    # Already reported or encountered an error
    if (router.buffer == status) or (status == router.ignore):
        pass
    # Success
    else:
        router.notify(status)
        # Update buffer
        router.buffer = status


# Invoke timeloop
if __name__ == "__main__":
    timeloop.start(block=True)
Example #26
         name="celostatni-opatreni"),
    path("aktualnost/", views.aktualnost, name="aktualnost"),
    # path('statistiky/', views.stats, name='statistiky'),
    path("api/search", find_place_by_name, name="najdi_mesto"),
    path("api/update_stats", get_update_stats, name="update_stats"),
    path("api/all_update_stats", get_all_update_stats, name="update_stats"),
    path("admin/kontrola-zadaneho/",
         kontrola_zadaneho,
         name="admin_kontrola_zadaneho"),
    path("admin/graphs/", graphs, name="admin_graf"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

handler404 = custom_page_not_found_view
handler500 = custom_error_view
handler403 = custom_permission_denied_view
handler400 = custom_bad_request_view

if DEV:
    urlpatterns += path('__debug__/', include(debug_toolbar.urls)),

tl = Timeloop()


@tl.job(interval=timedelta(seconds=300))
def sample_job_every_300s():
    if not BETA and not DEV:
        requests.get("https://potrebujurousku.cz/aktualnost/")


tl.start(block=False)
Example #27
def run(
        debug_mode,
        user_config: dict,
        routes: List[Dict[str, str]],
        extra_routes: List[Dict[str, str]],
        start_date: str,
        finish_date: str,
        candles: dict = None,
        chart: bool = False,
        tradingview: bool = False,
        full_reports: bool = False,
        csv: bool = False,
        json: bool = False
) -> None:
    if not jh.is_unit_testing():
        # at every second, we check to see if it's time to execute stuff
        status_checker = Timeloop()
        @status_checker.job(interval=timedelta(seconds=1))
        def handle_time():
            if process_status() != 'started':
                raise exceptions.Termination
        status_checker.start()

    from jesse.config import config, set_config
    config['app']['trading_mode'] = 'backtest'

    # debug flag
    config['app']['debug_mode'] = debug_mode

    # inject config
    if not jh.is_unit_testing():
        set_config(user_config)

    # set routes
    router.initiate(routes, extra_routes)

    store.app.set_session_id()

    register_custom_exception_handler()

    # clear the screen
    if not jh.should_execute_silently():
        click.clear()

    # validate routes
    validate_routes(router)

    # initiate candle store
    store.candles.init_storage(5000)

    # load historical candles
    if candles is None:
        candles = load_candles(start_date, finish_date)
        click.clear()

    if not jh.should_execute_silently():
        sync_publish('general_info', {
            'session_id': jh.get_session_id(),
            'debug_mode': str(config['app']['debug_mode']),
        })

        # candles info
        key = f"{config['app']['considering_candles'][0][0]}-{config['app']['considering_candles'][0][1]}"
        sync_publish('candles_info', stats.candles_info(candles[key]['candles']))

        # routes info
        sync_publish('routes_info', stats.routes(router.routes))

    # run backtest simulation
    simulator(candles, run_silently=jh.should_execute_silently())

    # hyperparameters (if any)
    if not jh.should_execute_silently():
        sync_publish('hyperparameters', stats.hyperparameters(router.routes))

    if not jh.should_execute_silently():
        if store.completed_trades.count > 0:
            sync_publish('metrics', report.portfolio_metrics())

            routes_count = len(router.routes)
            more = f"-and-{routes_count - 1}-more" if routes_count > 1 else ""
            study_name = f"{router.routes[0].strategy_name}-{router.routes[0].exchange}-{router.routes[0].symbol}-{router.routes[0].timeframe}{more}-{start_date}-{finish_date}"
            store_logs(study_name, json, tradingview, csv)

            if chart:
                charts.portfolio_vs_asset_returns(study_name)

            sync_publish('equity_curve', charts.equity_curve())

            # QuantStats' report
            if full_reports:
                price_data = []
                # load close candles for Buy and hold and calculate pct_change
                for index, c in enumerate(config['app']['considering_candles']):
                    exchange, symbol = c[0], c[1]
                    if exchange in config['app']['trading_exchanges'] and symbol in config['app']['trading_symbols']:
                        # fetch from database
                        candles_tuple = Candle.select(
                            Candle.timestamp, Candle.close
                        ).where(
                            Candle.timestamp.between(jh.date_to_timestamp(start_date),
                                                     jh.date_to_timestamp(finish_date) - 60000),
                            Candle.exchange == exchange,
                            Candle.symbol == symbol
                        ).order_by(Candle.timestamp.asc()).tuples()

                        candles = np.array(candles_tuple)

                        timestamps = candles[:, 0]
                        price_data.append(candles[:, 1])

                price_data = np.transpose(price_data)
                price_df = pd.DataFrame(price_data, index=pd.to_datetime(timestamps, unit="ms"), dtype=float).resample(
                    'D').mean()
                price_pct_change = price_df.pct_change(1).fillna(0)
                bh_daily_returns_all_routes = price_pct_change.mean(1)
                quantstats.quantstats_tearsheet(bh_daily_returns_all_routes, study_name)
        else:
            sync_publish('equity_curve', None)
            sync_publish('metrics', None)

    # close database connection
    from jesse.services.db import database
    database.close_connection()
Example #28
class Session():
    """
  Interacts with a CouchDB server's REST API for session management.

  Examples:
    couchDB = Session(host="https://somehost.com", port=6984, username="******", password="******")
    couchDB = Session(host="https://somehost.com", port=6984, auth_token="bGVlLmx1bm5AZGFsLmNhOjVERTQ4RjhCOq-IvL4mhVVUFn4k5H1bIYiggf3X")
    couchDB = Session(host="https://somehost.com", port=6984, username="******", password="******", keep_alive=290)

  Attributes:
  :param bool admin_party: Determines whether or not to attempt connections to the CouchDB using Admin Party. (Default: False)
  :param str username: Username used to authenticate to the CouchDB server. (Default: None)
  :param str password: Password used to authenticate to the CouchDB server. (Default: None)
  :param str auth_token: AuthSession value used to authenticate to the CouchDB server.
    If provided, authentication using AuthSession will be attempted. AuthSession is set/updated
    with successful authentication when connecting with username and password. (Default: None)

  :param str host: Address that the CouchDB server is served from. (Default: http://127.0.0.1)
  :param int port: Port number that the CouchDB server is listening on. (Default: 5984)
  :param int keep_alive: Determines if automatic session renewal will be attempted and at what frequency. If > 0, session renewal is performed every keep_alive seconds. (Default: 0)
  :param bool auto_connect: Determines if an authentication attempt will be made during instancing of this object. (Default: False)
  :param bool basic_auth: Sets authentication method to the CouchDB server to Basic. If basic authentication is used, auto_connect has no effect. (Default: False)

  :param dict custom_headers: Dictionary of custom headers to add to each request to the CouchDB server. (Default: None)
  """
    def __init__(self, **kwargs):
        self._host = kwargs.get('host', 'http://127.0.0.1')
        self._port = kwargs.get('port', 5984)
        self.address = f'{self._host}:{self._port}'

        self.custom_headers = kwargs.get('custom_headers',
                                         {})  # TODO: implement

        self._keep_alive = kwargs.get('keep_alive', 0)
        self._keep_alive_timeloop = Timeloop()
        self._keep_alive_timeloop.logger.setLevel('WARNING')

        self._name = kwargs.get('username', None)
        self._password = kwargs.get('password', None)
        self.auth_token = kwargs.get('auth_token', None)

        self._auto_connect = kwargs.get('auto_connect', False)

        self._basic_auth = kwargs.get('basic_auth',
                                      False)  # TODO: implement basic auth
        self._admin_party = kwargs.get('admin_party',
                                       False)  # TODO: implement admin party

        self._headers = {
            'Content-type': 'application/json',
            'Accept': 'application/json'
        }

        # reference to this object is required for the CouchDBDecorators.endpoint to be able to update the auth token
        self.session = self

        # TODO: implement a generic Error class to hold error information that consumer can check
        if (self._auto_connect is True and self._basic_auth is False):
            self.authenticate(data={
                'name': self._name,
                'password': self._password
            })

    def __del__(self):
        if (self._keep_alive > 0):
            self._keep_alive_timeloop.stop()

    def _create_basic_auth_header(self):
        return requests.auth.HTTPBasicAuth(self._name, self._password)(
            requests.Request()).headers

    def set_auth_token_from_headers(self, headers):
        # if a new auth token is issued, include it in the response, otherwise, return the original
        if ('Set-Cookie' in headers):
            self.auth_token = headers.get('Set-Cookie').split(
                ';', 2)[0].split('=')[1]

    @RelaxedDecorators.endpoint('/_session',
                                method='post',
                                data_keys={
                                    'name': str,
                                    'password': str
                                })
    def authenticate(self, doc):
        return doc

    @RelaxedDecorators.endpoint('/_session')
    def get_session_info(self, doc):
        return doc

    @RelaxedDecorators.endpoint('/_session', method='delete')
    def close(self, doc):
        return doc if isinstance(doc, CouchError) else None

    def authenticate_via_proxy(self, username, roles, token):
        """
    Not implemented. See:
      https://docs.couchdb.org/en/stable/api/server/authn.html#proxy-authentication
      https://stackoverflow.com/a/40499853/3169479 (for implementation details)
    """
        pass

    def renew_session(self):
        """
    Alias for get_session_info()
    """
        return self.get_session_info()

    def keep_alive(self, isEnabled=False):
        """
    Enables or disables keep alive.
    """
        if (isEnabled is False):
            self._keep_alive_timeloop.stop()
        elif (isEnabled and self._keep_alive > 0
              and self.auth_token is not None):
            if (len(self._keep_alive_timeloop.jobs) == 0):
                self._keep_alive_timeloop._add_job(
                    func=self.renew_session,
                    interval=timedelta(seconds=self._keep_alive))
                self._keep_alive_timeloop.start()
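
A hypothetical connection sketch (host and credentials are placeholders; keep_alive(True) assumes the auto_connect authentication succeeded and set auth_token):

couch = Session(host='https://couch.example.com', port=6984,
                username='admin', password='secret',
                auto_connect=True, keep_alive=290)
couch.keep_alive(True)           # renew the session every 290 seconds via Timeloop
info = couch.get_session_info()
couch.close()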
Example #29
            "Currently,the air quality is safe to the general public. However, people who are sensitive to pollution may experience more serious health effects"
        )
    elif pm10_data > 150 and pm10_data <= 200:
        print(
            "Currently,the air quality is unhealthy. The general public may experience mild health effects"
        )
    elif pm10_data > 200 and pm10_data <= 300:
        print(
            "Currently,the air quality is very unhealthy. The general public may experience mild health effects"
        )
    else:
        print(
            "Currently,the air quality is hazardous. The general public may experience serious health effects"
        )

    print("Right now the average pm10 data is: " +
          str(avg_pm10_data[0]['avg_pm10']) + "\n")
    print("Right now the average weather is: " +
          str(avg_pm10_data[0]['avg_weather']) + "\n")
    print("The current weather is: " +
          str(weather_results['results'][0]['temp']['value']) + "F")


def main():
    sample_job_every_120s()


if __name__ == "__main__":
    tl.start(block=True)
    main()
Example #30
class PriceFeederJobBase:

    def __init__(self, price_f_config, config_net, connection_net):

        self.options = price_f_config
        self.config_network = config_net
        self.connection_network = connection_net

        # connection network is the brownie connection network
        # config network is the environment we want to connect to
        network_manager.connect(connection_network=self.connection_network,
                                config_network=self.config_network)

        address_medianizer = self.options['networks'][self.config_network]['addresses']['MoCMedianizer']
        address_pricefeed = self.options['networks'][self.config_network]['addresses']['PriceFeed']

        log.info("Starting with MoCMedianizer: {}".format(address_medianizer))
        log.info("Starting with PriceFeed: {}".format(address_pricefeed))

        self.app_mode = self.options['networks'][self.config_network]['app_mode']

        # simulation don't write to blockchain
        self.is_simulation = False
        if 'is_simulation' in self.options:
            self.is_simulation = self.options['is_simulation']

        # Min prices source
        self.min_prices_source = 1
        if 'min_prices_source' in self.options:
            self.min_prices_source = self.options['min_prices_source']

        # backup writes
        self.backup_writes = 0

        self.tl = Timeloop()
        self.last_price = 0.0
        self.last_price_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=300)

        self.price_source = PriceEngines(self.options['networks'][self.config_network]['price_engines'],
                                         log=log,
                                         app_mode=self.app_mode,
                                         min_prices=self.min_prices_source)

    @staticmethod
    def aws_put_metric_exception(value):
        """ Only for AWS cloudwatch"""

        if 'AWS_ACCESS_KEY_ID' not in os.environ:
            return

        # Create CloudWatch client
        cloudwatch = boto3.client('cloudwatch')

        # Put custom metrics
        cloudwatch.put_metric_data(
            MetricData=[
                {
                    'MetricName': os.environ['PRICE_FEEDER_NAME'],
                    'Dimensions': [
                        {
                            'Name': 'PRICEFEEDER',
                            'Value': 'Error'
                        },
                    ],
                    'Unit': 'None',
                    'Value': value
                },
            ],
            Namespace='MOC/EXCEPTIONS'
        )

    def price_feed(self):
        """ Post price """
        return

    def price_feed_backup(self):
        """ Post price in backup mode """
        return

    def job_price_feed(self):

        try:
            self.price_feed()
        except Exception as e:
            log.error(e, exc_info=True)
            self.aws_put_metric_exception(1)

    def job_price_feed_backup(self):

        try:
            self.price_feed_backup()
        except Exception as e:
            log.error(e, exc_info=True)
            self.aws_put_metric_exception(1)

    def add_jobs(self):

        # creating the alarm
        self.aws_put_metric_exception(0)

        backup_mode = False
        if 'backup_mode' in self.options:
            if self.options['backup_mode']:
                backup_mode = True

        if backup_mode:
            log.info("Job Price feeder as BACKUP!")
            self.tl._add_job(self.job_price_feed_backup, datetime.timedelta(
                seconds=self.options['interval']))
        else:
            self.tl._add_job(self.job_price_feed, datetime.timedelta(
                seconds=self.options['interval']))

    def time_loop_start(self):

        self.add_jobs()
        self.tl.start()
        while True:
            try:
                time.sleep(1)
            except KeyboardInterrupt:
                self.tl.stop()
                break