Example #1
from datetime import timedelta

from timeloop import Timeloop


def register_virtual_device(socketio):
    """
    Register the virtual device callback to get invoked periodically.
    :param socketio: socket connection passed through to ``send_event``.
    :return: None; ``tl.start()`` runs the job in a background thread.
    """
    tl = Timeloop()
    # _add_job forwards extra kwargs to the callback, so send_event is
    # called as send_event(args={'socketio': socketio}) once per second.
    tl._add_job(send_event, interval=timedelta(seconds=1), args={'socketio': socketio})
    # @tl.job(interval=timedelta(seconds=1))
    # def on_e_bike_event_received():
    #     return send_event(socketio)
    return tl.start()
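
A wiring sketch for this helper; the Flask-SocketIO app and the send_event callback below are assumptions for illustration, not part of the original snippet:

from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app)

def send_event(args=None):
    # Receives the kwargs forwarded by _add_job above.
    args['socketio'].emit('virtual_device_event', {'value': 0})

register_virtual_device(socketio)
socketio.run(app)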
Example #2
from datetime import datetime, timedelta
from logging import (DEBUG, ERROR, FileHandler, Formatter, StreamHandler,
                     getLogger)

from timeloop import Timeloop


class ExtractorJob:
    def __init__(self, periodicity, text_filter, source):
        self.periodicity = timedelta(seconds=periodicity)
        self.source = source
        self.text_filter = [text_filter]
        self.t1 = Timeloop()
        self.setup_logger()

    def test_function(self):
        self.logger.info('Begin {source_name} test'.format(
            source_name=self.source.__name__))
        print('In test function')

    def target_function(self):
        print('Begin target function')
        # Begin first test
        self.logger.info('Begin {source_name} test'.format(
            source_name=self.source.__name__))
        extractor = self.source()
        # Get the urls
        extractor.get_news_urls(datetime.today())
        # Extract text from news
        extractor.extract_text_from_news()
        # Filter by keywords
        extractor.filter_news_by_keywords(self.text_filter)
        # Close the extractor
        del extractor

    def setup_logger(self):
        # Configure logger: oddcrawler needs to be the top logger
        self.logger = getLogger('oddcrawler')
        self.logger.setLevel(DEBUG)
        # create file handler
        fh = FileHandler('extractor_test.log')
        fh.setLevel(DEBUG)
        # create console handler
        ch = StreamHandler()
        ch.setLevel(ERROR)
        # create formatter and add it to handlers
        formatter = Formatter('%(levelname)s %(asctime)-15s %(message)s')
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        self.logger.addHandler(fh)
        self.logger.addHandler(ch)

    def run(self):
        self.t1._add_job(self.target_function, interval=self.periodicity)
        self.t1.start(block=True)
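
A usage sketch for ExtractorJob; DummyExtractor is a hypothetical stand-in implementing the interface that target_function calls:

class DummyExtractor:
    def get_news_urls(self, day):
        print('Fetching URLs for', day)

    def extract_text_from_news(self):
        print('Extracting text')

    def filter_news_by_keywords(self, keywords):
        print('Filtering by', keywords)

job = ExtractorJob(periodicity=3600, text_filter='economy', source=DummyExtractor)
job.run()  # blocks; interrupt with Ctrl+C to stop the Timeloop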
Example #3
File: cli.py | Project: jrieke/awstrainer
from datetime import timedelta

from timeloop import Timeloop


def sync(key_file, user, remote_out_dir, local_sync_dir, every):

    if every == 0:
        # Sync only one time.
        sync_once(key_file, user, remote_out_dir, local_sync_dir)
    else:
        # Set up timer to sync regularly.
        tl = Timeloop()
        tl._add_job(
            sync_once,
            timedelta(seconds=every),
            key_file,
            user,
            remote_out_dir,
            local_sync_dir,
        )
        tl.start(block=True)
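
A direct-call sketch with placeholder arguments; in the original project this function is presumably exposed through a CLI:

sync(
    key_file='~/.ssh/key.pem',       # hypothetical paths and values
    user='ubuntu',
    remote_out_dir='/home/ubuntu/out',
    local_sync_dir='./out',
    every=60,                        # every=0 would sync exactly once
)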
Example #4
class PriceFeederJobBase:

    def __init__(self, price_f_config, config_net, connection_net):

        self.options = price_f_config
        self.config_network = config_net
        self.connection_network = connection_net

        # connection_network is the brownie connection network;
        # config_network is the environment we want to connect to
        network_manager.connect(connection_network=self.connection_network,
                                config_network=self.config_network)

        address_medianizer = self.options['networks'][self.config_network]['addresses']['MoCMedianizer']
        address_pricefeed = self.options['networks'][self.config_network]['addresses']['PriceFeed']

        log.info("Starting with MoCMedianizer: {}".format(address_medianizer))
        log.info("Starting with PriceFeed: {}".format(address_pricefeed))

        self.app_mode = self.options['networks'][self.config_network]['app_mode']

        # a simulation doesn't write to the blockchain
        self.is_simulation = False
        if 'is_simulation' in self.options:
            self.is_simulation = self.options['is_simulation']

        # Min prices source
        self.min_prices_source = 1
        if 'min_prices_source' in self.options:
            self.min_prices_source = self.options['min_prices_source']

        # backup writes
        self.backup_writes = 0

        self.tl = Timeloop()
        self.last_price = 0.0
        self.last_price_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=300)

        self.price_source = PriceEngines(self.options['networks'][self.config_network]['price_engines'],
                                         log=log,
                                         app_mode=self.app_mode,
                                         min_prices=self.min_prices_source)

    @staticmethod
    def aws_put_metric_exception(value):
        """ Only for AWS cloudwatch"""

        if 'AWS_ACCESS_KEY_ID' not in os.environ:
            return

        # Create CloudWatch client
        cloudwatch = boto3.client('cloudwatch')

        # Put custom metrics
        cloudwatch.put_metric_data(
            MetricData=[
                {
                    'MetricName': os.environ['PRICE_FEEDER_NAME'],
                    'Dimensions': [
                        {
                            'Name': 'PRICEFEEDER',
                            'Value': 'Error'
                        },
                    ],
                    'Unit': 'None',
                    'Value': value
                },
            ],
            Namespace='MOC/EXCEPTIONS'
        )

    def price_feed(self):
        """ Post price """
        return

    def price_feed_backup(self):
        """ Post price in backup mode """
        return

    def job_price_feed(self):

        try:
            self.price_feed()
        except Exception as e:
            log.error(e, exc_info=True)
            self.aws_put_metric_exception(1)

    def job_price_feed_backup(self):

        try:
            self.price_feed_backup()
        except Exception as e:
            log.error(e, exc_info=True)
            self.aws_put_metric_exception(1)

    def add_jobs(self):

        # creating the alarm
        self.aws_put_metric_exception(0)

        backup_mode = False
        if 'backup_mode' in self.options:
            if self.options['backup_mode']:
                backup_mode = True

        if backup_mode:
            log.info("Job Price feeder as BACKUP!")
            self.tl._add_job(self.job_price_feed_backup, datetime.timedelta(
                seconds=self.options['interval']))
        else:
            self.tl._add_job(self.job_price_feed, datetime.timedelta(
                seconds=self.options['interval']))

    def time_loop_start(self):

        self.add_jobs()
        self.tl.start()
        while True:
            try:
                time.sleep(1)
            except KeyboardInterrupt:
                self.tl.stop()
                break
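
The base class leaves price_feed and price_feed_backup empty; a minimal sketch of a concrete subclass follows. The PriceEngines call and the write step are assumptions for illustration, not the project's actual API:

class PriceFeederJob(PriceFeederJobBase):

    def price_feed(self):
        # Hypothetical: ask the configured engines for a weighted price
        # and post it on-chain unless we are in simulation mode.
        price = self.price_source.get_weighted()  # assumed PriceEngines method
        if not self.is_simulation:
            pass  # post `price` to the PriceFeed contract here
        self.last_price = price
        self.last_price_timestamp = datetime.datetime.now()

job = PriceFeederJob(price_f_config, config_net, connection_net)  # hypothetical configs
job.time_loop_start()  # schedules job_price_feed and blocks until Ctrl+C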
Example #5
File: session.py | Project: llunn/relaxed
class Session():
    """
  Interacts with a CouchDB server's REST API for session management.

  Examples:
    couchDB = Session(address="https://somehost.com", port="6984", username="******", password="******", address="https://somehost.com", port="6984")
    couchDB = Session(address="https://somehost.com", port="6984", auth_token="bGVlLmx1bm5AZGFsLmNhOjVERTQ4RjhCOq-IvL4mhVVUFn4k5H1bIYiggf3X")
    couchDB = Session(address="https://somehost.com", port="6984", username="******", password="******", keep_alive=290)

  Attributes:
  :param bool admin_party: Determines whether or not to attempt connections to the CouchDB using Admin Party. (Default: False)
  :param str username: Username used to authenticate to the CouchDB server. (Default: None)
  :param str password: Password used to authenticate to the CouchDB server. (Default: None)
  :param str auth_token: AuthSession value used to authenticate to the CouchDB server.
    If provided, authentication using AuthSession will be attempted. AuthSession is set/updated
    with successful authentication when connecting with username and password. (Default: None)

  :param str host: Address that the CouchDB server is served from. (Default: http://127.0.0.1)
  :param int port: Port number that the CouchDB server is listening on. (Default: 5984)
  :param int keep_alive: Determines if automatic session renewal will be attempted and at what frequency. If > 0, session renewal is performed every keep_alive seconds. (Default: 0)
  :param bool auto_connect: Determines if an authentication attempt will be made during instancing of this object. (Default: False)
  :param bool basic_auth: Sets authentication method to the CouchDB server to Basic. If basic authentication is used, auto_connect has no effect. (Default: False)

  :param dict custom_headers: Dictionary of custom headers to add to each request to the CouchDB server. (Default: None)
  """
    def __init__(self, **kwargs):
        self._host = kwargs.get('host', 'http://127.0.0.1')
        self._port = kwargs.get('port', 5984)
        self.address = f'{self._host}:{self._port}'

        self.custom_headers = kwargs.get('custom_headers',
                                         {})  # TODO: implement

        self._keep_alive = kwargs.get('keep_alive', 0)
        self._keep_alive_timeloop = Timeloop()
        self._keep_alive_timeloop.logger.setLevel('WARNING')

        self._name = kwargs.get('username', None)
        self._password = kwargs.get('password', None)
        self.auth_token = kwargs.get('auth_token', None)

        self._auto_connect = kwargs.get('auto_connect', False)

        self._basic_auth = kwargs.get('basic_auth',
                                      False)  # TODO: implement basic auth
        self._admin_party = kwargs.get('admin_party',
                                       False)  # TODO: implement admin party

        self._headers = {
            'Content-type': 'application/json',
            'Accept': 'application/json'
        }

        # reference to this object is required for the CouchDBDecorators.endpoint to be able to update the auth token
        self.session = self

        # TODO: implement a generic Error class to hold error information that consumer can check
        if (self._auto_connect is True and self._basic_auth is False):
            self.authenticate(data={
                'name': self._name,
                'password': self._password
            })

    def __del__(self):
        if (self._keep_alive > 0):
            self._keep_alive_timeloop.stop()

    def _create_basic_auth_header(self):
        return requests.auth.HTTPBasicAuth(self._name, self._password)(
            requests.Request()).headers

    def set_auth_token_from_headers(self, headers):
        # if a new auth token is issued, include it in the response, otherwise, return the original
        if ('Set-Cookie' in headers):
            self.auth_token = headers.get('Set-Cookie').split(
                ';', 2)[0].split('=')[1]

    @RelaxedDecorators.endpoint('/_session',
                                method='post',
                                data_keys={
                                    'name': str,
                                    'password': str
                                })
    def authenticate(self, doc):
        return doc

    @RelaxedDecorators.endpoint('/_session')
    def get_session_info(self, doc):
        return doc

    @RelaxedDecorators.endpoint('/_session', method='delete')
    def close(self, doc):
        return doc if isinstance(doc, CouchError) else None

    def authenticate_via_proxy(self, username, roles, token):
        """
    Not implemented. See:
      https://docs.couchdb.org/en/stable/api/server/authn.html#proxy-authentication
      https://stackoverflow.com/a/40499853/3169479 (for implementation details)
    """
        pass

    def renew_session(self):
        """
    Alias for get_session_info()
    """
        return self.get_session_info()

    def keep_alive(self, isEnabled=False):
        """
    Enables or disables keep alive.
    """
        if (isEnabled is False):
            self._keep_alive_timeloop.stop()
        elif (isEnabled and self._keep_alive > 0
              and self.auth_token is not None):
            if (len(self._keep_alive_timeloop.jobs) == 0):
                self._keep_alive_timeloop._add_job(
                    func=self.renew_session,
                    interval=timedelta(seconds=self._keep_alive))
                self._keep_alive_timeloop.start()
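
A sketch of the keep-alive flow this class supports; the server address and credentials are placeholders:

couchDB = Session(host='https://somehost.com', port=6984,
                  username='******', password='******',
                  keep_alive=290, auto_connect=True)
couchDB.keep_alive(True)   # renew /_session every 290 s via the Timeloop job
# ... work with the session ...
couchDB.keep_alive(False)  # stop the renewal loop
couchDB.close()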
Example #6
                       MAX_ITERATIONS,
                       beta=1.1,
                       error_on=error,
                       error_start=args.error_start_index,
                       error_duration=10,
                       max_retries=NODES * 4,
                       manager=manager,
                       network_id=NETWORK_ID,
                       use_api=API,
                       api_address=API_ADDRESS))

        start_time = None
        end_time = None
        try:
            tl = Timeloop()
            tl._add_job(log_status, interval=timedelta(milliseconds=5000))
            tl.start()
            start_time = timer()
            for s in servers:
                s.daemon = True
                s.start()

            while True:
                data = [s._j.k() for s in servers]
                if np.min(data) >= MAX_ITERATIONS:
                    break
                time.sleep(1)
        finally:
            end_time = timer()
            tl.stop()
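
log_status is not shown in this excerpt; a plausible shape for the periodic job, assuming servers is in scope, might be:

def log_status():
    # Hypothetical status logger; the real log_status is defined elsewhere.
    iterations = [s._j.k() for s in servers]
    logging.info('iteration progress: min=%d max=%d', min(iterations), max(iterations))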
Example #7
class Server(object):
    def __init__(self, config: Config, signals: np.ndarray, output = None, interval = 1000, new_reference_signal_each_k = 5, max_iterations = 1200, error_on = False, error_start = 0, error_duration = 10, network_id = 2):
        """
            Creates a Server object using:
            str:host\t host address
            int:port\t port where server runs
            int:id\t ID of the Server (used in numpy to locate position in matrix)
            np.array: adjacency\t Adjacency matrix of the whole/sub system
            int:signal\t Initial value for the node to start
            dict:out_neighbors\t contains all outneighbor host addresses in key property
            bool: instant_start\t whether the server should start immediately

            Returns: Server(object).
        """

        logger = logging.getLogger(name='Server.__init__')

        manager = Manager() # used to synchronize data between processes and threads
        self._host = config.host
        self._port = config.port
        self.__address = (config.host, config.port)
        self._adjacency = config.adjacency
        self._id = config.id
        self._laplacian = utility.calculate_laplacian(self._adjacency)
        if not utility.check_laplacian(self._laplacian):
            raise BaseException("No valid laplacian")
        # self._beta = 1/np.max(np.linalg.eigvals(self._laplacian)) # calculates beta, moved to utility
        self._beta = utility.calculate_beta(self._laplacian)
        self._server_socket = socket(AF_INET, SOCK_STREAM)
        self._server_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        self._server_socket.bind(self.__address)
        self._j = J(0, signals[0], config.out_neighbors, manager)
        self._neighbor_states = manager.dict()
        self._interval = interval
        self._interval_sec = float(interval / 1000.0)
        self.__signals = signals
        self._new_reference_signal_each_k = new_reference_signal_each_k
        self.__output = output
        self.__max_iterations = max_iterations
        self.__running = False
        self.__neighbor_out_connections = manager.dict()
        self.__error_on = error_on
        self.__error_start = error_start
        self.__error_duration = error_duration
        self.__API_URL = 'http://10.0.2.2:7071'
        self.__NETWORK_ID = network_id
        self.__API_QUEUE = manager.list()
        self.__API_QUEUE_LOCK = Lock()
        
        self.__tl = Timeloop() # allows to start recurring threads in a given time interval
        if config.instant_start:
            self.start()
        # logger.debug(self)
        logger.warn(f"Using beta {self._beta:24.20f}")

    def start(self):
        ''' Starts the server, the accept process, and the broadcast threads. '''
        if self.__running:
            return
        try:
            self.__running = True
            self._server_socket.listen(500)
            self.accept_process = Process(target=self.accept_connections)
            self.accept_process.daemon = True
            self.accept_process.start()
            self.__tl._add_job(self.broadcast, interval=timedelta(milliseconds=self._interval))
            self.__tl._add_job(self.callApi, interval=timedelta(seconds=2))
            self.__tl.start()
        except BaseException as e:
            logging.getLogger(name='start').error(str(e))
            self.stop()

    def stop(self):
        ''' Stopping all processes and threads. '''
        if not self.__running:
            return
        self.__running = False
        # self.accept_process.terminate()
        self.accept_process.join(2)
        self._server_socket.close()

    def accept_connections(self):
        ''' Waits for new connections. Creates a handling Thread for all new connections. '''
        logger = logging.getLogger(name='Server:accept_connections')
        while True:
            try:
                logger.info("Waiting for new connection...")
                client, client_address = self._server_socket.accept()
                if self._j.k() < self.__max_iterations:
                    thread = Thread(target=self.handle_connection, args=(client,client_address))
                    thread.daemon = True
                    logger.info(f"Retrieving information from\t{client_address}\tat host {self.__address}")
                    thread.start()
                else:
                    return
            except IOError as e:
                if e.errno == errno.EPIPE:
                    return
                logger.error(str(e))

    def handle_connection(self, client: socket, client_address:(str, int)):
        ''' Handles each incoming connection and stores the message. '''
        logger = logging.getLogger(name='Server:handle_connection')
        while True:
            try:
                msg = self.receive(client)
                sender = msg[0]
                key = msg[2]
                state = msg[1]

                self._neighbor_states[(sender, key)] = state # sample item created: (('127.0.0.1', 4), 451.75)
                
                logger.info(f"[{self._host}]: Stored [{(sender, key)}]: {str(state)}")
                logger.debug(f"Node {self._id} has states {self._neighbor_states}")
                if key == self.__max_iterations:
                    break
            except OSError as e:
                if e.errno == errno.EPIPE:
                    break
                else:
                    logger.error(e)

    # @self.__tl.job(interval=timedelta(milliseconds=args.time))
    def broadcast(self):
        ''' Gets called by the Timeloop class. Creates a Thread which handles computation of new state and broadcasting. '''
        try:
            if self._j.k() < self.__max_iterations:
                # use the following to start the broadcast function in a new thread -> timeloop will continue immediately instead of waiting for it to finish
                # logging.getLogger(name='Server:broadcast').info("broadcast job current time: %s with data: %s" % (time.ctime(), str(self._neighbor_states)))
                # thread = Thread(target=self.broadcast_thread) #, args=(self._j, self._neighbor_states, self._adjacency)
                # thread.daemon = True
                # thread.start()
                # use this to have only one broadcast thread at once
                self.broadcast_thread()
            else:
                self.stop()
        except OSError as e:
            if e.errno == errno.EPIPE or e.errno == errno.ECONNRESET:
                self.stop()
            else:
                raise e

    def broadcast_thread(self):
        ''' Initially sends message to all neighbor nodes. Calculates new state each round and distributes it. '''
        logger = logging.getLogger(name='Server:broadcast_thread')
        relevant_states = dict(filter(
            lambda elem: elem[0][1] == self._j.k(),
            self._neighbor_states.items()
        )) # states with same k as own state 
        relevant_states = dict(map(
            lambda elem: (elem[0][0],float(elem[1])),
            relevant_states.items()
        )) # map dict to remove k from key variable (tuple): ((str, int), float) -> (str, float)

        if self._j.k() == 0:
            logger.info(f"Node {self._id} initially broadcasts its value.")
            self.distribute_state()

        x = np.zeros(np.shape(self._adjacency)[0])
        if len(relevant_states) == len(self._j.neighbors):
            logger.debug(f"{self._j.neighbors.items()}")
            for neighbor in relevant_states.keys():
                i = self._j.neighbors[neighbor]
                logger.info(f"{neighbor} > {i}")
                np.put(x, int(i), relevant_states[neighbor])

            np.put(x, self._id, self._j.state())

            # loading new reference signals
            sig_nr = int(self._j.k()/self._new_reference_signal_each_k)
            if self._j.k() > 0 and sig_nr < 200:
                if self._j.k() % self._new_reference_signal_each_k == 0:
                    self._j.set_reference_signal(self.__signals[sig_nr])
                else:
                    self._j.set_reference_signal(self._j.reference_signal())

            # calculating new state
            x_new = utility.calculate_iteration(self._id, self._laplacian, x, self._j.diff(), self._beta)
            self._j.increment_k()
            self._j.set_state(x_new)
            self.distribute_state()
            
            # add log to api queue
            with self.__API_QUEUE_LOCK:
                self.__API_QUEUE.append(
                    {
                        'nodeId': self._host,
                        'port': self._port,
                        'state': float(np.real(x_new)),
                        'neighborStates': relevant_states,
                        'iteration': self._j.k(),
                        'timestamp': datetime.utcnow().__str__(),
                        'networkId': self.__NETWORK_ID,
                        'referenceSignal': self._j.reference_signal()
                    }
                )
            

            logger.debug(f" UPDATED: Node {self._id}: {self._j.state()} | Others: {self._neighbor_states}")

            # log to output if specified
            if self.__output is not None:
                try:
                    self.__output.write(str(float(self._j.state())) + '\n')
                except BaseException as e:
                    logger.error("Unable to write to output.\n" + str(e))
        else:
            logger.info('Rebroadcasting on node: ' + self._host)
            self.distribute_state() # sends the new state to the neighbor nodes
            time.sleep(self._interval_sec)
        

        # Remove all data from previous calculations (session_key < k).
        states_to_remove = dict(filter(
            lambda elem: elem[0][1] < self._j.k(), # elem[0] is key of dict and consists of node name and session key k
            self._neighbor_states.items()
        ))
        
        for state in states_to_remove.items():
            try:
                del self._neighbor_states[state[0]]
            except KeyError:
                logger.warn(f"Key '{state[0]}' not found.")

    def distribute_state(self):
        ''' Sends the current state to all neighbor nodes. '''
        if self._j.k() >= self.__max_iterations:
            return
        for neighbor in self._j.neighbors.keys():
            try:
                if neighbor not in self.__neighbor_out_connections:
                    if (neighbor, self.__max_iterations) not in self._neighbor_states:
                        client_socket = socket(AF_INET, SOCK_STREAM)
                        client_socket.connect((neighbor, self._port))
                        self.__neighbor_out_connections[neighbor] = client_socket
                    else:
                        continue

                # adding error to own signal if selected
                error = 0.0
                if self.__error_on:
                    if self._j.k() >= self.__error_start and self._j.k() < self.__error_duration:
                        error = utility.calculate_error(self._j.k())
                        logging.debug(f"Adding error on node {self._id}: {error}")
                self.send(self.__neighbor_out_connections[neighbor], message=(self._host, float(self._j.state()+error), self._j.k()))
            except Exception as e:
                logging.getLogger(name='Server:distribute_state').debug(str(e))
                try:
                    self.__neighbor_out_connections[neighbor].close()
                except Exception as e:
                    pass
                finally:
                    del self.__neighbor_out_connections[neighbor]
                    logging.getLogger(name='Server:distribute_state').warning('Deleting socket connection for ' + neighbor)


    def send(self, channel: socket, message: object ):
        ''' Sends a message to another socket using JSON. '''
        try:
            msg = json.dumps(message)
            logging.info(f"sending data from {self._host}:\t{msg}")
            channel.send(struct.pack("i", len(msg)) + bytes(msg, "utf8"))
            return True
        except OSError as e:
            logging.error(e)
            return False

    def receive(self, channel: socket ) -> (str, float, int):
        ''' Receives a message from another socket using JSON. '''
        recv = channel.recv(struct.calcsize("i"))
        if len(recv) < 4:
            raise OSError(errno.EPIPE, 'Empty message size', str(len(recv)))
        size = struct.unpack("i", recv)[0]
        data = ""
        while len(data) < size:
            msg = channel.recv(size - len(data))
            if not msg:
                # A half-read frame means the peer went away; surface it as
                # EPIPE so callers handle it like the other disconnect paths.
                raise OSError(errno.EPIPE, 'Connection closed mid-message')
            data += msg.decode("utf8")
        logging.info(f"receiving data at {self._host}:\t{str(data)}")
        return json.loads( data.strip() )

    def callApi(self):
        try:
            with self.__API_QUEUE_LOCK:
                data = list(self.__API_QUEUE)
                self.__API_QUEUE[:] = []
            if len(data) > 0:
                requests.post(f"{self.__API_URL}/api/log", json=data)
        except ValueError as ve:
            logging.getLogger(name='callApi: ').error(ve)
        except ConnectionError as ce:
            logging.getLogger(name='callApi: ').error(ce)
        except TimeoutError as te:
            logging.getLogger(name='callApi: ').error(te)
        except requests.exceptions.RequestException as re:
            logging.getLogger(name='callApi: ').error(re)
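
send and receive frame every JSON payload with a 4-byte length prefix (struct format "i") followed by UTF-8 JSON. A minimal standalone client speaking the same framing, with a placeholder address:

import json
import struct
from socket import AF_INET, SOCK_STREAM, socket

# Placeholder peer; a Server instance must be listening on this address.
with socket(AF_INET, SOCK_STREAM) as sock:
    sock.connect(('127.0.0.1', 9000))
    payload = json.dumps(('127.0.0.1', 42.0, 0))  # (sender, state, k) as in Server.send
    sock.send(struct.pack('i', len(payload)) + payload.encode('utf8'))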