Example No. 1
def pre_build_engine() -> None:
    engine_running = Event()
    executor = GeventExecutor()
    Engine.handle(executor, lambda: engine_running.set())
    engine_running.wait()
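Engine, GeventExecutor and the handle() hook above are project-specific; the core pattern is simply arming a gevent Event and blocking until a callback sets it. A minimal self-contained sketch of that handshake using only gevent:

import gevent
from gevent.event import Event


def wait_for_startup():
    started = Event()

    def start_component():
        # ... do the actual startup work here, then signal readiness
        started.set()

    gevent.spawn(start_component)
    # Block this greenlet until start_component() calls set(); a timeout
    # in seconds could also be passed to wait().
    started.wait()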
Example No. 2
    def test_derived_data_product(self):
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(
            name='ctd parsed', parameter_dictionary_id=pdict_id)
        self.addCleanup(self.pubsubcli.delete_stream_definition,
                        ctd_stream_def_id)

        dp = DataProduct(name='Instrument DP')
        dp_id = self.dpsc_cli.create_data_product(
            dp, stream_definition_id=ctd_stream_def_id)
        self.addCleanup(self.dpsc_cli.force_delete_data_product, dp_id)

        self.dpsc_cli.activate_data_product_persistence(dp_id)
        self.addCleanup(self.dpsc_cli.suspend_data_product_persistence, dp_id)

        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id,
                                                    predicate=PRED.hasDataset,
                                                    id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset does not exist" %
                           str(dp_id))
        dataset_id = dataset_ids[0]

        # Make the derived data product
        simple_stream_def_id = self.pubsubcli.create_stream_definition(
            name='TEMPWAT stream def',
            parameter_dictionary_id=pdict_id,
            available_fields=['time', 'temp'])
        tempwat_dp = DataProduct(name='TEMPWAT',
                                 category=DataProductTypeEnum.DERIVED)
        tempwat_dp_id = self.dpsc_cli.create_data_product(
            tempwat_dp,
            stream_definition_id=simple_stream_def_id,
            parent_data_product_id=dp_id)
        self.addCleanup(self.dpsc_cli.delete_data_product, tempwat_dp_id)
        # Check that the streams associated with the data product are persisted
        stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                   RT.Stream, True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        stream_id = stream_ids[0]
        route = self.pubsubcli.read_stream_route(stream_id=stream_id)

        rdt = RecordDictionaryTool(stream_definition_id=ctd_stream_def_id)
        rdt['time'] = np.arange(20)
        rdt['temp'] = np.arange(20)
        rdt['pressure'] = np.arange(20)

        publisher = StandaloneStreamPublisher(stream_id, route)

        dataset_modified = Event()

        def cb(*args, **kwargs):
            dataset_modified.set()

        es = EventSubscriber(event_type=OT.DatasetModified,
                             callback=cb,
                             origin=dataset_id,
                             auto_delete=True)
        es.start()
        self.addCleanup(es.stop)

        publisher.publish(rdt.to_granule())

        self.assertTrue(dataset_modified.wait(30))

        tempwat_dataset_ids, _ = self.rrclient.find_objects(tempwat_dp_id,
                                                            PRED.hasDataset,
                                                            id_only=True)
        tempwat_dataset_id = tempwat_dataset_ids[0]
        granule = self.data_retriever.retrieve(
            tempwat_dataset_id, delivery_format=simple_stream_def_id)
        rdt = RecordDictionaryTool.load_from_granule(granule)
        np.testing.assert_array_equal(rdt['time'], np.arange(20))
        self.assertEquals(set(rdt.fields), set(['time', 'temp']))
Example No. 3
workers = args['threads']
invunma = args['invunma']
grabactiv = args['grabber']
file_match = args['matchfile']
time_out = args['timeout']
resumer = args['resume']
p_mode = args['big']
scan_unknow_host = args["unknownhosts"]
grabb_all = args["grabball"]
snap_shot = args["snap"]

# monkey patching libs which are supported by gevent
gevent.monkey.patch_all()

#registering an event and signal handler
evt = Event()
signal.signal(signal.SIGINT, handler)

# init resources
init_ImapConfig()
if grabactiv:
    init_matchers()

#init of queues
q = gevent.queue.Queue(maxsize=50000)  #loader
q_valid = gevent.queue.Queue()  #valid
q_status = gevent.queue.Queue()  #status
if invunma:
    q_invalid = gevent.queue.Queue()  #invalid
    q_unmatched = gevent.queue.Queue()  #unmatched
if grabactiv:
Example No. 4
from interfaces import dispatcher, AppBlueprint
from walkoff.events import WalkoffEvent
from flask import Blueprint, jsonify, Response
from flask_jwt_extended import jwt_required
from gevent import sleep
from gevent.event import AsyncResult, Event
from datetime import datetime
from walkoff.security import jwt_required_in_query
from walkoff.helpers import create_sse_event

blueprint = AppBlueprint(blueprint=Blueprint('HelloWorldPage__', __name__))

hello_world_action_count = {}

action_event_json = AsyncResult()
action_signal = Event()

action_event_id_counter = 0


@dispatcher.on_app_actions('HelloWorld',
                           events=WalkoffEvent.ActionStarted,
                           weak=False)
def handle_action_start(data):
    global hello_world_action_count
    action_name = data['action_name']

    if action_name not in hello_world_action_count:
        hello_world_action_count[action_name] = 1
    else:
        hello_world_action_count[action_name] += 1
Example No. 5
 def __init__(self):
     Greenlet.__init__(self)
     self._stop_evt = Event()
     self.settings = None
Example No. 6
 def wait_for_message(self, message_type: Message, attributes: dict):
     assert not any(attributes == waiting.attributes
                    for waiting in self.waiting[message_type])
     event = Event()
     self.waiting[message_type].append(MessageWaiting(attributes, event))
     return event
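wait_for_message only registers the waiter and hands the Event back to the caller; the dispatch side is not shown. A hedged sketch of what it could look like, assuming MessageWaiting is an (attributes, event) pair and that attribute dicts are compared by equality:

 def notify_message(self, message_type, attributes):
     # Hypothetical dispatch side: wake every waiter whose expected
     # attributes match the incoming message, keep the rest queued.
     still_waiting = []
     for waiting in self.waiting[message_type]:
         if waiting.attributes == attributes:
             waiting.event.set()
         else:
             still_waiting.append(waiting)
     self.waiting[message_type] = still_waiting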
Example No. 7
import random
import time
from threading import Thread

from flask import Blueprint, Response
from gevent import sleep
from gevent.event import Event, AsyncResult

from interfaces import AppBlueprint

blueprint = AppBlueprint(blueprint=Blueprint('HelloWorldPage', __name__))
blueprint2 = AppBlueprint(blueprint=Blueprint('HelloWorldPage2', __name__),
                          rule='/<string:action>')

__sync_signal = Event()
random_event_result = AsyncResult()


def load(*args, **kwargs):
    return {}


# These blueprints will be registered with the Flask app and can be used to make your own endpoints.
@blueprint.blueprint.route('/test_blueprint')
def test_basic_blueprint():
    # This can be called using the url /apps/HelloWorld/test_blueprint
    return 'successfully called basic blueprint'


def random_number_receiver():
    while True:
Example No. 8
 def __init__(self):
     self.events = set()
     self.event = Event()
Example No. 9
 def __init__(self, listen_addr='', listen_port=53):
     print("Initializing DNSServer")
     self.resolutions = {}
     self.shutting_down = Event()
     self.listen_addr = listen_addr
     self.listen_port = listen_port
Example No. 10
    def __init__(self, chain, default_registry, private_key_bin, transport,
                 discovery, config):
        if not isinstance(private_key_bin,
                          bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        invalid_timeout = (
            config['settle_timeout'] < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN
            or config['settle_timeout'] > NETTINGCHANNEL_SETTLE_TIMEOUT_MAX)
        if invalid_timeout:
            raise ValueError('settle_timeout must be in range [{}, {}]'.format(
                NETTINGCHANNEL_SETTLE_TIMEOUT_MIN,
                NETTINGCHANNEL_SETTLE_TIMEOUT_MAX))

        self.token_to_channelgraph = dict()
        self.tokens_to_connectionmanagers = dict()
        self.manager_to_token = dict()

        self.identifier_to_statemanagers = defaultdict(list)
        self.identifier_to_results = defaultdict(list)

        # This is a map from a hashlock to a list of channels. The same
        # hashlock can be used in more than one token (for tokenswaps); a
        # channel should be removed from this list only when the lock is
        # released/withdrawn, not when the secret is registered.
        self.token_to_hashlock_to_channels = defaultdict(
            lambda: defaultdict(list))

        self.chain = chain
        self.default_registry = default_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)

        endpoint_registration_event = gevent.spawn(
            discovery.register,
            self.address,
            config['external_ip'],
            config['external_port'],
        )
        endpoint_registration_event.link_exception(
            endpoint_registry_exception_handler)

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.protocol = RaidenProtocol(
            transport,
            discovery,
            self,
            config['protocol']['retry_interval'],
            config['protocol']['retries_before_backoff'],
            config['protocol']['nat_keepalive_retries'],
            config['protocol']['nat_keepalive_timeout'],
            config['protocol']['nat_invitation_timeout'],
        )

        # TODO: remove this cyclic dependency
        transport.protocol = self.protocol

        self.message_handler = RaidenMessageHandler(self)
        self.state_machine_event_handler = StateMachineEventHandler(self)
        self.blockchain_events = BlockchainEvents()
        self.on_message = self.message_handler.on_message
        self.alarm = AlarmTask(chain)
        self.shutdown_timeout = config['shutdown_timeout']
        self._block_number = None
        self.stop_event = Event()
        self.start_event = Event()
        self.chain.client.inject_stop_event(self.stop_event)

        self.transaction_log = StateChangeLog(
            storage_instance=StateChangeLogSQLiteBackend(
                database_path=config['database_path']))
        self.wal = None

        self.database_path = config['database_path']
        if self.database_path != ':memory:':
            self.database_dir = os.path.dirname(self.database_path)
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.snapshot_dir = os.path.join(self.database_dir, 'snapshots')
            self.serialization_file = os.path.join(self.snapshot_dir,
                                                   'data.pickle')

            if not os.path.exists(self.snapshot_dir):
                os.makedirs(self.snapshot_dir)

            # Prevent concurrent access to the same db
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_dir = None
            self.lock_file = None
            self.snapshot_dir = None
            self.serialization_file = None
            self.db_lock = None

        # If the endpoint registration fails the node will quit; this must
        # finish before starting the protocol
        endpoint_registration_event.join()

        self.start()
Example No. 11
 def __init__(self, *args, **kwargs):
     self.items = list()
     self.event = Event()
     self.event.set()
Example No. 12
 def __init__(self):
     self.read = []
     self.write = []
     self.event = Event()
Example No. 13
import logging
from logging import NullHandler

from gevent.event import Event

__ssl__ = __import__('ssl')

try:
    _ssl = __ssl__._ssl
except AttributeError:
    _ssl = __ssl__._ssl2

KEYFILE = 'ava-keys.yml'


logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())

__agent = None

agent_running = Event()
agent_stopped = Event()


def new_sslwrap(sock, server_side=False, keyfile=None, certfile=None,
                cert_reqs=__ssl__.CERT_NONE,
                ssl_version=__ssl__.PROTOCOL_SSLv23, ca_certs=None,
                ciphers=None):
    context = __ssl__.SSLContext(ssl_version)
    context.verify_mode = cert_reqs or __ssl__.CERT_NONE
    if ca_certs:
        context.load_verify_locations(ca_certs)
    if certfile:
        context.load_cert_chain(certfile, keyfile)
    if ciphers:
        context.set_ciphers(ciphers)
Example No. 14
# -*- coding: utf-8 -*-
import os
import re
import threading
from datetime import datetime
from gevent.event import Event
from subprocess import Popen, PIPE

from django.db import models
from django.conf import settings
from django.core.cache import cache
from django.contrib.auth.models import User

from example.builders import Registry

got_build_output = Event()
build_started = Event()
build_finished = Event()


class GitBuilder(Registry):
    name = 'Git Builder'
    scheme = 'git://'


class Project(models.Model):
    name = models.CharField(max_length=100)
    backend = models.CharField(max_length=100, choices=Registry.as_choices())
    created_at = models.DateTimeField(auto_now_add=True)
    last_changed = models.DateTimeField(null=True, blank=True)
    build_command = models.TextField()
Example No. 15
from gevent.event import Event
from gevent.queue import Queue
from ping_pong_chat.aio_queue import AGQueue

received_event = Event()
leave_rooms_event = Event()
exit_event = Event()
output_message_queue = AGQueue()
input_message_queue = AGQueue()

matrix_to_aio_queue = AGQueue()
aio_to_matrix_queue = AGQueue()
sync_to_matrix_queue = Queue()

SERVER_URL = "https://transport.transport01.raiden.network"
Example No. 16
    def __init__(
            self,
            chain: BlockChainService,
            default_registry: TokenNetworkRegistry,
            default_secret_registry: SecretRegistry,
            private_key_bin,
            transport,
            config,
            discovery=None,
    ):
        if not isinstance(private_key_bin, bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        invalid_timeout = (
            config['settle_timeout'] < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN or
            config['settle_timeout'] > NETTINGCHANNEL_SETTLE_TIMEOUT_MAX
        )
        if invalid_timeout:
            raise ValueError('settle_timeout must be in range [{}, {}]'.format(
                NETTINGCHANNEL_SETTLE_TIMEOUT_MIN, NETTINGCHANNEL_SETTLE_TIMEOUT_MAX,
            ))

        self.tokennetworkids_to_connectionmanagers = dict()
        self.identifier_to_results = defaultdict(list)

        self.chain: BlockChainService = chain
        self.default_registry = default_registry
        self.default_secret_registry = default_secret_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)
        self.discovery = discovery

        if config['transport_type'] == 'udp':
            endpoint_registration_event = gevent.spawn(
                discovery.register,
                self.address,
                config['external_ip'],
                config['external_port'],
            )
            endpoint_registration_event.link_exception(endpoint_registry_exception_handler)

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.transport = transport

        self.blockchain_events = BlockchainEvents()
        self.alarm = AlarmTask(chain)
        self.shutdown_timeout = config['shutdown_timeout']
        self.stop_event = Event()
        self.start_event = Event()
        self.chain.client.inject_stop_event(self.stop_event)

        self.wal = None

        self.database_path = config['database_path']
        if self.database_path != ':memory:':
            database_dir = os.path.dirname(config['database_path'])
            os.makedirs(database_dir, exist_ok=True)

            self.database_dir = database_dir
            # Prevent concurrent access to the same db
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_path = ':memory:'
            self.database_dir = None
            self.lock_file = None
            self.serialization_file = None
            self.db_lock = None

        if config['transport_type'] == 'udp':
            # If the endpoint registration fails the node will quit; this must
            # finish before starting the transport
            endpoint_registration_event.join()

        self.event_poll_lock = gevent.lock.Semaphore()

        self.start()
Example No. 17
    def __init__(self, sock, address, id, stratum_clients, config, net_state,
                 server_state, celery):
        self.logger.info("Recieving stratum connection from addr {} on sock {}"
                         .format(address, sock))

        # Seconds before sending keepalive probes
        sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE, 120)
        # Interval in seconds between keepalive probes
        sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL, 1)
        # Failed keepalive probes before declaring the other end dead
        sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPCNT, 5)

        # global items
        self.config = config
        self.net_state = net_state
        self.stratum_clients = stratum_clients
        self.server_state = server_state
        self.celery = celery

        # register client into the client dictionary
        self.sock = sock

        # flags for current connection state
        self._disconnected = False
        self.authenticated = False
        self.subscribed = False
        self.address = None
        self.worker = ''
        # the worker id. this is also extranonce 1
        self.id = hexlify(struct.pack('Q', id))
        self.stratum_clients[self.id] = self
        # subscription id for difficulty on stratum
        self.subscr_difficulty = None
        # subscription id for work notif on stratum
        self.subscr_notify = None

        # all shares keyed by timestamp. will get flushed after a period
        # specified in config
        self.valid_shares = {}
        self.dup_shares = {}
        self.stale_shares = {}
        self.low_diff_shares = {}
        # running total for vardiff
        self.accepted_shares = 0
        # debugging entry
        self.transmitted_shares = 0
        # an index of jobs and their difficulty
        self.job_mapper = {}
        # last time we sent graphing data to the server
        self.time_seed = random.uniform(0, 10)  # a random value to jitter timings by
        self.last_graph_transmit = time() - self.time_seed
        self.last_diff_adj = time() - self.time_seed
        self.difficulty = self.config['start_difficulty']
        # the next diff to be used by push job
        self.next_diff = self.config['start_difficulty']
        self.connection_time = int(time())
        self.msg_id = None

        # trigger to send a new block notice to a user
        self.new_block_event = None
        self.new_block_event = Event()
        self.new_block_event.rawlink(self.new_block_call)
        self.new_work_event = None
        self.new_work_event = Event()
        self.new_work_event.rawlink(self.new_work_call)

        # where we put all the messages that need to go out
        self.write_queue = Queue()
        write_greenlet = None

        try:
            self.peer_name = sock.getpeername()
            self.fp = sock.makefile()
            write_greenlet = spawn(self.write_loop)
            self.read_loop()
        except socket.error:
            self.logger.debug("Socket error closing connection", exc_info=True)
        except Exception:
            self.logger.error("Unhandled exception!", exc_info=True)
        finally:
            if write_greenlet:
                write_greenlet.kill()

            try:
                self.sock.shutdown(socket.SHUT_RDWR)
            except socket.error:
                pass
            try:
                self.fp.close()
                self.sock.close()
            except (socket.error, AttributeError):
                pass
            self.report_shares(flush=True)
            self.server_state['stratum_disconnects'].incr()
            del self.stratum_clients[self.id]
            addr_worker = (self.address, self.worker)
            # clear the worker from the luts
            try:
                del self.stratum_clients['addr_worker_lut'][addr_worker]
            except KeyError:
                pass
            try:
                # remove from lut for address
                self.stratum_clients['address_lut'][self.address].remove(self)
                # delete the list if it's empty
                if not len(self.stratum_clients['address_lut'][self.address]):
                    del self.stratum_clients['address_lut'][self.address]
            except (ValueError, KeyError):
                pass

            self.logger.info("Closing connection for client {}".format(self.id))
Example No. 18
from gevent.event import Event

from gluon.template import render
from gluon.dal import Field
from gluon.globals import current
from gluon.serializers import json

new_message_event = Event()

MESSAGE_TEMPLATE = ("""<div class="message" id="m{{ =message['id'] }}">"""
                    """<b>{{ =message['me_from'] }}: </b>"""
                    """<pre>{{ =message['me_body'] }}</pre></div>""")


def _create_message(db, from_, body):
    data = {'me_from': from_, 'me_body': body}
    mid = db.chat.insert(**data)
    data['id'] = str(mid)

    data['me_html'] = render(MESSAGE_TEMPLATE, context={'message': data})
    db(db.chat.id == mid).update(me_html=data['me_html'])
    return data


# main page that renders the chat
def index(db):
    chat_set = db(db.chat.id > 0)
    # how many messages do we have?
    n = chat_set.count()
    # get the last ten
    rows = chat_set.select(limitby=(n - 10, 10))
Example No. 19
 def __init__(self, *args, **kwargs):
     super(GeventCursor, self).__init__(*args, **kwargs)
     self.new_response = Event()
Example No. 20
    # exiting because of timeout (the spawned greenlet still runs)
    for _ in xrange(2):
        x = gevent.spawn_later(10, lambda: 5)
        with expected_time(SMALL):
            result = gevent.wait(timeout=SMALL)
        assert result is False, repr(result)
        assert not x.dead, x
        x.kill()
        with no_time():
            result = gevent.wait()
        assert result is True

    # exiting because of event (the spawned greenlet still runs)
    for _ in xrange(2):
        x = gevent.spawn_later(10, lambda: 5)
        event = Event()
        event_set = gevent.spawn_later(SMALL, event.set)
        with expected_time(SMALL):
            result = gevent.wait([event])
        assert result == [event], repr(result)
        assert not x.dead, x
        assert event_set.dead
        assert event.is_set()
        x.kill()
        with no_time():
            result = gevent.wait()
        assert result is True

    # checking "ref=False" argument
    for _ in xrange(2):
        gevent.get_hub().loop.timer(10, ref=False).start(lambda: None)
Example No. 21
 def pause(self):
     if self._pause_waiter is None:
         self._pause_waiter = Event()
     self._pause_waiter.clear()
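pause() only arms the waiter; a matching resume would normally release anything blocked on it. A minimal hypothetical counterpart, assuming paused code blocks on self._pause_waiter.wait() at its checkpoints:

 def resume(self):
     # Hypothetical counterpart to pause(): wake greenlets blocked on the waiter.
     if self._pause_waiter is not None:
         self._pause_waiter.set()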
Example No. 22
 def __init__(self, web3, mine_sleep=1):
     super().__init__()
     self.web3 = web3
     self.mine_sleep = mine_sleep
     self.stop = Event()
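The stop Event above gives this Greenlet subclass a clean exit condition. A sketch of a matching _run loop under that assumption (the actual mining call is project-specific and left as a placeholder comment; gevent is assumed to be imported):

 def _run(self):
     # Loop until stop is signalled, yielding to other greenlets between rounds.
     while not self.stop.is_set():
         # ... trigger one round of mining via self.web3 here (project-specific)
         gevent.sleep(self.mine_sleep)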
Example No. 23
import json
from flask import Blueprint, Response
from gevent.event import Event, AsyncResult
from gevent import sleep
from core.case.callbacks import WorkflowShutdown, FunctionExecutionSuccess, StepExecutionError
from datetime import datetime
from server.security import jwt_required_in_query
import server.workflowresults  # do not delete; needed to register callbacks

workflowresults_page = Blueprint('workflowresults_page', __name__)

__workflow_shutdown_event_json = AsyncResult()
__workflow_step_event_json = AsyncResult()
__sync_signal = Event()
__step_signal = Event()


def __workflow_shutdown_event_stream():
    while True:
        data = __workflow_shutdown_event_json.get()
        yield 'data: %s\n\n' % data
        __sync_signal.wait()


def __workflow_steps_event_stream():
    while True:
        data = __workflow_step_event_json.get()
        yield 'data: %s\n\n' % data
        __step_signal.wait()

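The two generators above only yield SSE frames; they still have to be exposed as streaming endpoints. A hedged sketch of one possible wiring (the route path is hypothetical, and the jwt_required_in_query guard imported above would normally protect it as well):

@workflowresults_page.route('/stream/workflow-results')
def stream_workflow_results():
    # text/event-stream keeps the response open so an EventSource client
    # receives each 'data: ...' frame as the generator yields it.
    return Response(__workflow_shutdown_event_stream(),
                    mimetype='text/event-stream')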
Example No. 24
    def rc_tokenLogin(self, processor, account, token, serverno, platform):
        """玩家令牌登陆"""
        if (account, token) in self.account_token:
            return 0, errcode.EC_TOKEN_ERR

        self.account_token.append((account, token))
        # first verify that the token is valid
        if config.serverNo == "039999":  # skip token verification, used for stress testing
            chn = 3
        else:
            request = utility.post(config.login_url,
                                   json=dict(uid=account,
                                             token=token,
                                             server_id=config.serverNo))
            task = spawn(request.send)
            gevent.joinall([task], timeout=3)

            try:
                respData = ujson.loads(request.response.content)
                request.response.close()
                request.session.close()
            except Exception:
                return 0, errcode.EC_LOGIN_ERR

            rs = respData.get("success", 0)
            chn = respData.get("channel_id", 0)
            err = respData.get("err_msg", "")
            if not rs:
                Game.glog.log2File(
                    "rc_tokenLoginError", "%s|%s|%s|%s|%s" %
                    (config.login_url, config.serverNo, account, token, err))
                return 0, errcode.EC_TOKEN_ERR

        d = Game.store.query_loads(Player.DATA_CLS.TABLE_NAME,
                                   dict(account=account))

        if not d:
            data = dict(account=account,
                        channel=chn,
                        platform=platform,
                        newTime=current_time())
            try:
                _id = Game.store.insert(Player.DATA_CLS.TABLE_NAME, data)
                data['id'] = _id
                d = [data]
            except Exception:
                return 0, errcode.EC_SAME_ACCOUNT
        if not d:
            return 0, errcode.EC_LOGIN_ERR
        # log.debug("%s", d)
        pid = None
        for one in d:
            pid = one.get('id')
            if not pid:
                continue
            if str(pid)[-6:] == serverno:
                break
        if not pid:
            return 0, errcode.EC_PLAYER_EMPTY
        self.pid_login_waiter[pid] = Event()
        rs, d = self._login(processor, pid)
        if not rs:
            return rs, d
        if pid in self.pid_login_waiter:
            waiter = self.pid_login_waiter[pid]
            waiter.wait(10)
        if pid in self.pid_login_waiter:
            self.pid_login_waiter.pop(pid, None)
        self.account_token.remove((account, token))
        return rs, d
Example No. 25
def run(config_filename, passphrases):
    log.info("Running with pid %i", os.getpid())

    # Start our worker pool now, before we create our sockets for the web app
    # otherwise the workers inherit the file descriptors for the http(s)
    # socket and we have problems shutting down cleanly
    global _sha1sum_worker_pool
    if not _sha1sum_worker_pool:
        _sha1sum_worker_pool = multiprocessing.Pool(None, init_worker)
    app = None
    listener = None
    server = None
    backdoor = None
    handler = None
    backdoor_state = {}
    while True:
        log.info("Loading configuration")
        config = load_config(config_filename)
        if not app:
            app = SigningServer(config, passphrases)
        else:
            app.load_config(config)

        listen_addr = (config.get('server', 'listen'),
                       config.getint('server', 'port'))
        if not listener or listen_addr != listener.getsockname():
            if listener and server:
                log.info(
                    "Listening address has changed, stopping old wsgi server")
                log.debug("Old address: %s", listener.getsockname())
                log.debug("New address: %s", listen_addr)
                server.stop()
            listener = gevent.socket.socket()
            listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            listener.bind(listen_addr)
            listener.listen(256)

        server = create_server(app, listener, config)

        backdoor_state['server'] = server
        backdoor_state['app'] = app

        if config.has_option('server', 'backdoor_port'):
            backdoor_port = config.getint('server', 'backdoor_port')
            if not backdoor or backdoor.server_port != backdoor_port:
                if backdoor:
                    log.info("Stopping old backdoor on port %i",
                             backdoor.server_port)
                    backdoor.stop()
                log.info("Starting backdoor on port %i", backdoor_port)
                backdoor = gevent.backdoor.BackdoorServer(
                    ('127.0.0.1', backdoor_port), locals=backdoor_state)
                gevent.spawn(backdoor.serve_forever)

        # Handle SIGHUP
        # Create an event to wait on
        # Our SIGHUP handler will set the event, allowing us to continue
        sighup_event = Event()
        h = gevent.signal(signal.SIGHUP, lambda e: e.set(), sighup_event)
        if handler:
            # Cancel our old handler
            handler.cancel()
        handler = h
        log.info("Serving on %s", repr(server))
        try:
            gevent.spawn(server.serve_forever)
            # Wait for SIGHUP
            sighup_event.wait()
        except KeyboardInterrupt:
            break
    log.info("pid %i exiting normally", os.getpid())
Example No. 26
    def __init__(
            self,
            chain: BlockChainService,
            query_start_block: BlockNumber,
            default_registry: TokenNetworkRegistry,
            default_secret_registry: SecretRegistry,
            transport,
            raiden_event_handler,
            message_handler,
            config,
            discovery=None,
    ):
        super().__init__()
        self.tokennetworkids_to_connectionmanagers = dict()
        self.targets_to_identifiers_to_statuses: StatusesDict = defaultdict(dict)

        self.chain: BlockChainService = chain
        self.default_registry = default_registry
        self.query_start_block = query_start_block
        self.default_secret_registry = default_secret_registry
        self.config = config

        self.signer: Signer = LocalSigner(self.chain.client.privkey)
        self.address = self.signer.address
        self.discovery = discovery
        self.transport = transport

        self.blockchain_events = BlockchainEvents()
        self.alarm = AlarmTask(chain)
        self.raiden_event_handler = raiden_event_handler
        self.message_handler = message_handler

        self.stop_event = Event()
        self.stop_event.set()  # inits as stopped

        self.wal = None
        self.snapshot_group = 0

        # This flag will be used to prevent the service from processing
        # state changes events until we know that pending transactions
        # have been dispatched.
        self.dispatch_events_lock = Semaphore(1)

        self.contract_manager = ContractManager(config['contracts_path'])
        self.database_path = config['database_path']
        if self.database_path != ':memory:':
            database_dir = os.path.dirname(config['database_path'])
            os.makedirs(database_dir, exist_ok=True)

            self.database_dir = database_dir

            # Two raiden processes must not write to the same database, even
            # though the database itself may be consistent. If more than one
            # node writes state changes to the same WAL there are no
            # guarantees about recovery; this happens because during recovery
            # the WAL replay cannot be deterministic.
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_path = ':memory:'
            self.database_dir = None
            self.lock_file = None
            self.serialization_file = None
            self.db_lock = None

        self.event_poll_lock = gevent.lock.Semaphore()
        self.gas_reserve_lock = gevent.lock.Semaphore()
        self.payment_identifier_lock = gevent.lock.Semaphore()
Example No. 27
    def test_activate_suspend_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(
            name='Simulated CTD data', parameter_dictionary_id=pdict_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product w/o a stream definition
        #------------------------------------------------------------------------------------------------
        # Construct temporal and spatial Coordinate Reference System objects

        dp_obj = IonObject(RT.DataProduct,
                           name='DP1',
                           description='some new dp')

        log.debug("Created an IonObject for a data product: %s" % dp_obj)

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product(
            data_product=dp_obj, stream_definition_id=ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # Subscribe to persist events
        #------------------------------------------------------------------------------------------------
        queue = gevent.queue.Queue()

        def info_event_received(message, headers):
            queue.put(message)

        es = EventSubscriber(event_type=OT.InformationContentStatusEvent,
                             callback=info_event_received,
                             origin=dp_id,
                             auto_delete=True)
        es.start()
        self.addCleanup(es.stop)

        #------------------------------------------------------------------------------------------------
        # test activate and suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.activate_data_product_persistence(dp_id)

        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)

        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id,
                                                    predicate=PRED.hasDataset,
                                                    id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset does not exist" %
                           str(dp_id))
        dataset_id = dataset_ids[0]

        # Check that the streams associated with the data product are persisted
        stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                   RT.Stream, True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        stream_id = stream_ids[0]
        route = self.pubsubcli.read_stream_route(stream_id=stream_id)

        rdt = RecordDictionaryTool(stream_definition_id=ctd_stream_def_id)
        rdt['time'] = np.arange(20)
        rdt['temp'] = np.arange(20)

        publisher = StandaloneStreamPublisher(stream_id, route)

        dataset_modified = Event()

        def cb(*args, **kwargs):
            dataset_modified.set()

        es = EventSubscriber(event_type=OT.DatasetModified,
                             callback=cb,
                             origin=dataset_id,
                             auto_delete=True)
        es.start()
        self.addCleanup(es.stop)

        publisher.publish(rdt.to_granule())

        self.assertTrue(dataset_modified.wait(30))

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to start_retrieve
        #--------------------------------------------------------------------------------

        replay_data = self.data_retriever.retrieve(dataset_ids[0])
        self.assertIsInstance(replay_data, Granule)

        log.debug(
            "The data retriever was able to replay the dataset that was attached to the data product "
            "we wanted to be persisted. Therefore the data product was indeed persisted with "
            "otherwise we could not have retrieved its dataset using the data retriever. Therefore "
            "this demonstration shows that L4-CI-SA-RQ-267 is satisfied: 'Data product management shall persist data products'"
        )

        data_product_object = self.rrclient.read(dp_id)
        self.assertEquals(data_product_object.name, 'DP1')
        self.assertEquals(data_product_object.description, 'some new dp')

        log.debug(
            "Towards L4-CI-SA-RQ-308: 'Data product management shall persist data product metadata'. "
            " Attributes in create for the data product obj, name= '%s', description='%s', match those of object from the "
            "resource registry, name='%s', desc='%s'" %
            (dp_obj.name, dp_obj.description, data_product_object.name,
             data_product_object.description))

        #------------------------------------------------------------------------------------------------
        # test suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.suspend_data_product_persistence(dp_id)

        dataset_modified.clear()

        rdt['time'] = np.arange(20, 40)

        publisher.publish(rdt.to_granule())
        self.assertFalse(dataset_modified.wait(2))

        self.dpsc_cli.activate_data_product_persistence(dp_id)
        dataset_modified.clear()

        publisher.publish(rdt.to_granule())
        self.assertTrue(dataset_modified.wait(30))

        granule = self.data_retriever.retrieve(dataset_id)
        rdt = RecordDictionaryTool.load_from_granule(granule)
        np.testing.assert_array_almost_equal(rdt['time'], np.arange(40))

        dataset_ids, _ = self.rrclient.find_objects(dp_id,
                                                    PRED.hasDataset,
                                                    id_only=True)
        self.assertEquals(len(dataset_ids), 1)

        self.dpsc_cli.suspend_data_product_persistence(dp_id)
        self.dpsc_cli.force_delete_data_product(dp_id)
        # now try to get the deleted dp object

        with self.assertRaises(NotFound):
            dp_obj = self.rrclient.read(dp_id)

        info_event_counter = 0
        runtime = 0
        starttime = time.time()
        caught_events = []

        # check that the four InfoStatusEvents were received
        while info_event_counter < 4 and runtime < 60:
            a = queue.get(timeout=60)
            caught_events.append(a)
            info_event_counter += 1
            runtime = time.time() - starttime

        self.assertEquals(info_event_counter, 4)
Example No. 28
 def __init__(self):
     self._eid = 0
     self._event = Event()
     self._w = self._h = self._health = None
     self.size_changed_count = 0
Example No. 29
'''
User state machine:
     [Observing]     --------------<------------<-----------
          |          |                                     |
    -> [Hang] <-> [InRoomWait] <-> [Ready] -> [InGame] -->----
        |                  |         |             |
        --->[[Disconnect]]<-------------------------
'''

# should use WeakSet or WeakValueDictionary,
# but this works fine, not touching it.
games = {}  # all games
users = {}  # all users
dropped_users = {}  # passively dropped users
evt_datachange = Event()

if options.gidfile:
    try:
        with open(options.gidfile, 'r') as f:
            _curgameid = int(f.read())
    except Exception:
        _curgameid = 0
else:
    _curgameid = 0


def new_gameid():
    global _curgameid
    _curgameid += 1
    return _curgameid
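new_gameid only advances the in-memory counter; nothing shown here writes it back to options.gidfile. A hedged sketch of a hypothetical helper that mirrors the read logic at startup:

def save_gameid():
    # Hypothetical helper: persist the current counter so ids keep
    # increasing across restarts, mirroring the startup read above.
    if options.gidfile:
        with open(options.gidfile, 'w') as f:
            f.write(str(_curgameid))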
Example No. 30
 def calc_event(self):
     from gevent.event import Event
     return Event()