Example #1
    def __init__(self,
                 location,
                 repo_name=None,
                 trusted_keyrings: list[str] | None = None,
                 entity=None):

        if not trusted_keyrings:
            trusted_keyrings = []

        lconf = LocalConfig()
        if not repo_name:
            repo_name = 'unknown'
        if is_remote_url(location):
            # remote repository: mirror its data into the local cache directory
            self._root_dir = os.path.join(lconf.cache_dir, 'repo_cache',
                                          repo_name)
            os.makedirs(self._root_dir, exist_ok=True)
            self._repo_url = location
        else:
            # local repository: the given path is used as the root directly
            self._root_dir = location
            self._repo_url = None

        self._keyrings = trusted_keyrings
        self._trusted = False
        self._name = repo_name

        if entity:
            self._repo_entity = entity
        else:
            self._repo_entity = ArchiveRepository(self._name)

        self._inrelease: dict[str, Repository.InReleaseData] = {}
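A brief usage sketch: judging by the `Repository.InReleaseData` annotation above and the call in Example #3, this is the constructor of `Repository`, and `is_remote_url` presumably treats http(s) URLs as remote. Both points are inferred from this page, and the locations below are made up:

# Usage sketch; Repository/is_remote_url semantics inferred from this page.
# Remote URL: contents are cached under <cache_dir>/repo_cache/<repo_name>
repo = Repository('https://archive.example.org/master', repo_name='master')

# Local path: used directly as the repository root, no cache involved
repo = Repository('/srv/archive/master', repo_name='master')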
Example #2
File: env.py Project: rr4/laniakea
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    from laniakea.localconfig import LocalConfig

    lconf = LocalConfig()
    ini_section = config.get_section(config.config_ini_section)
    ini_section['sqlalchemy.url'] = lconf.database_url

    connectable = engine_from_config(
        ini_section,
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    with connectable.connect() as connection:
        context.configure(
            connection=connection, target_metadata=target_metadata
        )

        with context.begin_transaction():
            context.run_migrations()
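Alembic env.py files conventionally pair this with an 'offline' mode that emits SQL without connecting to the database. The project's offline function is not shown here; the sketch below is the standard Alembic counterpart, reusing the same LocalConfig wiring:

def run_migrations_offline():
    """Run migrations in 'offline' mode, emitting SQL without an Engine.

    Sketch of the conventional Alembic counterpart; the database URL is
    taken from LocalConfig as in run_migrations_online() above.
    """
    from laniakea.localconfig import LocalConfig

    context.configure(
        url=LocalConfig().database_url,
        target_metadata=target_metadata,
        literal_binds=True,
    )

    with context.begin_transaction():
        context.run_migrations()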
Example #3
    def __init__(self, repo_entity):
        lconf = LocalConfig()
        self._repo = Repository(lconf.archive_root_dir,
                                'master',
                                entity=repo_entity)
        self._repo_entity = repo_entity
        self._repo.set_trusted(True)
Example #4
    def __init__(self, module):
        self._module = str(module)
        lconf = LocalConfig()
        keyfile = lconf.secret_curve_keyfile_for_module(self._module)

        self._zctx = zmq.Context()
        self._socket = create_submit_socket(self._zctx)

        signer_id = None
        signing_key = None
        if os.path.isfile(keyfile):
            signer_id, signing_key = keyfile_read_signing_key(keyfile)

        if self._socket and not signing_key:
            log.warning(
                'Cannot publish events: No valid signing key found for this module.'
            )
            self._socket = None
        self._signer_id = signer_id
        self._signing_key = signing_key
Example #5
def create_submit_socket(zmq_context):
    '''
    Create a ZeroMQ socket that is connected to a Lighthouse instance in order
    to submit messages to it.
    '''

    lconf = LocalConfig()
    servers = lconf.lighthouse.servers_submit
    if not servers:
        return  # we can't send events, as there are no Lighthouse instances registered

    submit_server = random.choice(servers)

    socket = zmq_context.socket(zmq.DEALER)
    socket.connect(submit_server)

    return socket
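A hedged usage sketch: create the context, open the submit socket, and guard against the no-servers case. The payload below is purely illustrative; the real message format is presumably signed, as Example #4 suggests:

import zmq

zctx = zmq.Context()
socket = create_submit_socket(zctx)
if socket is None:
    # no Lighthouse submit servers configured in LocalConfig
    print('Event submission unavailable.')
else:
    # illustrative payload only; the actual wire format is not shown here
    socket.send_string('{"tag": "example", "data": {}}')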
Example #6
    def __init__(self):
        from glob import glob
        from laniakea.localconfig import LocalConfig
        from laniakea.msgstream import keyfile_read_verify_key

        self._zctx = zmq.Context()
        self._lhsub_socket = create_event_listen_socket(self._zctx)
        self._mconf = MirkConfig()
        self._mconf.load()

        # Read all the keys that we trust, to verify messages
        # TODO: Implement auto-reloading of valid keys list if directory changes
        self._trusted_keys = {}
        for keyfname in glob(
                os.path.join(LocalConfig().trusted_curve_keys_dir, '*')):
            signer_id, verify_key = keyfile_read_verify_key(keyfname)
            if signer_id and verify_key:
                self._trusted_keys[signer_id] = verify_key
Example #7
def create_event_listen_socket(zmq_context, subscribed_tags=None):
    '''
    Create a ZeroMQ socket that is listening to events published on a
    Lighthouse event publisher socket.
    '''

    lconf = LocalConfig()
    publish_server = random.choice(lconf.lighthouse.servers_publish)

    socket = zmq_context.socket(zmq.SUB)
    socket.connect(publish_server)

    if not subscribed_tags:
        socket.setsockopt_string(zmq.SUBSCRIBE, '')
    else:
        for tag in subscribed_tags:
            socket.setsockopt_string(zmq.SUBSCRIBE, tag)

    return socket
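A sketch of consuming events from this socket, assuming the publisher sends the tag and the payload as separate frames (a common pattern for ZeroMQ SUB-side filtering; the exact framing is not shown on this page):

import zmq

zctx = zmq.Context()
socket = create_event_listen_socket(zctx, subscribed_tags=['_lk.jobs'])

while True:
    # assumption: tag frame first, JSON payload second
    frames = socket.recv_multipart()
    tag = frames[0].decode('utf-8')
    payload = frames[1].decode('utf-8') if len(frames) > 1 else ''
    print(tag, payload)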
Example #8
    def __init__(self, endpoint, pub_queue):
        from glob import glob
        from laniakea.localconfig import LocalConfig
        from laniakea.msgstream import keyfile_read_verify_key

        self._socket = None
        self._ctx = zmq.Context.instance()

        self._pub_queue = pub_queue
        self._endpoint = endpoint

        self._trusted_keys = {}

        # TODO: Implement auto-reloading of valid keys list if directory changes
        for keyfname in glob(
                os.path.join(LocalConfig().trusted_curve_keys_dir, '*')):
            signer_id, verify_key = keyfile_read_verify_key(keyfname)
            if signer_id and verify_key:
                self._trusted_keys[signer_id] = verify_key
Example #9
    def __init__(self):
        lconf = LocalConfig()

        self._dak_dist_dir = os.path.join(lconf.workspace, 'dist', 'dak')
        self._dak_exe = os.path.join(self._dak_dist_dir, 'dak', 'dak.py')
Example #10
def run_server(options):
    import systemd.daemon
    from laniakea.localconfig import LocalConfig

    if options.config_fname:
        LocalConfig(options.config_fname)

    if options.verbose:
        from laniakea.logging import set_verbose
        set_verbose(True)

    lconf = LocalConfig()

    # TODO: Disable server features requiring the database if Lighthouse is
    # configured as relay, making it only forward requests to other instances.

    # event stream plumbing
    pub_queue = None
    publish_endpoints = lconf.lighthouse.endpoints_publish
    if publish_endpoints:
        log.info('Creating event stream publisher.')
        pub_queue = Queue()
        spub = Process(target=run_events_publisher_server,
                       args=(publish_endpoints, pub_queue),
                       name='EventsPublisher',
                       daemon=True)
        spub.start()
        server_processes.append(spub)

        # spawn processes that handle event stream submissions
        log.info('Creating event stream receivers ({}).'.format(
            len(lconf.lighthouse.endpoints_submit)))
        for i, submit_endpoint in enumerate(lconf.lighthouse.endpoints_submit):
            p = Process(target=run_events_receiver_server,
                        args=(submit_endpoint, pub_queue),
                        name='EventsServer-{}'.format(i),
                        daemon=True)
            p.start()
            server_processes.append(p)

    # spawn processes to serve job requests
    log.info('Creating job handlers.')
    for i, jobs_endpoint in enumerate(lconf.lighthouse.endpoints_jobs):
        p = Process(target=run_jobs_server,
                    args=(jobs_endpoint, pub_queue),
                    name='JobsServer-{}'.format(i),
                    daemon=True)
        p.start()
        server_processes.append(p)

    # set up termination signal handler
    signal.signal(signal.SIGQUIT, term_signal_handler)
    signal.signal(signal.SIGTERM, term_signal_handler)
    signal.signal(signal.SIGINT, term_signal_handler)

    # signal readiness
    log.info('Ready.')
    systemd.daemon.notify('READY=1')

    # wait for processes to terminate (possibly forever)
    while True:
        for p in server_processes:
            p.join(20)
            if not p.is_alive():
                log.info('Server worker process has died, shutting down.')
                # one of our workers must have failed, shut down
                for pr in server_processes:
                    pr.terminate()
                    pr.join(10)
                    pr.kill()
                sys.exit(p.exitcode)
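run_server references server_processes and term_signal_handler, which are defined elsewhere in this module. A minimal sketch of the usual shape of such a handler, purely for orientation:

# Illustrative only: the real definitions live elsewhere in this module.
server_processes = []

def term_signal_handler(signum, frame):
    # terminate all worker processes, then exit the supervisor cleanly
    for p in server_processes:
        p.terminate()
        p.join(10)
    sys.exit(0)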
Example #11
    def __init__(self):
        lconf = LocalConfig()

        self._britney_dir = os.path.join(lconf.workspace, 'dist', 'britney2')
        self._britney_exe = os.path.join(self._britney_dir, 'britney.py')
Example #12
    def __init__(self):
        self._lconf = LocalConfig()
        self._britney = Britney()

        self._workspace = os.path.join(self._lconf.workspace, 'spears')
        os.makedirs(self._workspace, exist_ok=True)