Example #1
 def run(self):
     tornado.options.define("port", default=self.port, help="run on the given port", type=int)
     self.__set_log_level_tornado()
     app = self.app()
     app.listen(tornado.options.options.port)
     log.info("Web server started on port %s" % self.port)
     tornado.ioloop.IOLoop.instance().start()
Example #2
 def stop_loop():
     now = time.time()
     if now < deadline and (io_loop._callbacks or io_loop._timeouts):
         io_loop.add_timeout(now + 1, stop_loop)
     else:
         io_loop.stop()
         log.info('Shutdown')
Example #3
    def get(self, args=None):
        # This resource is protected so we should see the REMOTE_USER header
        header_name = self.settings['header']
        remote_user = self.request.headers.get(header_name, "")

        config = self.settings['config']
        log = self.settings['log']
        if remote_user == "":
            log.info(f'Failed to find REMOTE_USER')
            raise web.HTTPError(401, "Hub sharder unable to find auth headers")

        hub = yield self.shard(remote_user)

        self.set_cookie('hub', hub, domain=config['domain'])
        #self.request.headers['Cookie'] = f'hub={hub}'

        # This is for nbgitpuller redirects to work
        if args == 'user-redirect/git-pull':
            params = {}
            for k, v in self.request.arguments.items():
                if len(v) > 0:
                    params[k] = v[0]

            redirect_url = (
                f'https://{hub}/jupyter/hub/user-redirect/git-pull?'
                f'{urllib.parse.urlencode(params)}')

            log.info(f'Performing an nbgitpuller redirect to: {redirect_url}')
            self.redirect(f'{redirect_url}')

        # This is for all other requests
        else:
            self.redirect(f'https://{hub}/jupyter/hub')
Example #4
def import_test(modname):
    """Run an import test

    Just check if it imports!
    """
    log.info("Testing import of " + str(modname))
    importlib.import_module(modname)
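A minimal usage sketch for the helper above (the module names are placeholders, not from the original project; it assumes `log` and `importlib` are already set up as in the snippet):

# Hypothetical smoke test: verify that the web stack's dependencies import.
for modname in ("tornado", "tornado.web", "tornado.ioloop"):
    import_test(modname)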
Example #5
 def stop_loop():
     now = time.time()
     if now < deadline and (asyncio.all_tasks()):
         log.info("Waiting for tasks to complete...")
         io_loop.add_timeout(now + 1, stop_loop)
     else:
         io_loop.stop()
         log.info("Shutdown complete.")
Example #6
    def run(self):
        # Randomise the cookie secret upon reload if it's not already set.
        if self.cookie_secret is None:
            m = hashlib.md5()
            m.update(
                (str(random.random()) + str(random.random())).encode('utf-8'))
            cookie_secret = m.digest()
        else:
            cookie_secret = self.cookie_secret

        # Create a default handler if the user wants one.
        me = self
        if self.default_handler is not None:

            class default_handler_class(tornado.web.RequestHandler):
                def delete(self, *args, **kwargs):
                    return me.default_handler(self, 'delete', *args, **kwargs)

                def get(self, *args, **kwargs):
                    return me.default_handler(self, 'get', *args, **kwargs)

                def patch(self, *args, **kwargs):
                    return me.default_handler(self, 'patch', *args, **kwargs)

                def post(self, *args, **kwargs):
                    method = self.get_field('_method', '').lower()
                    if method == 'delete':
                        return self.delete(*args, **kwargs)
                    elif method == 'patch':
                        return self.patch(*args, **kwargs)
                    elif method == 'put':
                        return self.put(*args, **kwargs)
                    else:
                        return me.default_handler(self, 'post', *args,
                                                  **kwargs)

                def put(self, *args, **kwargs):
                    return me.default_handler(self, 'put', *args, **kwargs)
        else:
            default_handler_class = None

        # Create the app in debug mode (autoreload), binding to the appropriate address.
        app = tornado.web.Application(
            self.handlers,
            cookie_secret=cookie_secret,
            debug=True,
            default_handler_class=default_handler_class,
            static_path=self.static_path,
        )
        app.listen(port=self.port, address=self.hostname)
        log.info('Reloading... waiting for requests on http://{}:{}'.format(
            self.hostname or 'localhost', self.port))

        # Start the ioloop.
        loop = tornado.ioloop.IOLoop.instance()
        loop.start()
Example #7
def listen(routes, port=None):
    if not port:
        try:
            port = os.environ['PORT']
        except:
            port = 8899

    app = tornado.web.Application(routes.routes)
    log.info('Starting tornado server on 127.0.0.1:%s' % port)
    app.listen(port)
    tornado.ioloop.IOLoop.instance().start()
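A hypothetical caller for `listen()`; the only requirement is that the `routes` argument exposes a `.routes` attribute holding Tornado handler tuples. The class and handler names below are assumptions for illustration, not from the original project:

import tornado.web

class HelloHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("hello")

class Routes:
    # Hypothetical container: listen() only reads this attribute.
    routes = [(r"/", HelloHandler)]

listen(Routes())  # falls back to $PORT or 8899, then blocks in the IOLoop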
Example #8
async def main():
    tornado.options.parse_command_line()
    options.parse_config_file(os.path.join(APP_ROOT, "config/app.conf"))

    s = "s" if options.ssl_enabled else ""
    log.info(f"Starting Tornado on http{s}://localhost:{options.port}/")

    try:
        Database = databases.database_types[options.db_type.lower()]
    except KeyError:
        log.execption("ERROR: Invalid db_type specified. Check valid database types in the docs.")
        return

    log.info("Connecting to database...")

    async with Database() as db:
        app = Application(db)
        db.app = app

        if not await db.is_initialized():
            log.info("Database not initialized, creating roles and Admin user...")

            roles = []
            roles.append(Role.partial(name="Admin", permissions=Permissions.all(), state=app))

            permissions = Permissions.default()
            permissions.manage_files = True
            permissions.manage_invites = True
            roles.append(Role.partial(name="Trusted", permissions=permissions, state=app))

            roles.append(Role.partial(name="User", permissions=Permissions.default(), state=app))

            available_chars = string.ascii_letters + string.digits
            password = "".join(random.choice(available_chars) for i in range(5))
            hashed_password = await User.hash_password(password)
            user = User.partial(username="******", hashed_password=hashed_password, role_id=None, state=app)

            await db.initialize(roles, user)

            log.info(f"Your new admin username is Admin and your password is {password}.")

        global http_server

        http_server = tornado.httpserver.HTTPServer(app)
        http_server.listen(options.port)

        log.info("Ready to go.")

        await shutdown_event.wait()
Example #9
    def get(self):
        # This resource is protected so we should see the REMOTE_USER header
        header_name = self.settings['header']
        remote_user = self.request.headers.get(header_name, "")

        log = self.settings['log']
        if remote_user == "":
            log.info(f'Failed to find REMOTE_USER')
            raise web.HTTPError(401, "Hub sharder unable to find auth headers")

        hub = yield self.shard(remote_user)

        self.set_cookie('hub', hub)
        #self.request.headers['Cookie'] = f'hub={hub}'
        self.redirect(f'https://{hub}/jupyter/hub')
Example #10
def shutdown():
    log.info("Shutting down...")
    http_server.stop()
    shutdown_event.set()

    io_loop = tornado.ioloop.IOLoop.instance()

    deadline = time.time() + MAX_WAIT_SECONDS

    def stop_loop():
        now = time.time()
        if now < deadline and (asyncio.all_tasks()):
            log.info("Waiting for tasks to complete...")
            io_loop.add_timeout(now + 1, stop_loop)
        else:
            io_loop.stop()
            log.info("Shutdown complete.")
    stop_loop()
Example #11
def build_repo(repo,
               resolved_ref,
               checkout_path,
               build_log_file,
               force_build=False):
    """build one repo"""

    image_id = make_image_id(repo, resolved_ref)
    d = docker.from_env()
    try:
        image = d.images.get(image_id)
    except docker.errors.ImageNotFound:
        # need to build
        pass
    else:
        log.info(f"Already have image {image_id}")
        if not force_build:
            with open(build_log_file, "w") as f:
                f.write(f"Image {image_id} already built")
            return image_id, checkout_path

    log.info(f"Building image {image_id} for {repo}@{resolved_ref}")

    with tee(build_log_file) as stdout:
        try:
            run(
                [
                    "jupyter-repo2docker",
                    "--no-run",
                    "--no-clean",
                    "--image-name",
                    image_id,
                    checkout_path,
                ],
                stdout=stdout,
                stderr=STDOUT,
                check=True,
            )
        finally:
            stdout.flush()
    return image_id, checkout_path
Example #12
    def post(self, path_file):
        global connection_factory
        username = self.get_body_argument("username")
        password = self.get_body_argument("password")

        if len(path_file) == 0:
            path_file = '/'

        log = logging.getLogger(__name__)
        log.info('Login, username="******", path_file="{}"'.format(
            username, path_file))

        connection = connection_factory.create_connection_and_connect()
        rsp = connection.authenticate(username, password)

        if not rsp.is_error():
            user_id = user_sessions.add_session(
                UserSession(username, password, connection))
            log.info('New user session created for "{}"'.format(username))
            self.set_secure_cookie(COOKIE_NAME,
                                   str(user_id),
                                   expires_days=COOKIE_DURATION_DAYS)
        else:
            log.info('Failed to login, username="******", error="%d"' %
                     (username, rsp.error_code()))

        self.redirect(path_file)
Example #13
def shutdown():
    log.info('Stopping http server')
    http_server.stop()

    if hasattr(http_server.request_callback, 'shutdown_hook'):
        http_server.request_callback.shutdown_hook()

    log.info('Will shutdown in %s seconds ...',
             MAX_WAIT_SECONDS_BEFORE_SHUTDOWN)
    io_loop = tornado.ioloop.IOLoop.instance()

    deadline = time.time() + MAX_WAIT_SECONDS_BEFORE_SHUTDOWN

    def stop_loop():
        now = time.time()
        if now < deadline and (io_loop._callbacks or io_loop._timeouts):
            io_loop.add_timeout(now + 1, stop_loop)
        else:
            io_loop.stop()
            log.info('Shutdown')

    stop_loop()
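This shutdown routine is normally triggered from a signal handler. Below is a minimal sketch of that wiring, assuming the same module-level names as above (`shutdown`); `IOLoop.add_callback_from_signal` is used because it is safe to call from inside a signal handler. Note that the `io_loop._callbacks` / `io_loop._timeouts` check relies on private attributes of the pre-5.0 Tornado loop; Examples #5 and #10 show an asyncio-based variant using `asyncio.all_tasks()`.

import signal
import tornado.ioloop

def sig_handler(sig, frame):
    # Sketch: hand control back to the IOLoop thread and run shutdown() there.
    tornado.ioloop.IOLoop.instance().add_callback_from_signal(shutdown)

signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)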
Example #14
    async def get(self, *args, **kwargs):
        q = self.request.query_arguments
        if self.application.notifier_key and ('key' not in q or q['key'][0].decode() != self.application.notifier_key):
            return self.send_error(403)
        log.info("Query")
        self.send_cors_headers()
        self.add_header('Content-Type', 'application/json; charset=utf-8')
        self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
        m = self.request.body
        if m:
            m = json.loads(m)

        await self.finish(json.dumps(m))

        if m and 'overwrite' in m and m['overwrite'] and 'type' in m:
            i = 0
            while i < len(queue):
                if 'type' in queue[i] and m['type'] == queue[i]['type']:
                    del queue[i]
                else:
                    i += 1
        await queue.push(m)
Example #15
 def timeline_build(self, config):
     # Set configuration in the timeline
     if 'filter' in config and config['filter']:
         self.client.timeline_filter = config['filter']
     if 'packet' in config:
         self.client.timeline_filter['time'] = {
             'start': datetime.datetime.utcfromtimestamp(config['packet']['from']/1000),
             'stop': datetime.datetime.utcfromtimestamp(config['packet']['to']/1000)
         }
     # Execute request
     if 'play' in config or 'packet' in config:
         # Configure the helper thread
         if not self.timeline_th or not self.timeline_th.isAlive():
             self.timeline_th = TimelinePacketProcessHelper(self, config['clean'], self.client.timeline_filter)
         # Set real time environment
         if 'play' in config:
             self.timeline_th.real_time = config['play']
         # Start the process
         if not self.timeline_th.isAlive():
             self.timeline_th.start()
         else:
             log.info("Timeline thread is running, updating only some information")
Example #16
def run_tests(image, checkout_path, run_dir):
    """Find tests to run and run them"""
    notebooks = list(find_notebooks(checkout_path))
    count = len(notebooks)
    log.info(f"Found {count} to test")
    if notebook_limit and count > notebook_limit:
        log.info(f"Limiting to first {notebook_limit}/{count} notebooks")
        notebooks = notebooks[:notebook_limit]
    for nb_path in notebooks:
        test_log_file = os.path.join(
            run_dir, "logs",
            f"test-notebook-{nb_path.replace('/', '-')}-{run_id}.txt")
        try:
            yield run_one_test(image, "notebook", nb_path, run_dir,
                               test_log_file)
        except Exception:
            log.exception(f"Error running test {nb_path}")
            yield {
                "kind": "notebook",
                "success": False,
                "test_id": nb_path,
                "path": test_log_file,
            }
Example #17
    def add_result(kind, test_id, success, path):
        path = os.path.relpath(path, run_dir)
        log.info(
            f"Recording test result: repo={repo}, kind={kind}, test_id={test_id}, {'success' if success else 'failure'}"
        )
        result = TestResult(
            repo,
            ref,
            resolved_ref,
            last_modified,
            kind,
            test_id,
            success,
            path,
            timestamp,
            run_id,
            repo2docker.__version__,
        )
        results.append(result)

        with open(result_file, "a") as f:
            writer = csv.writer(f)
            writer.writerow(result)
Example #18
def clone_repo(repo, ref):
    """Clone a repo, return checkout path and resolved ref"""
    slug = repo_slug(repo)

    td = tempfile.mkdtemp(prefix=f"r2d-test-{run_id}")
    checkout_path = os.path.join(td, slug)
    log.info(f"Cloning {repo}@{ref} to {checkout_path}")
    try:
        os.makedirs(checkout_path)
    except FileExistsError:
        pass

    cp = Git()
    spec = cp.detect(repo, ref=ref)
    for line in cp.fetch(spec, output_dir=checkout_path, yield_output=echo):
        echo(line)

    resolved_ref = cp.content_id or ref
    with cd(checkout_path):
        output = check_output(
            ["git", "log", "-1", "--date=iso-strict", "--format=%ad"])
        timestamp = output.decode("utf8").strip()
    log.info(f"Cloned {repo}@{ref}: commit {resolved_ref} at {timestamp}")
    return checkout_path, resolved_ref, timestamp
Example #19
 def get(self):
     with self.session_context() as session:
         hash_id = dbi.get_random_hash_id(session)
     log.info('redirecting to url /%s', hash_id)
     self.redirect('/' + hash_id, status=303)
Example #20
    def get(self, path):
        global user_sessions
        global connection_factory
        global server_address
        log = logging.getLogger(__name__)
        cookie_user_id = None
        session = None

        try:
            cookie_user_id = int(
                self.get_secure_cookie(COOKIE_NAME,
                                       max_age_days=COOKIE_DURATION_DAYS))
        except ValueError:
            pass
        except TypeError:
            pass

        if cookie_user_id is not None:
            session = user_sessions.find_session(cookie_user_id)

        if session is not None:
            connection = session.create_connection()
            rsp = connection.allocate_authentication_token()
            connection.disconnect()

            if rsp.is_error():
                log.error(
                    'Failed to allocate login token for user "{}", code: {}'.
                    format(
                        session.username(),
                        rsp.error_code(),
                    ))
                raise RuntimeError()

            auth_token = rsp.as_allocate_auth_token_response().token
            log.info(
                'Existing session found authentication token allocated, username="******"'
                .format(session.username()))

            if len(path) > 1:
                path = zyn_util.util.normalized_remote_path('/' + path)
                path_parent, name = zyn_util.util.split_remote_path(path)
            else:
                path_parent, name = ('/', '')

            log.info('get, path_parent="{}", name="{}"'.format(
                path_parent,
                name,
            ))

            self.render(
                "main.html",
                zyn_user_id=str(cookie_user_id),
                root_url=self.HANDLER_URL,
                path_parent=path_parent,
                name=name,
                authentication_token=auth_token,
                server_address=server_address,
            )
        else:
            log.info('Unauthenticated user')
            self.render("login.html")
Example #21
def main():
    """
    Entry point for GNS3 GUI.
    """

    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help="show the version", action='version', version=__version__)
    parser.add_argument('--debug', help="print out debug messages", action='store_true', default=False)
    options = parser.parse_args()
    exception_file_path = "exception.log"

    def exceptionHook(exception, value, tb):

        if exception == KeyboardInterrupt:
            sys.exit(0)

        lines = traceback.format_exception(exception, value, tb)
        print("****** Exception detected, traceback information saved in {} ******".format(exception_file_path))
        print("\nPLEASE REPORT ON https://community.gns3.com/community/support/bug\n")
        print("".join(lines))
        try:
            curdate = time.strftime("%d %b %Y %H:%M:%S")
            logfile = open(exception_file_path, "a")
            logfile.write("=== GNS3 {} traceback on {} ===\n".format(__version__, curdate))
            logfile.write("".join(lines))
            logfile.close()
        except OSError as e:
            print("Could not save traceback to {}: {}".format(exception_file_path, e))

        if not sys.stdout.isatty():
            # if stdout is not a tty (redirected to the console view),
            # then print the exception on stderr too.
            print("".join(lines), file=sys.stderr)

    # catch exceptions to write them in a file
    sys.excepthook = exceptionHook

    current_year = datetime.date.today().year
    print("GNS3 GUI version {}".format(__version__))
    print("Copyright (c) 2007-{} GNS3 Technologies Inc.".format(current_year))

    # we only support Python 2 version >= 2.7 and Python 3 version >= 3.3
    if sys.version_info < (2, 7):
        raise RuntimeError("Python 2.7 or higher is required")
    elif sys.version_info[0] == 3 and sys.version_info < (3, 3):
        raise RuntimeError("Python 3.3 or higher is required")

    version = lambda version_string: [int(i) for i in version_string.split('.')]

    if version(QtCore.QT_VERSION_STR) < version("4.6"):
        raise RuntimeError("Requirement is Qt version 4.6 or higher, got version {}".format(QtCore.QT_VERSION_STR))

    # 4.8.3 because of QSettings (http://pyqt.sourceforge.net/Docs/PyQt4/pyqt_qsettings.html)
    if DEFAULT_BINDING == "PyQt" and version(QtCore.BINDING_VERSION_STR) < version("4.8.3"):
        raise RuntimeError("Requirement is PyQt version 4.8.3 or higher, got version {}".format(QtCore.BINDING_VERSION_STR))

    if DEFAULT_BINDING == "PySide" and version(QtCore.BINDING_VERSION_STR) < version("1.0"):
        raise RuntimeError("Requirement is PySide version 1.0 or higher, got version {}".format(QtCore.BINDING_VERSION_STR))

    try:
        # if tornado is present then enable pretty logging.
        import tornado.log
        tornado.log.enable_pretty_logging()
    except ImportError:
        pass

    # check for the correct locale
    # (UNIX/Linux only)
    locale_check()

    try:
        os.getcwd()
    except FileNotFoundError:
        log.critical("the current working directory doesn't exist")
        return

    # always use the INI format on Windows and OSX (because we don't like the registry and plist files)
    if sys.platform.startswith('win') or sys.platform.startswith('darwin'):
        QtCore.QSettings.setDefaultFormat(QtCore.QSettings.IniFormat)

    if sys.platform.startswith('win'):
        try:
            import win32console
            import win32con
            import win32gui
        except ImportError:
            raise RuntimeError("Python for Windows extensions must be installed.")

        try:
            win32console.AllocConsole()
            console_window = win32console.GetConsoleWindow()
            win32gui.ShowWindow(console_window, win32con.SW_HIDE)
        except win32console.error as e:
            print("warning: could not allocate console: {}".format(e))

    exit_code = MainWindow.exit_code_reboot
    while exit_code == MainWindow.exit_code_reboot:

        exit_code = 0
        app = QtGui.QApplication(sys.argv)

        # this info is necessary for QSettings
        app.setOrganizationName("GNS3")
        app.setOrganizationDomain("gns3.net")
        app.setApplicationName("GNS3")
        app.setApplicationVersion(__version__)

        # save client logging info to a file
        logfile = os.path.join(os.path.dirname(QtCore.QSettings().fileName()), "GNS3_client.log")
        try:
            try:
                os.makedirs(os.path.dirname(QtCore.QSettings().fileName()))
            except FileExistsError:
                pass
            handler = logging.FileHandler(logfile, "w")
            if options.debug:
                root_logger = logging.getLogger()
                root_logger.setLevel(logging.DEBUG)
                if len(root_logger.handlers) > 0:
                    root_handler = root_logger.handlers[0]
                else:
                    root_handler = logging.StreamHandler()
                    root_logger.addHandler(root_handler)
                root_handler.setLevel(logging.DEBUG)
            else:
                handler.setLevel(logging.INFO)
            log.info('Log level: {}'.format(logging.getLevelName(log.getEffectiveLevel())))

            formatter = logging.Formatter("[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d] %(message)s",
                                          datefmt="%y%m%d %H:%M:%S")
            handler.setFormatter(formatter)
            log.addHandler(handler)
        except OSError as e:
            log.warn("could not log to {}: {}".format(logfile, e))

        # update the exception file path to have it in the same directory as the settings file.
        exception_file_path = os.path.join(os.path.dirname(QtCore.QSettings().fileName()), exception_file_path)

        mainwindow = MainWindow.instance()
        mainwindow.show()
        exit_code = app.exec_()
        delattr(MainWindow, "_instance")
        app.deleteLater()

    sys.exit(exit_code)
Example #22
def run_notebook(nb_path, output_dir):
    """Run a notebook tests

    executes the notebook and stores the output in a file
    """

    import nbformat
    from jupyter_client.kernelspec import KernelSpecManager
    from nbconvert.preprocessors.execute import executenb
    from datetime import datetime

    log.info("Testing notebook " + str(nb_path))
    with open(nb_path) as f:
        nb = nbformat.read(f, as_version=4)

    kernel_specs = KernelSpecManager().get_all_specs()
    kernel_info = nb.metadata.get("kernelspec") or {}
    kernel_name = kernel_info.get("name", "")
    kernel_language = kernel_info.get("language") or ""
    if kernel_name in kernel_specs:
        log.info("Found kernel " + str(kernel_name))
    elif kernel_language:
        log.warning("No such kernel " + str(kernel_name) +
                    ", falling back on kernel language=" +
                    str(kernel_language))
        kernel_language = kernel_language.lower()
        # no exact name match, re-implement js notebook fallback,
        # using kernel language instead
        # nbconvert does not implement this, but it should
        for kernel_spec_name, kernel_info in kernel_specs.items():
            if (kernel_info.get("spec",
                                {}).get("language",
                                        "").lower() == kernel_language):
                log.warning("Using kernel " + str(kernel_spec_name) +
                            " to provide language: " + str(kernel_language))
                kernel_name = kernel_spec_name
                break
        else:
            log.warning("Found no matching kernel for name=" +
                        str(kernel_name) + ", language=" +
                        str(kernel_language))
            summary_specs = [
                "name=" + str(name) + ", language=" +
                str(info['spec'].get('language'))
                for name, info in kernel_specs.items()
            ]
            log.warning("Found kernel specs: " + '; '.join(summary_specs))

    start_time = datetime.now()
    exported = executenb(nb,
                         cwd=os.path.dirname(nb_path),
                         kernel_name=kernel_name,
                         timeout=600)
    execution_time = (datetime.now() - start_time).seconds
    log.info("Execution time is " + str(execution_time))
    rel_path = os.path.relpath(nb_path, os.getcwd())
    dest_path = os.path.join(output_dir, "notebooks", rel_path)
    log.info("Saving exported notebook to " + str(dest_path))
    try:
        os.makedirs(os.path.dirname(dest_path))
    except FileExistsError:
        pass

    with open(dest_path, "w") as f:
        nbformat.write(exported, f)
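A hypothetical invocation of `run_notebook()` (the paths are placeholders; it assumes the notebook's kernel is installed and `log` is configured as in the snippet):

# Executes the notebook and writes the executed copy under ./out/notebooks/.
run_notebook("examples/index.ipynb", output_dir="./out")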
Example #23
 def stop(self):
     tornado.ioloop.IOLoop.instance().stop()
     log.info("Web server stopped")
Example #24
def test_one_repo(repo, ref="master", run_dir="./runs", force_build=False):
    slug = repo_slug(repo).lower()
    slug_parts = slug.split("/")
    path = "/".join([slug_parts[0], slug_parts[1][0]] + slug_parts[1:])
    repo_run_dir = os.path.join(run_dir, path)
    log_dir = os.path.join(repo_run_dir, "logs")
    result_dir = os.path.join(repo_run_dir, "results")
    for d in [log_dir, result_dir]:
        try:
            os.makedirs(d)
        except FileExistsError:
            pass
    result_file = os.path.join(result_dir, f"results-{ref}-{run_id}.csv")
    with open(result_file, "w") as f:
        writer = csv.writer(f)
        writer.writerow(TestResult._fields)
    build_log_file = os.path.join(log_dir, f"build-{ref}-{run_id}.txt")

    checkout_path, resolved_ref, last_modified = clone_repo(repo, ref)

    log.info(f"Building {repo}@{ref} in {repo_run_dir} with run id {run_id}")
    results = []

    def add_result(kind, test_id, success, path):
        path = os.path.relpath(path, run_dir)
        log.info(
            f"Recording test result: repo={repo}, kind={kind}, test_id={test_id}, {'success' if success else 'failure'}"
        )
        result = TestResult(
            repo,
            ref,
            resolved_ref,
            last_modified,
            kind,
            test_id,
            success,
            path,
            timestamp,
            run_id,
            repo2docker.__version__,
        )
        results.append(result)

        with open(result_file, "a") as f:
            writer = csv.writer(f)
            writer.writerow(result)

    try:
        image, checkout_path = build_repo(
            repo,
            resolved_ref=resolved_ref,
            checkout_path=checkout_path,
            build_log_file=build_log_file,
            force_build=force_build,
        )
    except Exception as e:
        # log errors that won't be in the build log
        # (these will usually be bugs in our script!)
        if not isinstance(e, CalledProcessError):
            log.exception("Build failure")
            with open(build_log_file, "a") as f:
                traceback.print_exc(file=f)
        # record build failure
        add_result(kind="build",
                   test_id="build",
                   success=False,
                   path=build_log_file)
        return result_file, results
    else:
        add_result(kind="build",
                   test_id="build",
                   success=True,
                   path=build_log_file)

    for result in run_tests(image, checkout_path, repo_run_dir):
        add_result(**result)

    return result_file, results
Example #25
 async def options(self, *args, **kwargs):
     self.send_cors_headers()
     log.info("Options")
     await self.finish('')
Example #26
 async def on_message(self, message):
     log.info("Message received: %s", message)
     await self.send_event(message)
Example #27
 def get(self, token=None):
     if not token:
         self.set_status(400)
         self.write('A token is required.')
         self.flush()
         self.finish()
         return
     request_data = DB.get_request_data(token)
     if not request_data:
         log.info(f'''Invalid token: {token}''')
         self.render('invalid_token.html',
                     rootPath=r'{}/'.format(Settings.APP_ROOT))
     else:
         token = request_data[0][0]
         email = request_data[0][1]
         last_name = request_data[0][2]
         first_name = request_data[0][3]
         subject = request_data[0][4]
         message = request_data[0][5]
         topics = request_data[0][6]
         received = request_data[0][7]
         if received == 1:
             self.render('already_received.html',
                         rootPath=r'{}/'.format(Settings.APP_ROOT))
             return
         if subject.lower() == 'testing':
             log.info(
                 'TESTING. Skipping Jira ticket creation and DESDM team email.'
             )
             DB.mark_received(token)
             self.set_status(200)
             self.render('submission_confirmed.html',
                         rootPath=r'{}/'.format(Settings.APP_ROOT))
             return
         try:
             jira_ticket.create_ticket(
                 first_name,
                 last_name,
                 email,
                 topics,
                 subject,
                 message,
             )
         except Exception as e:
             log.error(f'''Error creating Jira ticket: {e}''')
             self.write(
                 f'<p>There was an error confirming your form. Please try again.</p><pre>{e}</pre>'
             )
             self.set_status(500)
             self.flush()
             self.finish()
             return
         try:
             assert DB.mark_received(token)
         except Exception as e:
             log.error(f'''Error deleting request record: {e}''')
             self.set_status(200)
             self.flush()
             self.finish()
             return
         self.set_status(200)
         self.render('submission_confirmed.html',
                     rootPath=r'{}/'.format(Settings.APP_ROOT))
Example #28
 def stop(self):
     log.info('Stopping...')
     if not self.stopped.is_set():
         self.io_loop.stop()