def main(argv: List[str]) -> None:
    """Entry point for the SDP master controller.

    Configures logging and restart handling, parses command-line arguments,
    installs signal handlers, and runs the katcp device server inside an
    aiomonitor session until shutdown.
    """
    katsdpservices.setup_logging()
    katsdpservices.setup_restart()
    args = master_controller.parse_args(argv)
    if args.log_level is not None:
        logging.root.setLevel(args.log_level.upper())

    if args.interface_mode:
        logging.warning("Note: Running master controller in interface mode. "
                        "This allows testing of the interface only, "
                        "no actual command logic will be enacted.")

    # Only rewrite GUI URLs when running behind haproxy; otherwise sensors are
    # passed through unmodified.
    rewrite_gui_urls: Optional[Callable[[aiokatcp.Sensor], bytes]]
    if args.haproxy:
        rewrite_gui_urls = functools.partial(web.rewrite_gui_urls, args.external_url)
    else:
        rewrite_gui_urls = None

    loop = asyncio.get_event_loop()
    server = master_controller.DeviceServer(args, rewrite_gui_urls=rewrite_gui_urls)
    # Trigger graceful shutdown on either SIGINT or SIGTERM.
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, functools.partial(handle_signal, server))
    # locals() is handed to aiomonitor so its interactive console can inspect
    # the server, loop and args at runtime.
    with katsdpservices.start_aiomonitor(loop, args, locals()):
        loop.run_until_complete(async_main(server, args))
    loop.close()
Пример #2
0
def main() -> None:
    """Generate an HTML QA report plus metadata for one stream of a dataset.

    Output goes into a uniquely-named directory under ``output_dir``; a
    ``.writing`` suffix marks the directory as in-progress until the report
    is complete, at which point it is atomically renamed into place.
    """
    argp = argparse.ArgumentParser()
    argp.add_argument('dataset', type=str, help='Input dataset')
    argp.add_argument('output_dir', type=str, help='Parent directory for output')
    argp.add_argument('prefix', type=str, help='Prefix for output directories and filenames')
    argp.add_argument('stream', type=str, help='Stream name for telescope state inputs')
    argp.add_argument('--log-level', type=str, metavar='LEVEL',
                      help='Logging level [INFO]')
    opts = argp.parse_args()
    katsdpservices.setup_logging()
    if opts.log_level is not None:
        logging.getLogger().setLevel(opts.log_level.upper())

    dataset = katdal.open(opts.dataset, chunk_store=None, upgrade_flags=False)
    # View the telescope state at <capture_block_id>.<stream>.
    root_ts = dataset.source.telstate.wrapped.root()
    telstate = root_ts.view(root_ts.join(dataset.source.capture_block_id, opts.stream))

    # A UUID in the directory name keeps concurrent runs from colliding.
    dest_dir = os.path.join(opts.output_dir, '{}_{}'.format(opts.prefix, uuid.uuid4()))
    report_name = opts.prefix + '_report.html'
    staging_dir = dest_dir + '.writing'
    os.mkdir(staging_dir)
    try:
        common_stats, target_stats = report.get_stats(dataset, telstate)
        report.write_report(common_stats, target_stats, os.path.join(staging_dir, report_name))
        report.write_metadata(dataset, common_stats, target_stats,
                              os.path.join(staging_dir, 'metadata.json'))
        # Publish the finished directory by dropping the .writing suffix.
        os.rename(staging_dir, dest_dir)
    except Exception:
        # Make a best effort to clean up the partial output.
        shutil.rmtree(staging_dir, ignore_errors=True)
        raise
Пример #3
0
def main() -> None:
    """Entry point for the CAM-to-telstate translation service."""
    katsdpservices.setup_logging()
    katsdpservices.setup_restart()
    args = parse_args()
    logger = logging.getLogger("katsdpcam2telstate")

    loop = asyncio.get_event_loop()
    client = Client(args, logger)
    client.parse_streams()
    # locals() is passed so the aiomonitor console can inspect the client,
    # logger and loop while the service is running.
    with katsdpservices.start_aiomonitor(loop, args, locals()):
        loop.run_until_complete(client.run())
Пример #4
0
 def test_simple(self):
     """Default settings: DEBUG suppressed, multi-line messages kept verbatim."""
     katsdpservices.setup_logging()
     logging.debug('debug message')
     logging.info('info message')
     logging.warning('warning message\nwith\nnewlines')
     captured = self.stderr.getvalue()
     # Only INFO and WARNING should appear, and the embedded newlines of the
     # warning must pass through unchanged.
     expected = re.compile(
         "\\A2017-03-02T14:02:03.125Z - test_logging.py:\\d+ - INFO - info message\n"
         "2017-03-02T14:02:03.125Z - test_logging.py:\\d+ - WARNING - warning message\n"
         "with\n"
         "newlines\n\\Z", re.M)
     self.assertRegex(captured, expected)
Пример #5
0
 def test_log_level(self):
     """KATSDP_LOG_LEVEL=debug enables DEBUG-level output."""
     os.environ['KATSDP_LOG_LEVEL'] = 'debug'
     katsdpservices.setup_logging()
     logging.debug('debug message')
     logging.info('info message')
     logging.warning('warning message\nwith\nnewlines')
     captured = self.stderr.getvalue()
     # All three messages appear, including the DEBUG one.
     expected = re.compile(
         "\\A2017-03-02T14:02:03.125Z - test_logging.py:\\d+ - DEBUG - debug message\n"
         "2017-03-02T14:02:03.125Z - test_logging.py:\\d+ - INFO - info message\n"
         "2017-03-02T14:02:03.125Z - test_logging.py:\\d+ - WARNING - warning message\n"
         "with\n"
         "newlines\n\\Z", re.M)
     self.assertRegex(captured, expected)
Пример #6
0
 def test_one_line(self):
     """KATSDP_LOG_ONELINE folds embedded newlines into backslash-space."""
     os.environ['KATSDP_LOG_ONELINE'] = '1'
     katsdpservices.setup_logging()
     logging.debug('debug message')
     logging.info('info message')
     logging.warning('warning message\nwith\nnewlines')
     captured = self.stderr.getvalue()
     # Each "\\\\ " below matches a literal backslash followed by a space:
     # one level of escaping for Python, one for the regex engine.
     expected = re.compile(
         "\\A2017-03-02T14:02:03.125Z - test_logging.py:\\d+ - INFO - info message\n"
         "2017-03-02T14:02:03.125Z - test_logging.py:\\d+ - WARNING - warning message\\\\ "
         "with\\\\ "
         "newlines\n\\Z", re.M)
     self.assertRegex(captured, expected)
Пример #7
0
async def main():
    """Populate the telescope state from a simulated dataset."""
    setup_logging()
    opts = parse_opts()

    logging.info("Opening file %s", opts.file)
    sim = SimData.factory(opts.file, bchan=opts.bchan, echan=opts.echan,
                          n_substreams=opts.substreams)

    ts = opts.telstate
    async with sim:
        # Start from a clean slate so stale keys do not leak into this run.
        logging.info("Clearing telescope state")
        ts.clear()

        logging.info("Setting values in telescope state")
        sim.setup_telstate(ts)
Пример #8
0
 def _test_gelf(self, localname, extra):
     """Common driver for the GELF logging tests.

     Binds a local UDP socket, points the GELF handler at it through
     environment variables, logs a single message and compares the decoded
     payload field by field.
     """
     self.maxDiff = 4096  # diffs of the payload dict are large otherwise
     sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                          socket.IPPROTO_UDP)
     with closing(sock):
         # Bind to an ephemeral port and tell the handler where to send.
         sock.bind(('127.0.0.1', 0))
         port = sock.getsockname()[1]
         os.environ['KATSDP_LOG_GELF_ADDRESS'] = '127.0.0.1:{}'.format(port)
         if localname:
             os.environ['KATSDP_LOG_GELF_LOCALNAME'] = 'myhost'
         if extra:
             os.environ[
                 'KATSDP_LOG_GELF_EXTRA'] = '{"hello": "world", "number": 3}'
         # Fake the container ID
         container_id = "abcdef0123456789"
         with mock.patch('katsdpservices.logging.docker_container_id',
                         return_value=container_id):
             katsdpservices.setup_logging()
         # exc_info=False exercises the fix for
         # https://github.com/keeprocking/pygelf/issues/29
         logging.info('info message', exc_info=False)
         payload = zlib.decompress(sock.recv(4096))
     received = json.loads(payload.decode('utf-8'))
     # This dictionary may need updating if the GELF implementation changes.
     expected = {
         "timestamp": self.time.return_value,
         "version": "1.1",
         "short_message": "info message",
         "_logger_name": "katsdpservices.test.dummy",
         "_file": mock.ANY,
         "_line": mock.ANY,
         "_func": "_test_gelf",
         "_module": "test_logging",
         "_docker.id": container_id,
         "_timestamp_precise": "2017-03-02T14:02:03.125125Z",
         "level": 6,
         "host": "myhost" if localname else mock.ANY,
         "_stack_info": None,
     }
     if extra:
         expected["_hello"] = "world"
         expected["_number"] = 3
     self.assertEqual(received, expected)
Пример #9
0
async def main():
    """Replay a simulated dataset over SPEAD using a pre-configured telstate."""
    setup_logging()
    opts = parse_opts()

    logger.info("Use TS set up by sim_ts.py and run_cal.py scripts.")
    ts = opts.telstate
    substream_count = get_n_substreams(ts)

    sim = SimData.factory(opts.file, opts.server, bchan=opts.bchan, echan=opts.echan,
                          n_substreams=substream_count)
    async with sim:
        # capture-init / capture-done bracket the transmission.
        logger.info("Issuing capture-init")
        await sim.capture_init()
        logger.info("TX: start.")
        await sim.data_to_spead(ts, opts.l0_spead, opts.l0_rate, max_scans=opts.max_scans,
                                interface=opts.l0_interface)
        logger.info("TX: ended.")
        logger.info("Issuing capture-done")
        await sim.capture_done()
Пример #10
0
def main():
    """Load a dataset, run the imaging frontend on a CUDA device, write output."""
    cli = get_parser().parse_args(namespace=arguments.SmartNamespace())
    katsdpservices.setup_logging()
    if cli.log_level is not None:
        logger.setLevel(cli.log_level.upper())

    profiling.Profiler.set_profiler(profiling.FlamegraphProfiler())

    loaded = loader.load(cli.input_file, cli.input_option, cli.start_channel,
                         cli.stop_channel)
    with closing(loaded) as dataset:
        writer = Writer(cli, dataset)
        # Restrict device selection to CUDA devices only.
        ctx = accel.create_some_context(interactive=False,
                                        device_filter=lambda dev: dev.is_cuda)
        cmd_queue = ctx.create_command_queue()
        frontend.run(cli, ctx, cmd_queue, dataset, writer)
        # frontend.run fills in cli.stop_channel in place, so it is valid here
        # even when the user did not supply it.
        writer.finalize(dataset, cli.start_channel, cli.stop_channel)
Пример #11
0
def main() -> None:
    """Entry point for the capture/file-writer service.

    Parses arguments, validates the output directory, then runs the katcp
    capture server until SIGINT/SIGTERM triggers shutdown.
    """
    katsdpservices.setup_logging()
    katsdpservices.setup_restart()

    args = parse_args()
    if args.log_level is not None:
        logging.root.setLevel(args.log_level.upper())
    if args.file_base is None and args.stats is None:
        # Deliberately a warning rather than an error: the server can still
        # run, it just will not produce any output.
        logging.warning(
            'Neither --file-base nor --stats was given; nothing useful will happen'
        )
    if args.file_base is not None and not os.access(args.file_base, os.W_OK):
        logging.error('Target directory (%s) is not writable', args.file_base)
        sys.exit(1)

    loop = asyncio.get_event_loop()
    server = KatcpCaptureServer(args, loop)
    # Both signals funnel into the same graceful-shutdown path.
    loop.add_signal_handler(signal.SIGINT, lambda: on_shutdown(server))
    loop.add_signal_handler(signal.SIGTERM, lambda: on_shutdown(server))
    # locals() is passed so the aiomonitor console can inspect server state.
    with katsdpservices.start_aiomonitor(loop, args, locals()):
        loop.run_until_complete(server.start())
        loop.run_until_complete(server.join())
    loop.close()
Пример #12
0
    # NOTE(review): this is a fragment — the enclosing function's def line is
    # not visible in this excerpt; upload_list, trawl_dir, boto_dict and
    # solr_url presumably come from its parameters. Confirm against the
    # original file.
    upload_size = sum([product.upload_size() for product in upload_list])
    upload_files = []
    # Mark each product as in-transfer and collect its staged files.
    for product in upload_list:
        product.update_state('TRANSFER_STARTED')
        upload_files.extend(product.staged_for_transfer)
    # // 1e6 converts (presumably bytes) to truncated megabytes.
    logger.debug("Uploading %.2f MB of data", (upload_size // 1e6))
    uploader = Uploader(trawl_dir, boto_dict, upload_files)
    uploader.upload()
    failed_count = uploader.set_failed_tokens(solr_url)
    # NOTE(review): "where" in the message below should read "were".
    logger.info(
        f'A total of {failed_count} exceptions where encountered this cycle.')
    return upload_size


if __name__ == "__main__":
    # Script entry point: configure logging/restart handling and parse the
    # command-line options for the visibility trawler.
    katsdpservices.setup_logging()
    # NOTE(review): basicConfig after setup_logging is likely a no-op when
    # handlers are already installed — confirm it is intentional.
    logging.basicConfig(level=logging.INFO)
    katsdpservices.setup_restart()

    parser = OptionParser(usage="vis_trawler.py <trawl_directory>")
    parser.add_option("--s3-host", default="localhost",
                      help="S3 gateway host address [default = %default]")
    parser.add_option("--s3-port", type="int", default=7480,
                      help="S3 gateway port [default = %default]")
    parser.add_option("--solr-url", default="http://kat-archive.kat.ac.za:8983/solr/kat_core",
                      help="Solr end point for metadata extraction [default = %default]")

    (options, args) = parser.parse_args()
    # Without a valid trawl directory there is nothing to do: show usage
    # (the module docstring) and exit cleanly.
    if len(args) < 1 or not os.path.isdir(args[0]):
        print(__doc__)
        sys.exit()
Пример #13
0
def main():
    """Run the online continuum imaging pipeline for one capture block.

    Opens the observation through katdal, merges per-band configuration
    defaults with user-supplied UVBlAvg/MFImage parameters, sets up the
    AIPS/FITS data areas and telstate view, executes the pipeline and
    finally produces QA products in the output directory.
    """
    setup_logging()
    parser = create_parser()
    args = parser.parse_args()

    # Open the observation
    if (args.access_key is not None) != (args.secret_key is not None):
        parser.error('--access-key and --secret-key must be used together')
    if args.access_key is not None and args.token is not None:
        parser.error('--access-key/--secret-key cannot be used with --token')
    open_kwargs = {}
    if args.access_key is not None:
        open_kwargs['credentials'] = (args.access_key, args.secret_key)
    elif args.token is not None:
        open_kwargs['token'] = args.token
    katdata = katdal.open(args.katdata, applycal='l1', **open_kwargs)

    post_process_args(args, katdata)

    uvblavg_args, mfimage_args, band = _infer_defaults_from_katdal(katdata)

    # Get config defaults for uvblavg and mfimage and merge user supplied ones
    uvblavg_parm_file = pjoin(CONFIG, f'uvblavg_MKAT_{band}.yaml')
    log.info('UVBlAvg parameter file for %s-band: %s', band, uvblavg_parm_file)
    mfimage_parm_file = pjoin(CONFIG, f'mfimage_MKAT_{band}.yaml')
    log.info('MFImage parameter file for %s-band: %s', band, mfimage_parm_file)

    user_uvblavg_args = get_and_merge_args(uvblavg_parm_file, args.uvblavg)
    user_mfimage_args = get_and_merge_args(mfimage_parm_file, args.mfimage)

    # Merge katdal defaults with user supplied defaults
    recursive_merge(user_uvblavg_args, uvblavg_args)
    recursive_merge(user_mfimage_args, mfimage_args)

    # Get the default config.
    dc = kc.get_config()
    # Set up aipsdisk configuration from args.workdir
    if args.workdir is not None:
        aipsdirs = [(None,
                     pjoin(args.workdir, args.capture_block_id + '_aipsdisk'))]
    else:
        aipsdirs = dc['aipsdirs']
    log.info('Using AIPS data area: %s', aipsdirs[0][1])

    # Set up output configuration from args.outputdir
    fitsdirs = dc['fitsdirs']

    outputname = args.capture_block_id + OUTDIR_SEPARATOR + args.telstate_id + \
        OUTDIR_SEPARATOR + START_TIME

    outputdir = pjoin(args.outputdir, outputname)
    # Set writing tag for duration of the pipeline
    work_outputdir = outputdir + WRITE_TAG
    # Append outputdir to fitsdirs
    # NOTE: Pipeline is set up to always place its output in the
    # highest numbered fits disk so we ensure that is the case
    # here.
    fitsdirs += [(None, work_outputdir)]
    log.info('Using output data area: %s', outputdir)

    kc.set_config(aipsdirs=aipsdirs, fitsdirs=fitsdirs)

    setup_aips_disks()

    # Add output_id and capture_block_id to configuration
    kc.set_config(cfg=kc.get_config(),
                  output_id=args.output_id,
                  cb_id=args.capture_block_id)

    # Set up telstate link then create
    # a view based the capture block ID and output ID
    telstate = TelescopeState(args.telstate)
    view = telstate.join(args.capture_block_id, args.telstate_id)
    ts_view = telstate.view(view)

    katdal_select = args.select
    katdal_select['nif'] = args.nif

    # Create Continuum Pipeline
    pipeline = pipeline_factory('online',
                                katdata,
                                ts_view,
                                katdal_select=katdal_select,
                                uvblavg_params=uvblavg_args,
                                mfimage_params=mfimage_args,
                                nvispio=args.nvispio)

    # Execute it
    metadata = pipeline.execute()

    # Create QA products if images were created
    if metadata:
        make_pbeam_images(metadata, outputdir, WRITE_TAG)
        make_qa_report(metadata, outputdir, WRITE_TAG)
        organise_qa_output(metadata, outputdir, WRITE_TAG)

        # Remove the writing tag from the output directory
        os.rename(work_outputdir, outputdir)
    else:
        # NOTE(review): os.rmdir only succeeds on an empty directory —
        # confirm the pipeline leaves the work dir empty when no images
        # are produced.
        os.rmdir(work_outputdir)
Пример #14
0
async def main():
    """Entry point for the meta-data writer service.

    Parses arguments, optionally verifies the S3 endpoint, then runs the
    katcp MetaWriter server until SIGINT/SIGTERM triggers shutdown.
    """
    katsdpservices.setup_logging()
    logger = logging.getLogger("katsdpmetawriter")
    katsdpservices.setup_restart()

    parser = katsdpservices.ArgumentParser()
    parser.add_argument('--rdb-path',
                        default="/var/kat/data",
                        metavar='RDBPATH',
                        help='Root in which to write RDB dumps')
    parser.add_argument('--store-s3',
                        dest='store_s3',
                        default=False,
                        action='store_true',
                        help='Enable storage of RDB dumps in S3')
    parser.add_argument(
        '--access-key',
        default="",
        metavar='ACCESS',
        help=
        'S3 access key with write permission to the specified bucket [unauthenticated]'
    )
    parser.add_argument(
        '--secret-key',
        default="",
        metavar='SECRET',
        help='S3 secret key for the specified access key [unauthenticated]')
    parser.add_argument('--s3-host',
                        default='localhost',
                        metavar='HOST',
                        help='S3 gateway host address [%(default)s]')
    parser.add_argument('--s3-port',
                        default=7480,
                        metavar='PORT',
                        help='S3 gateway port [%(default)s]')
    parser.add_argument('-p',
                        '--port',
                        type=int,
                        default=2049,
                        metavar='N',
                        help='KATCP host port [%(default)s]')
    parser.add_argument('-a',
                        '--host',
                        default="",
                        metavar='HOST',
                        help='KATCP host address [all hosts]')

    args = parser.parse_args()

    if not os.path.exists(args.rdb_path):
        logger.error("Specified RDB path, %s, does not exist.", args.rdb_path)
        sys.exit(2)

    botocore_dict = None
    if args.store_s3:
        botocore_dict = katsdpmetawriter.make_botocore_dict(args)
        # Probe the S3 endpoint up-front so misconfiguration is reported at
        # startup; an unreachable endpoint only degrades to local-only writes.
        async with katsdpmetawriter.get_s3_connection(
                botocore_dict, fail_on_boto=True) as s3_conn:
            if s3_conn:
                # we rebuild the connection each time we want to write a meta-data dump
                logger.info("Successfully tested connection to S3 endpoint.")
            else:
                logger.warning(
                    "S3 endpoint %s:%s not available. Files will only be written locally.",
                    args.s3_host, args.s3_port)
    else:
        logger.info(
            "Running in disk only mode. RDB dumps will not be written to S3")

    telstate = await get_async_telstate(args.telstate_endpoint)
    server = katsdpmetawriter.MetaWriterServer(args.host, args.port,
                                               botocore_dict, args.rdb_path,
                                               telstate)
    logger.info("Started meta-data writer server.")
    loop = asyncio.get_event_loop()
    await server.start()
    # Both signals funnel into the same shutdown handler.
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, lambda: on_shutdown(loop, server))
    await server.join()
    # Close the telstate backend cleanly before the loop exits.
    telstate.backend.close()
    await telstate.backend.wait_closed()
Пример #15
0
def main() -> None:
    """Parse arguments, configure logging and serve the web application."""
    cli = get_arguments()
    katsdpservices.setup_logging()
    logging.root.setLevel(cli.log_level.upper())
    web.run_app(make_app(cli), host=cli.bind, port=cli.port)
Пример #16
0
def create_parser():
    """Build the command-line parser for AIPS/FITS disk configuration.

    Both options accept a comma-separated list of paths and convert it to a
    list of ``(None, path)`` tuples, with surrounding whitespace stripped
    from each path.
    """
    def _disk_list(value):
        # "p1, p2" -> [(None, 'p1'), (None, 'p2')]
        return [(None, part.strip()) for part in value.split(',')]

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-a",
                        "--aipsdisks",
                        default=None,
                        type=_disk_list,
                        help="Comma separated list of paths to aipsdisks.")
    parser.add_argument("-f",
                        "--fitsdisks",
                        default=None,
                        type=_disk_list,
                        help="Comma separated list of paths to fitsdisks.")
    return parser


# Script body: configure logging, read the disk options, then prepare the
# AIPS environment.
setup_logging()

args = create_parser().parse_args()

# Register the AIPS/FITS data areas from the command line, then set up the
# disks and supporting tables/links (DADEVS, NETSP, Obit data).
# NOTE(review): kc is presumably the katacomb configuration module — confirm.
kc.set_config(aipsdirs=args.aipsdisks, fitsdirs=args.fitsdisks)
setup_aips_disks()
rewrite_dadevs()
rewrite_netsp()
link_obit_data()
Пример #17
0
def main() -> None:
    """Entry point for the SDP product controller.

    Parses arguments, connects to the master controller, registers a Mesos
    scheduler framework, then runs the katcp device server (and optional
    dashboard) inside an aiomonitor session until shutdown.
    """
    parser, args = parse_args()
    prepare_env(args)
    katsdpservices.setup_logging()
    katsdpservices.setup_restart()
    if args.log_level is not None:
        logging.root.setLevel(args.log_level.upper())

    logger = logging.getLogger('katsdpcontroller')
    logger.info("Starting SDP product controller...")
    logger.info('katcp: %s:%d', args.host, args.port)
    logger.info('http: %s', args.http_url)

    # Images are resolved through a katcp request to the master controller.
    master_controller = aiokatcp.Client(args.master_controller.host,
                                        args.master_controller.port)
    image_lookup = product_controller.KatcpImageLookup(master_controller)
    try:
        image_resolver_factory = make_image_resolver_factory(
            image_lookup, args)
    except ValueError as exc:
        # Surface configuration errors as a normal argparse usage error.
        parser.error(str(exc))

    # Mesos framework registration details.
    framework_info = addict.Dict()
    framework_info.user = args.user
    framework_info.name = args.subarray_product_id
    framework_info.checkpoint = True
    framework_info.principal = args.principal
    framework_info.roles = [args.realtime_role, args.batch_role]
    framework_info.capabilities = [{
        'type': 'MULTI_ROLE'
    }, {
        'type': 'TASK_KILLING_STATE'
    }]

    loop = asyncio.get_event_loop()
    sched = scheduler.Scheduler(
        args.realtime_role,
        args.host,
        args.http_port,
        args.http_url,
        task_stats=product_controller.TaskStats(),
        runner_kwargs=dict(access_log_class=web_utils.AccessLogger))
    sched.app.router.add_get('/metrics', web_utils.prometheus_handler)
    sched.app.router.add_get('/health', web_utils.health_handler)
    # Explicit acknowledgements let the scheduler control retry semantics.
    driver = pymesos.MesosSchedulerDriver(sched,
                                          framework_info,
                                          args.mesos_master,
                                          use_addict=True,
                                          implicit_acknowledgements=False)
    sched.set_driver(driver)
    driver.start()

    # Derive the dashboard URL unless one was given explicitly; port 0
    # disables the dashboard entirely.
    dashboard_path = f'/gui/{args.subarray_product_id}/product/dashboard/'
    dashboard_url: Optional[str] = args.dashboard_url
    if args.dashboard_port != 0 and dashboard_url is None:
        dashboard_url = str(
            yarl.URL.build(scheme='http',
                           host=args.external_hostname,
                           port=args.dashboard_port,
                           path=dashboard_path))

    server = product_controller.DeviceServer(
        args.host,
        args.port,
        master_controller,
        args.subarray_product_id,
        sched,
        batch_role=args.batch_role,
        interface_mode=False,
        localhost=args.localhost,
        image_resolver_factory=image_resolver_factory,
        s3_config=args.s3_config if args.s3_config is not None else {},
        graph_dir=args.write_graphs,
        dashboard_url=dashboard_url)
    if args.dashboard_port != 0:
        init_dashboard(server, args, dashboard_path)

    # locals() is handed to aiomonitor so its interactive console can inspect
    # the scheduler, driver and server at runtime.
    with katsdpservices.start_aiomonitor(loop, args, locals()):
        loop.run_until_complete(run(sched, server))
    loop.close()