Example #1
def get_default(sample, to):
    """
    Get default parset copy
    """
    log.info(
        "Dumping default configuration to {0:s} as requested. Goodbye!".format(to))
    sample_config = os.path.join(pckgdir, "sample_configurations",
                                 SAMPLE_CONFIGS[sample])
    os.system('cp {0:s} {1:s}'.format(sample_config, to))
Example #2
def make_symlink(link, target):
    if os.path.lexists(link):
        if os.path.islink(link):
            os.unlink(link)  # old symlink can go
        else:
            log.warning("{} already exists and is not a symlink, can't relink".format(link))
            return False
    if not os.path.lexists(link):
        os.symlink(target, link)
        log.info("{} links to {}".format(link, target))
Example #3
def flag_summary_plots(pipeline, json_flag_summary, prefix, wname, nob):
    """Generate flagging summary plots"""
    plots = {
        'field ': {'title': 'Field RFI summary',
                   'x_label': 'Field',
                   'y_label': 'Flagged data (%)',
                   'rotate_xlabel': False},
        'antenna ': {'title': 'Antenna RFI summary',
                     'x_label': 'Antenna',
                     'y_label': 'Flagged data (%)',
                     'rotate_xlabel': True},
        'scan ': {'title': 'Scans RFI summary',
                  'x_label': 'Scans',
                  'y_label': 'Flagged data (%)',
                  'rotate_xlabel': True},
        'correlation ': {'title': 'Correlation RFI summary',
                         'x_label': 'Correlation',
                         'y_label': 'Flagged data (%)',
                         'rotate_xlabel': False},
    }
    with open(json_flag_summary) as f:
        summary_flags = json.load(f)
    plot_list = []
    outfile = ('{0:s}/diagnostic_plots/{1:s}-{2:s}-'
               'flagging-summary-plots-{3:d}.html').format(
                  pipeline.output, prefix, wname, nob)
    for plot_key in summary_flags.keys():
        keys = summary_flags[plot_key]['key']
        flagged = summary_flags[plot_key]['value']
        if plot_key == 'scan ':
            zipped_lists = zip(list(map(int, keys)), flagged)
            keys, flagged = zip(*sorted(zipped_lists))
            keys = [str(key) for key in keys]

        rotate_xlabel = plots[plot_key]['rotate_xlabel']
        x_label = plots[plot_key]['x_label']
        y_label = plots[plot_key]['y_label']
        title = plots[plot_key]['title']
        plotter = figure(x_range=keys, x_axis_label=x_label, y_axis_label=y_label,
                         plot_width=600, plot_height=400, title=title)

        plotter.vbar(x=keys, top=flagged, width=0.9)

        plotter.xgrid.grid_line_color = None
        plotter.y_range.start = 0
        plotter.title.align = 'center'
        if rotate_xlabel:
            plotter.xaxis.major_label_orientation = math.pi/2
        plot_list.append(plotter)
    if len(plot_list) == 4:
        output_file(outfile)
        log.info("Saving flag summary plots in {}".format(outfile))
        save(column(row(plot_list[0], plot_list[3]),
                    row(plot_list[2], plot_list[1])))
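A self-contained sketch of the Bokeh pattern used above (a categorical vbar chart saved into a grid layout as HTML). The data are toy values, and plot_width/plot_height follow the same Bokeh 2.x API as the function above; Bokeh 3 renamed these to width/height.

from bokeh.layouts import column, row
from bokeh.plotting import figure, output_file, save

antennas = ["m000", "m001", "m002"]   # toy category labels
flagged_pct = [10.1, 14.7, 9.3]       # toy flagged percentages

plotter = figure(x_range=antennas, x_axis_label="Antenna", y_axis_label="Flagged data (%)",
                 plot_width=600, plot_height=400, title="Antenna RFI summary")
plotter.vbar(x=antennas, top=flagged_pct, width=0.9)
plotter.xgrid.grid_line_color = None
plotter.y_range.start = 0

output_file("flagging-summary-sketch.html")
save(column(row(plotter)))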
Example #4
def generate_report_notebooks(notebooks, output_dir, prefix, container_tech):
    opts = ["--non-interactive"]

    if container_tech == "docker":
        opts.append("--docker")
    elif container_tech == "singularity":
        opts.append("--singularity")
    else:
        log.warning(
            "Container technology '{}' not supported by radiopadre, skipping report rendering"
            .format(container_tech))
        return

    if caracal.DEBUG:
        opts += ['-v', '2', '--container-debug']

    ## disabling as per https://github.com/caracal-pipeline/caracal/issues/1161
    # # first time run with -u
    # global _radiopadre_updated
    # if not _radiopadre_updated:
    #     opts.append('--update')
    #     _radiopadre_updated = True
    start_time = time.time()

    log.info("Rendering report(s)")
    for notebook in notebooks:
        if prefix:
            notebook = "{}-{}".format(prefix, notebook)
        nbdest = os.path.join(output_dir, notebook + ".ipynb")
        nbhtml = os.path.join(output_dir, notebook + ".html")
        if os.path.exists(nbdest):
            try:
                xrun("run-radiopadre", opts + ["--nbconvert", nbdest], log=log)
            except StimelaCabRuntimeError as exc:
                log.warning(
                    "Report {} failed to render ({}). HTML report will not be available."
                    .format(nbhtml, exc))
            # check that HTML file actually showed up (sometimes the container doesn't report an error)
            if os.path.exists(nbhtml) and os.path.getmtime(nbhtml) >= start_time:
                log.info("Rendered report {}".format(nbhtml))
            else:
                log.warning("Report {} failed to render".format(nbhtml))
        else:
            log.warning(
                "Report notebook {} not found, skipping report rendering".
                format(nbdest))
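A small sketch of the freshness check used above: because the container can exit cleanly without producing output, the code trusts the output file's modification time rather than the exit status. The path is illustrative.

import os
import time

start_time = time.time()
nbhtml = "reports/obs1-observation.html"  # hypothetical renderer output

# ... the rendering command would run here ...

if os.path.exists(nbhtml) and os.path.getmtime(nbhtml) >= start_time:
    print("Rendered report {}".format(nbhtml))
else:
    print("Report {} failed to render".format(nbhtml))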
Example #5
    def __run(debug=False):
        """ Executes pipeline """
#        with stream_director(log) as director:  # stdout and stderr needs to go to the log as well -- nah

        try:
            # Obtain some divine knowledge
            cdb = mkct.calibrator_database()

            pipeline = worker_administrator(
                config, workers_directory,
                add_all_first=False, prefix=options.general_prefix,
                configFileName=options.config,
                singularity_image_dir=options.singularity_image_dir,
                container_tech=options.container_tech,
                start_worker=options.start_worker,
                end_worker=options.end_worker,
                generate_reports=not options.no_reports)

            if options.report:
                pipeline.regenerate_reports()
            else:
                pipeline.run_workers()
        except SystemExit as e:
            # if e.code != 0:
            log.error("A pipeline worker initiated sys.exit({0:}). This is likely a bug, please report.".format(e.code))
            log.info("  More information can be found in the logfile at {0:s}".format(caracal.CARACAL_LOG))
            log.info("  You are running version {0:s}".format(str(__version__)), extra=dict(logfile_only=True))
            if debug:
                log.warning("you are running with -debug enabled, dropping you into pdb. Use Ctrl+D to exit.")
                pdb.post_mortem(sys.exc_info()[2])
            sys.exit(1)  # indicate failure

        except KeyboardInterrupt:
            log.error("Ctrl+C received from user, shutting down. Goodbye!")
        except Exception as exc:
            log.error("{} [{}]".format(exc, type(exc).__name__), extra=dict(boldface=True))
            log.info("  More information can be found in the logfile at {0:s}".format(caracal.CARACAL_LOG))
            log.info("  You are running version {0:s}".format(str(__version__)), extra=dict(logfile_only=True))
            for line in traceback.format_exc().splitlines():
                log.error(line, extra=dict(traceback_report=True))
            if debug:
                log.warning("you are running with -debug enabled, dropping you into pdb. Use Ctrl+D to exit.")
                pdb.post_mortem(sys.exc_info()[2])
            log.info("exiting with error code 1")
            sys.exit(1)  # indicate failure
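A standalone sketch of the post-mortem branch above: on an exception, print the traceback and drop into pdb on the traceback object of the exception currently being handled. The failing function is a stand-in.

import pdb
import sys
import traceback

def flaky_step():
    raise ValueError("simulated worker failure")

try:
    flaky_step()
except Exception:
    for line in traceback.format_exc().splitlines():
        print(line)
    # equivalent of the -debug branch: inspect the failure interactively
    pdb.post_mortem(sys.exc_info()[2])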
Example #6
def parse_cabspec_dict(self, cabspec_seq):
    """Turns sequence of cabspecs into a Stimela cabspec dict"""
    cabspecs = OrderedDict()
    speclists = OrderedDict()
    # collect all specs encountered, sort them by cab
    for spec in cabspec_seq:
        name, version, tag = spec["name"], spec.get("version") or None, spec.get("tag") or None
        if not version and not tag:
            log.warning(f"Neither version nor tag specified for cabspec {name}, ignoring")
            continue
        speclists.setdefault(name, []).append((version, tag))
    # now process each cab's list of specs.
    for name, speclist in speclists.items():
        if len(speclist) == 1:
            version, tag = speclist[0]
            if version is None:
                log.info(f"  {name}: forcing tag {tag} for all invocations")
                cabspecs[name] = dict(tag=tag, force=True)
                continue
            elif tag is None:
                log.info(f"  {name}: forcing version {version} for all invocations")
                cabspecs[name] = dict(version=version)
                continue
        # else make dict of version: tag pairs
        cabspecs[name] = dict(version={version: tag for version, tag in speclist}, force=True)
        for version, tag in speclist:
            log.info(f"  {name}: using tag {tag} for version {version}")
    return cabspecs
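A hypothetical input for parse_cabspec_dict together with the result implied by the branches above; the cab names, versions and tags are made up for illustration.

cabspec_seq = [
    {"name": "wsclean", "version": "2.9", "tag": "1.6.3"},
    {"name": "wsclean", "version": "3.0", "tag": "1.7.0"},
    {"name": "casa", "tag": "stable"},       # tag only: forced for all invocations
    {"name": "cubical", "version": "1.5"},   # version only: pinned, not forced
]
# expected result:
# {
#     "wsclean": {"version": {"2.9": "1.6.3", "3.0": "1.7.0"}, "force": True},
#     "casa": {"tag": "stable", "force": True},
#     "cubical": {"version": "1.5"},
# }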
Example #7
def get_json_flag_summary(pipeline, flagmanager_summary_file, prefix, wname):
    """Generate json file with flag summary using log from flagmanager"""
    with open(flagmanager_summary_file) as f:
        data = f.readlines()

    summary_flags = {}
    main_separator = ' flagged:'
    summary_keys = ['field ', 'antenna ', 'scan ', 'correlation ']
    json_file = flagmanager_summary_file.replace('.txt', '.json')
    json_file = json_file.replace('log-flagging-', wname)
    for summary_key in summary_keys:
        keys = []
        flagged = []
        for d in data:
            if summary_key in d:
                value = d.split(summary_key)[1].split(main_separator)[0]
                if value not in keys:
                    keys.append(value)
                    flagged.append(float(d.split()[-1][1:-2]))
        summary_flags[summary_key] = {'key': keys, 'value': flagged}
    log.info("Saving flag summary in {}".format(json_file))
    with open(json_file, 'w') as f:
        json.dump(summary_flags, f)
    return json_file
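A hypothetical illustration of the JSON structure written here and read back by flag_summary_plots() in Example #3; note the deliberate trailing space in each key, which matches the tokens parsed out of the flagmanager log. The values are invented.

summary_flags = {
    "field ": {"key": ["J0408-6545"], "value": [12.3]},
    "antenna ": {"key": ["m000", "m001"], "value": [10.1, 14.7]},
    "scan ": {"key": ["1", "2"], "value": [9.8, 11.2]},
    "correlation ": {"key": ["XX", "YY"], "value": [11.0, 12.5]},
}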
Example #8
    def run_workers(self):
        """ Runs the  workers """
        report_updated = False

        for _name, _worker, i in self.workers:
            try:
                worker = __import__(_worker)
            except ImportError:
                traceback.print_exc()
                raise ImportError('Worker "{0:s}" could not be found at {1:s}'.format(
                    _worker, self.workers_directory))

        if self.config["general"]["cabs"]:
            log.info("Configuring cab specification overrides")
            cabspecs_general = self.parse_cabspec_dict(self.config["general"]["cabs"])
        else:
            cabspecs_general = {}

        active_workers = []
        # first, check that workers import, and check their configs
        for _name, _worker, i in self.workers:
            config = self.config[_name]
            if 'enable' in config and not config['enable']:
                self.skip.append(_worker)
                continue
            log.info("Configuring worker {}".format(_name))
            try:
                worker = __import__(_worker)
            except ImportError:
                log.error('Error importing worker "{0:s}" from {1:s}'.format(_worker, self.workers_directory))
                raise
            if hasattr(worker, 'check_config'):
                worker.check_config(config)
            # check for cab specs
            cabspecs = cabspecs_general
            if config["cabs"]:
                cabspecs = cabspecs.copy()
                cabspecs.update(self.parse_cabspec_dict(config["cabs"]))
            active_workers.append((_name, worker, config, cabspecs))

        # now run the actual pipeline
        #for _name, _worker, i in self.workers:
        for _name, worker, config, cabspecs in active_workers:
            # Define stimela recipe instance for worker
            # Also change logger name to avoid duplication of logging info
            label = getattr(worker, 'LABEL', None)
            if label is None:
                # if label is not set, take filename, and split off _worker.py
                label = os.path.basename(worker.__file__).rsplit("_", 1)[0]

            recipe = stimela.Recipe(label,
                                    ms_dir=self.msdir,
                                    singularity_image_dir=self.singularity_image_dir,
                                    log_dir=self.logs,
                                    cabspecs=cabspecs,
                                    logfile=False, # no logfiles for recipes
                                    logfile_task=f'{self.logs}/log-{label}-{{task}}-{self.timeNow}.txt')

            recipe.JOB_TYPE = self.container_tech
            self.CURRENT_WORKER = _name
            # Don't allow pipeline-wide resume
            # functionality
            os.system('rm -f {}'.format(recipe.resume_file))
            # Get recipe steps
            # 1st get correct section of config file
            log.info("{0:s}: initializing".format(label), extra=dict(color="GREEN"))
            worker.worker(self, recipe, config)
            log.info("{0:s}: running".format(label))
            recipe.run()
            log.info("{0:s}: finished".format(label))

            # this should be in the cab cleanup code, no?

            casa_last = glob.glob(self.output + '/*.last')
            for file_ in casa_last:
                os.remove(file_)

            # update report at end of worker if so configured
            if self.generate_reports and config["report"]:
                self.regenerate_reports()
                report_updated = True
            else:
                report_updated = False

        # generate final report
        if self.config["general"]["final_report"] and self.generate_reports and not report_updated:
            self.regenerate_reports()

        log.info("pipeline run complete")
Example #9
    def init_pipeline(self, prep_input=True):
        def make_symlink(link, target):
            if os.path.lexists(link):
                if os.path.islink(link):
                    os.unlink(link)  # old symlink can go
                else:
                    log.warning("{} already exists and is not a symlink, can't relink".format(link))
                    return False
            if not os.path.lexists(link):
                os.symlink(target, link)
                log.info("{} links to {}".format(link, target))

        # First create input folders if they don't exist
        if not os.path.exists(self.input):
            os.mkdir(self.input)
        if not os.path.exists(self.output):
            os.mkdir(self.output)
        if not os.path.exists(self.rawdatadir):
            os.mkdir(self.rawdatadir)
        if not os.path.exists(self.obsinfo):
            os.mkdir(self.obsinfo)
        if not os.path.exists(self.logs):
            os.mkdir(self.logs)
        log.info("output directory for logs is {}".format(self.logs))
        make_symlink(self.logs_symlink, os.path.basename(self.logs))
        if not os.path.exists(self.reports):
            os.mkdir(self.reports)
        if not os.path.exists(self.diagnostic_plots):
            os.mkdir(self.diagnostic_plots)
        if not os.path.exists(self.configFolder):
            os.mkdir(self.configFolder)
        if not os.path.exists(self.caltables):
            os.mkdir(self.caltables)
        if not os.path.exists(self.masking):
            os.mkdir(self.masking)
        if not os.path.exists(self.continuum):
            os.mkdir(self.continuum)
        if not os.path.exists(self.cubes):
            os.mkdir(self.cubes)
        # create proper logfile and start flushing
        # NB (Oleg): placing this into output rather than output/logs to make the reporting notebooks easier
        CARACAL_LOG_BASENAME = 'log-caracal.txt'
        caracal.CARACAL_LOG = os.path.join(self.logs, CARACAL_LOG_BASENAME)
        caracal.log_filehandler.setFilename(caracal.CARACAL_LOG, delay=False)

        # placing a symlink into logs to appease Josh
        make_symlink(os.path.join(self.output, CARACAL_LOG_BASENAME),
                     os.path.join(os.path.basename(self.logs), CARACAL_LOG_BASENAME))

        # Copy input data files into pipeline input folder
        if prep_input:
            log.info("Copying MeerKAT input files into input folder")
            datadir = "{0:s}/data/meerkat_files".format(pckgdir)
            for filename in os.listdir(datadir):
                src = os.path.join(datadir, filename)
                dest = os.path.join(self.input, filename)
                if not os.path.exists(dest):
                    if os.path.isdir(src):
                        shutil.copytree(src, dest)
                    else:
                        shutil.copy2(src, dest, follow_symlinks=False)

        # Copy standard notebooks
        self._init_notebooks = self.config['general']['init_notebooks']
        self._report_notebooks = self.config['general']['report_notebooks']
        all_nbs = set(self._init_notebooks) | set(self._report_notebooks)
        if all_nbs:
            notebooks.setup_default_notebooks(all_nbs, output_dir=self.output, prefix=self.prefix, config=self.config)
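A sketch of the same directory bootstrap using os.makedirs with exist_ok=True, which collapses the repeated exists/mkdir checks; the directory names are illustrative stand-ins for the pipeline attributes.

import os

for directory in ("input", "output", "output/logs", "output/diagnostic_plots", "output/caltables"):
    os.makedirs(directory, exist_ok=True)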
Example #10
def worker(pipeline, recipe, config):
    uvrange = config['uvrange']
    plotter = config['standard_plotter']

    label_in = config['label_in']
    nobs = pipeline.nobs

    subdir = config['dirname']
    output_dir = os.path.join(pipeline.diagnostic_plots, subdir) if subdir else pipeline.diagnostic_plots

    if config['field'] == 'calibrators':
        fields = ['bpcal', 'gcal', 'fcal']
    elif config['field'] == 'target':
        fields = ['target']
    else:
        fields = config['field'].split(',')
        if set(fields).difference(['fcal', 'bpcal', 'gcal']):
            raise ValueError("Eligible values for 'field': 'target', 'calibrators', 'fcal', 'bpcal' or 'gcal'. " \
                             "User selected {}".format(",".join(fields)))
    log.info(f"plotting fields: {' '.join(fields)}")

    for iobs in range(nobs):
        
        label = config['label_plot']

        mslist = pipeline.get_mslist(iobs, label_in, target=(config['field'] == 'target'))

        for msname in mslist:
            if not os.path.exists(os.path.join(pipeline.msdir, msname)):
                raise IOError("MS {0:s} does not exist. Please check that it is where it should be.".format(msname))

        for msname in mslist:
            log.info(f"plotting MS: {msname}")
            msbase = os.path.splitext(msname)[0]

            ms_info_dict = pipeline.get_msinfo(msname)

            corr = config['correlation']
            ms_corrs = ms_info_dict['CORR']['CORR_TYPE']

            if corr == 'auto' or corr == 'all':
                corr = ','.join(ms_corrs)
            elif corr == 'diag' or corr == 'parallel':
                corr = ','.join([c for c in ms_corrs if len(c) == 2 and c[0] == c[1]])
            if not corr:
                log.warning(f"No correlations found to plot for {msname}")
                continue
            log.info(f"plotting correlations: {corr}")

            # new-school plots
            if config['shadems']['enable']:
                # make dict of substitutions
                basesubst = OrderedDict(msbase=os.path.splitext(msname)[0])

                # make a map: {(fields): field_type}, so we can loop over fields below, but only include unique fields
                field_map = OrderedDict()
                # make a reverse map: field_type -> "field_name,field_name"
                field_map_names = OrderedDict()
                # make set of all field names
                all_fields = set()
                for field_type in fields:
                    if (label_in != '') and (config['field'] == 'target'):
                        field_names = tuple(ms_info_dict['FIELD']['NAME'])
                    else:
                        field_names = tuple(getattr(pipeline, field_type)[iobs])
                    field_map.setdefault(field_names, field_type)
                    all_fields.update(field_names)
                    basesubst[field_type] = field_map_names[field_type] = ",".join(field_names)
                basesubst["all_fields"] = ",".join(all_fields)

                plot_args = []
                def collect_plots(args, plotspecs, extra_args, subst=None):
                    """Generic helper function to parse a list of shadems plot specs, and add them to plot_args"""
                    for iplot, plotspec in enumerate(plotspecs):
                        if plotspec:
                            plotspec = plotspec.format(**(subst or basesubst))
                            # add arguments from args, if not present in plotspec
                            plotspec = plotspec.split()
                            for arg, value in args.items():
                                arg = "--" + arg
                                if arg not in plotspec:
                                    plotspec += [arg, value]
                            plotspec += extra_args

                            plot_args.append(" ".join(plotspec))

                baseargs = OrderedDict(
                    png="{}-{}-{}".format(msbase, label,
                                          "{field}{_Spw}{_Scan}{_Ant}-{label}{_alphalabel}{_colorlabel}{_suffix}.png"),
                    title="'{ms} {_field}{_Spw}{_Scan}{_Ant}{_title}{_Alphatitle}{_Colortitle}'",
                    col=config['shadems']['default_column'],
                    corr=corr.replace(' ', ''))

                # collect generic plots
                collect_plots(baseargs, config['shadems']['plots'], [])

                # collect plots_by_corr
                args = baseargs.copy()
                args["field"] = ",".join(all_fields)
                collect_plots(args, config['shadems']['plots_by_corr'], ["--iter-corr"])

                # collect plots_by_field
                for field_names, field_type in field_map.items():
                    args = baseargs.copy()
                    args["field"] = ",".join(field_names)
                    args["png"] = "{}-{}-{}-{}".format(msbase, label, field_type,
                                  "{field}{_Spw}{_Scan}{_Ant}-{label}{_alphalabel}{_colorlabel}{_suffix}.png")
                    args["title"] = "'{ms} " + field_type + "{_field}{_Spw}{_Scan}{_Ant}{_title}{_Alphatitle}{_Colortitle}'"
                    subst = basesubst.copy()
                    subst["field"] = field_type
                    collect_plots(args, config['shadems']['plots_by_field'], ["--iter-field"])

                # dispatch plots
                if plot_args:
                    step = 'plot-shadems-ms{0:d}'.format(iobs)
                    recipe.add("cab/shadems_direct", step,
                               dict(ms=msname, args=plot_args,
                                    ignore_errors=config["shadems"]["ignore_errors"]),
                               input=pipeline.input, output=output_dir,
                               label="{0:s}:: Plotting".format(step))
                else:
                    log.warning("The shadems section is enabled, but doesn't specify any plot_by_field or plot_by_corr")

            # old-school plots

            # define plot attributes
            diagnostic_plots = {}
            diagnostic_plots["real_imag"] = dict(
                plotms={"xaxis": "imag", "yaxis": "real",
                        "colouraxis": "baseline", "iteraxis": "corr"},
                shadems={"xaxis": "real", "yaxis": "imag"},
                ragavi_vis={"xaxis": "real", "yaxis": "imaginary",
                            "iter-axis": "scan", "canvas-width": 300,
                            "canvas-height": 300})

            diagnostic_plots["amp_phase"] = dict(
                plotms={"xaxis": "amp", "yaxis": "phase",
                        "colouraxis": "baseline", "iteraxis": "corr"},
                shadems={"xaxis": "amp", "yaxis": "phase"},
                ragavi_vis={"xaxis": "phase", "yaxis": "amplitude",
                            "iter-axis": "corr", "canvas-width": 1080,
                            "canvas-height": 720})

            diagnostic_plots["amp_ant"] = dict(
                plotms={"xaxis": "antenna", "yaxis": "amp",
                        "colouraxis": "baseline", "iteraxis": "corr"},
                shadems={"xaxis": "ANTENNA1", "yaxis": "amp"},
                ragavi_vis=None)

            diagnostic_plots["amp_uvwave"] = dict(
                plotms={"xaxis": "uvwave", "yaxis": "amp",
                        "colouraxis": "baseline", "iteraxis": "corr"},
                shadems={"xaxis": "UV", "yaxis": "amp"},
                ragavi_vis={"xaxis": "uvwave", "yaxis": "amplitude",
                            "iter-axis": "scan", "canvas-width": 300,
                            "canvas-height": 300})

            diagnostic_plots["phase_uvwave"] = dict(
                plotms={"xaxis": "uvwave", "yaxis": "phase",
                        "colouraxis": "baseline", "iteraxis": "corr"},
                shadems={"xaxis": "UV", "yaxis": "phase"},
                ragavi_vis={"xaxis": "uvwave", "yaxis": "phase",
                            "iter-axis": "scan", "canvas-width": 300,
                            "canvas-height": 300})

            diagnostic_plots["amp_scan"] = dict(
                plotms={"xaxis": "scan", "yaxis": "amp"},
                shadems={"xaxis": "SCAN_NUMBER", "yaxis": "amp"},
                ragavi_vis={"xaxis": "scan", "yaxis": "amplitude",
                            "iter-axis": None,
                            "canvas-width": 1080, "canvas-height": 720})

            diagnostic_plots["amp_chan"] = dict(
                plotms={"xaxis": "chan", "yaxis": "amp"},
                shadems={"xaxis": "CHAN", "yaxis": "amp"},
                ragavi_vis={"xaxis": "channel", "yaxis": "amplitude",
                            "iter-axis": "scan", "canvas-width": 300,
                            "canvas-height": 300})

            diagnostic_plots["phase_chan"] = dict(
                plotms={"xaxis": "chan", "yaxis": "phase"},
                shadems={"xaxis": "CHAN", "yaxis": "phase"},
                ragavi_vis={"xaxis": "channel", "yaxis": "phase",
                            "iter-axis": "scan", "canvas-width": 300,
                            "canvas-height": 300})

            if plotter.lower() != "none":
                for plotname in diagnostic_plots:
                    if not pipeline.enable_task(config, plotname):
                        continue
                    opts = diagnostic_plots[plotname][plotter]
                    if opts is None:
                        log.warn("The plotter '{0:s}' cannot make the plot '{1:s}'".format(
                            plotter, plotname))
                        continue
                    elif plotter == "ragavi_vis":
                        opts["num-cores"] = config["num_cores"]
                        opts["mem-limit"] = config["mem_limit"]

                    # make map from field name to field_type, field_id
                    field_map = OrderedDict()
                    for field_type in fields:
                        for field in getattr(pipeline, field_type)[iobs]:
                            if label_in != '' and field_type == 'target':
                                fid = 0
                            else:
                                fid = utils.get_field_id(ms_info_dict, field)[0]
                            field_map.setdefault(field, (field_type, fid))

                    if plotter == "shadems":
                        corr = corr.replace(" ", "").split(",")
                        for it, co in enumerate(corr):
                            if co in ms_corrs:
                                corr[it] = str(ms_corrs.index(co))
                        corr = ",".join(corr)
                        # for each corr
                        for co in corr.split(","):
                            opts["corr"] = co
                            for field, (field_type, fid) in field_map.items():
                                globals()[plotter](pipeline, recipe, config,
                                                   plotname, msname, field,
                                                   iobs, label, msbase, opts,
                                                   ftype=field_type, fid=fid, output_dir=output_dir,
                                                   corr_label=ms_corrs[int(co)])

                    elif plotter == "ragavi_vis" and not opts["iter-axis"] == "corr":
                        # change the labels to indices
                        corr = corr.replace(" ", "").split(",")
                        for it, co in enumerate(corr):
                            if co in ms_corrs:
                                corr[it] = str(ms_corrs.index(co))
                        corr = ",".join(corr)

                        # for each corr
                        for co in corr.split(","):
                            opts["corr"] = co
                            for field, (field_type, fid) in field_map.items():
                                globals()[plotter](pipeline, recipe, config,
                                                   plotname, msname, field,
                                                   iobs, label, msbase, opts,
                                                   ftype=field_type, fid=fid, output_dir=output_dir,
                                                   corr_label=ms_corrs[int(co)])
                    else:
                        opts["corr"] = corr
                        for field, (field_type, fid) in field_map.items():
                            globals()[plotter](pipeline, recipe, config,
                                               plotname, msname, field, iobs, label,
                                               msbase, opts, ftype=field_type,
                                               fid=fid, output_dir=output_dir)
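A standalone illustration of the correlation-label-to-index conversion that appears twice above; the label list is an example of what the MS CORR_TYPE column might contain.

ms_corrs = ["XX", "XY", "YX", "YY"]
corr = "XX, YY"
corr = corr.replace(" ", "").split(",")
for it, co in enumerate(corr):
    if co in ms_corrs:
        corr[it] = str(ms_corrs.index(co))
corr = ",".join(corr)
print(corr)  # -> "0,3"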
Example #11
def setup_default_notebooks(notebooks, output_dir, prefix, config):
    # setup logos
    logodir = os.path.join(output_dir, ".logo")
    if not os.path.exists(logodir):
        os.mkdir(logodir)
    for png in glob.glob(os.path.join(SOURCE_NOTEBOOK_DIR, "*.png")):
        shutil.copy2(png, logodir)

    for notebook in notebooks:
        nbfile = notebook + ".ipynb"
        nbdest = os.path.join(
            output_dir, "{}-{}".format(prefix, nbfile) if prefix else nbfile)

        # overwrite destination only if source is newer
        dest_mtime = os.path.getmtime(nbdest) if os.path.exists(nbdest) else 0

        # if source is a template, invoke jinja
        nbsrc = os.path.join(SOURCE_NOTEBOOK_DIR, nbfile + ".j2")
        if os.path.exists(nbsrc):
            if os.path.getmtime(nbsrc) > dest_mtime:
                global _j2env
                if _j2env is None:
                    _j2env = jinja2.Environment(
                        loader=jinja2.PackageLoader('caracal', 'notebooks'),
                        autoescape=jinja2.select_autoescape(['html', 'xml']))

                template = _j2env.get_template(nbfile + ".j2")
                log.info("Creating standard notebook {} from template".format(
                    nbdest))

                with open(nbdest, "wt") as file:
                    try:
                        print(template.render(**config), file=file)
                    except jinja2.TemplateError as exc:
                        log.error(
                            "Error rendering notebook template: {}".format(
                                exc),
                            extra=dict(boldface=True))
                        log.info(
                            "  More information can be found in the logfile at {0:s}"
                            .format(caracal.CARACAL_LOG))
                        for line in traceback.format_exc().splitlines():
                            log.error(line, extra=dict(traceback_report=True))
                        log.info("This is not fatal, continuing")
            else:
                log.info(
                    "Standard notebook {} already exists, won't overwrite".
                    format(nbdest))
            continue

        # if source exists as is, copy
        nbsrc = os.path.join(SOURCE_NOTEBOOK_DIR, nbfile)
        if os.path.exists(nbsrc):
            if os.path.getmtime(nbsrc) > dest_mtime:
                log.info("Creating standard notebook {}".format(nbdest))
                shutil.copyfile(nbsrc, nbdest)
            else:
                log.info(
                    "Standard notebook {} already exists, won't overwrite".
                    format(nbdest))
            continue

        log.error("Standard notebook {} does not exist".format(nbsrc))
Example #12
def log_logo():
    # print("WWWWWWWWMMWMMWWMMWWWWWMWWWWMMMMWWWWWWWWWWWWWWWWWWWWMMMMWWWWWWWWWWWWWWWWWWWWWWWWWMMMWWWWWMWWWWWWWWWWWWWNNNNWWMMWWMWWWWWWW")
    # print("WWWWWWWMMWWWWWWMMWWWWWWWWWWMMWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWMMMMMMMWWWWWWWWWWWWWWWWWWWWWWWWMWWWWXkO0KWWWWWWWWWWWWW")
    # print("WWMWWWWWWWWWWWKOxdollcok00KKXWWWWMMMMWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWNXKKXXXXXNWWWWWWWN0d::ckWMWWMMWWWWWWW")
    # print("WMWWMWWWWWN0dc;'....',cxOOkkkO0XWMWWMMWWWWWX0xolllccc::::cllooddxxxxdddoooooooollcc;,,,;;;;;:cldxO0Ol,'.'lKWWWWMWWWWWWWW")
    # print("MWWWWWWWXkc'.''...,ckKNWWWNXKOkk0XWWWWWWKxc,.....................................................',''''.,xNWWWWWWWWWWWWW")
    # print("MWWWWWNk:'',;,'.,l0NWWWWWWMWWNKOkkKWWKxc'........................................................'''''''',dXWWMMWWWWWWWW")
    # print("WWWWW0c'',;:;'.:kNWWWMWWWWWMWWWN0kdoc'....,ll'.....................................................''''''',kWWWMMWWWWWWW")
    # print("MWWWO;.';cc,''cKWWWWWMMMMWWWWWWXx:.....:dkkx:...................................................';::;'.''''oNWWWWWWWWWWW")
    # print("WWWO;.';cc;'.cKWWWWMMWWWWWWWWW0;...'..':l;.....................................................,oKNNKOd:,,:xXWWWWWWWWWWW")
    # print("WWKc.';cc:'.;0WWWWWMMMWMMWWWWW0odOOc..........................',,'...................'.........cKWWWWWWN0KNWWWWWWWWWWWWW")
    # print("MWx'.,:cc,.'dWMWWWWWWWMWMMMWMMWWWWk'....................;cdkO0KXXOxdoc,''''',;;:lokOOkc........,cxXWWWWWWWMWWWMMWWWWWWWW")
    # print("MNl.';cc:'.;OWWWWWMWWWWWMWWMWWWWWO;...''.............:dOXWWMMWWWWMMWWNX0KK00KXXNWWWWWWXkl:,'.....':x0NWWWWMMWWMMMWWWWWWW")
    # print("WKc.';cc:'.:KWWWWWWWMWWMWWMWWWWWO;...,x0kdoloolc:cox0NMWWWWMMWMMWWWMWWMWWWWMMWWWWWWWWWWWWNKOxoc:,'..,lONWWWWWMMMWWWWWWWW")
    # print("WKc.';cc:'.:KWWMWWWWMMMWMMWWWMXo'....l0NWWWWWMWWWWMWWWMWMMWMMMMWWWWWMMWWMWWWWWWMWWWWWWMWWWWWWWWNk;.''.,xNWWWMMWWWWWWWWWW")
    # print("WXl.';cc:'.;0MWWWWWWWWWWWMMWWWx.....:x0NWWWMMMWWWWMWWMWWWWMWWWMMMWWWMWWMMMWWMWWWWMWWMMMWWWWMWMWWNk:'''.;OWWWWWWMMWWWWWWW")
    # print("WWd'.,ccc,.'xWMWWWWWWWWMWWMWWNo..;loxkKWWWMMWWMMMWWWMWWMWWWWWWWMMWWWWWMMMWWWWMWWMMWWMMWWMWWMWMMWMWX00OdxXWWWWMWWWWWWWWWW")
    # print("WW0:.';cc;'.cKMWWWWWWWWWWWWWWWNOdxxdxOXWWMXOkkkkkkOKNWWMMWWMMN0kkOXWMWMMMWWN0kxxxk0XWMWWMWN0kkONWWWWMNOkkKWWWWWWWWWWWWWW")
    # print("WWWk,.':cc,''oXWWWWWWWWWWWWWWWXo'..'cOWMWWO;..''''',:dKWWWWNNx,..'lXMWMMWKd:'.''''';dXMWMNd'..'oNMWWW0;..lNWWMMWWWWWWWWW")
    # print("MWWNx,.';c:,.'oXWWMWWWMWWMMWWNd'.,,.'dNWWWO;..cO0Od,.'oNMWWkc,.,;''oXMMW0:..;dO00kooONWWNx,.,;.'dNMWW0;..lNWWWWMWWMWWWWW")
    # print("WWWWNk;.',::,.'lKWWWWMWWWWWWNx,.,ol,.,xWWWO;..lXNXk;..cXWWO;..,xO:.'dNMNo..,kWWWWWWWWWWWk,.;kk;.,xWWW0;..lNWWWWWWMMMWWMW")
    # print("WWWWWWKo,.';;,'.;xXWWWWMMWWWk,.';ll;'.;kWMO;..,:::,.'c0WW0:...:dkc'.,dNNl..;OWWWWWWWWWWk;.':xxc'.,kWW0;..lNWWWWWMWWWWMMM")
    # print("WMWWMWWNOl,.',''.':d0NWWWWWO;.'',,,,,'.;OWO;..;ol;..:0WWKc.'',,,,,,'.,xNO:.';d0K0kod0NO;.',,,,,,'.;OW0;..cO0000KNMWWWWWW")
    # print("WMWWMWWWWWKxc,'.....,cdOK0x;.'cOKKKKOc..:0O;..oNW0c'':OKl..:dOKKKK0o'.,kNKd:'.',''';dk:..l0KKKK0c'.;O0;..'''''':0MWWWWWW")
    # print("WWWWWWWMWWWWNKkdlc:;;,:dOOxdxOXWWWWWWKkkkKXOkkKWWWXOkk0KOkkKWWWWWWWXOkk0NMWX0kxxxk0XWKkkOXWWWWWWXOkkKXOkkkkkkkkONMWWWMMW")
    # print("WWWWWWWMMWWWWWWWWWNXKKXNNWWWMWWWWMWWWWWMWWWWMMWWWWWWWWMMMWMMWWWWWWWWMMWWWWWWMMWWWWWWMMMWWMWWMWWWWMMMMWMMMMWWMWWMMWWWWWWW")
    # print("WWWWWWWMMWWMWWWWMMWWWWWWWWWWWWWWMWWWWWWWWMWWWWMWWWMWWWWMWWWMMWWWMWWWWMMWWWWWWWMMMMWWMMMWMWWMWWMWMMWWMWWMMWMWWMWWWWMMMWWW")

    print("""
........................................................................................................................
..........................................................................................................Z.~...........
...........................................................................................................Z.O..........
..................,8OOOOOZ==++,...........................................................................ZZOZ..........
...............?OZOOOOOOOO+======..................~=$ZOOO8OOZ~ ............~~....~7ZZOZOZZZOZZOO$.....,ZZZZZ=..........
.............OOOOOOOOOO$.....~=====...........$88888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZZZZZZZZZZ7...........
...........OOOOO$OOOO7..........====,.......88888888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZZZZZZZZZZZ...........
.........~ZOOO77OOOO.............:===~...Z8888888888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZZZZZZZZZZZZ..........
........$OOOZIIOOOO................====88888887Z8888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZZZZZZZZZZZZZ.........
.......OOOOIIIOOOO..................?888888O++.OO888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZZZZZZZZZZZZZ.........
......+OOO7IIOOOO.................:O8888O8,..Z888888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZ?.,ZZZZZZZZ=........
....,.OOOII7ZOOO.................8888D78888888888888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZO:......+OZ$Z.........
.....OOOO77IOOO,..................8,.:I8888888888888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZO.....................
.....OOOI7I7OOO.......................88888888888888OOOOOOOOOOO~....II8OOOOOOOOOOOOOOO$$,ZZZZZZZZZZZ....................
....$OOO77IOOOZ......................888888888888888OOOOOOO?............?IZI$Z$=?I........ZZZZZZZZZZOZ..................
....OOO$II7OOO,.....................?8888O888888888888888...................................ZZZZZZZZZZZ?................
....OOO7I7IOOO.....................,88887+.,,Z8ID88888..........................................IOZZZZZZZ=..............
....OOO7IIIOOO....................OO8888==..........................................................ZZZZZZI.............
....OOO7IIIOOO...................+888888==..........................................................OZZZZZZ.............
....OOO$II7OOO+..................88888O+==............................................................ZZZZZZ............
....?OOOII7$OOZ..................D888:+=+:................................................................O.............
.....OOO7777OOZ.......................==+...............................................................................
.....7OOOI77OOOZ..................$OOOO=+.....OOOOOOZOO?........$OOOO.........IOOOOOOO?.......OOOO=......OOOO...........
......OOOZI77OOZ..................OOOOOO......OOOOOOOOOOO~......OOOOOO......IOOOOOOOOOOO.....ZOOOOO......OOOO...........
.......OOO$7IZOOO................OOOZZOO......OOOO....ZOOO.....OOO8OOO.....?OOOO.....OZ.....=OOOOOOZ.....OOOO...........
........OOOOI7OOOO..............IOOO+ZOOO.....OOOO....OOOO....7OOO.8OOO....OOOO.............OOZ=.OOO~....OOOO...........
.........OOOOI7OOOO.............OOOI==OOOO....OOOO++IOZOOO...:OOO~..OOOO...OOOO............OOOZ..$OOO....OOOO...........
..........8OOOOIOOOO$..........OOOOOOOOOOO+...OOOOOOOOOOZ....OOOOOOOOOOO,..OOOO...........ZOOOOOOOOOOO...OOOO...........
...........,8OOOO$OOOZ........OOOOOOOOOOOOO...OOOO::OOOO....OOOOOOOOOOOOO..:OOOO=....OO...OOOOOOOOOOOO$..OOOO...........
..............OOOOOOOOOOO===++OOO7,.....OOOO..OOOO...OOOO..~OOO=......OOOO...ZOOOOOOOOOO.OOOO......ZOOO..OOOOOOOOOOO....
................~OOOOOOOOO+=+OOOO.......?OOO$.OOOO....ZOOZ.ZOOO.......$ZOO?....ZOOOOO7..ZOOO,.......OOOO.OOOOOOOOOOO....
......................+I7~=~............................................................................................
........................................................................................................................
........................................................................................................................
""")

#     print("""
# ................................................................................
# ........................................................................?.......
# ..............:~~:,....................................................Z$.......
# ..........ZOOOOOO+==+=...........+7O88OOOI,......,+7~+?OOZZOZZZZZ=...ZZZZ.......
# ........OOOOOOO.....,==+.....:88888OOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZZZ........
# ......OOOIOOO.........+=+..88888888OOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZZZZ,......
# .....ZOOIOOO...........==88888+I888OOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZZZZZ......
# ....OOZI$OO............8888I.$DO888OOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZ..ZZZZZ .....
# ...$OO7IOO............88.+888888888OOOOOOOOOOOOOOOOOOOOOOZZZZZZZZO..............
# ...OO7I$OO...............8888888888OOOOO8....I,OOOOOOOOO?=.OZZZZZZO.............
# ...OOIIOO=..............D888888888OOOO......................,ZZZZZZZ:...........
# ...OOIIOO..............I888+..:...Z..............................OZZZZ:.........
# ...OOIIOO.............8888==.......................................ZZZZ.........
# ...OOIIOO7............8888==........................................ZZZZ........
# ...OOII7OO...............+=.....................................................
# ....OOIIOO.............OOO=...$OOOOOO+.....OOO.....~OOOOOO....8OOZ....OOO.......
# ....ZOZIIOO...........OOOOO...$OO...OOI...OOOOO...ZOO,..=O...,OOOO....OOO.......
# .....OOOI$OO.........OOO=OO:..$OO...OOZ..OOO.OO,..OO~........OO.:OO...OOO.......
# ......=OOO7OO?......:OOOOOOO..$OOOOOOO..=OOOOZOO..OOZ.......OOOOOOOO..OOO.......
# ........ZOOOOOO+....OOZ$$$OOO.$OZ.,OO7..OO$$$$OOO.,OOOZOOO~$OO$$$$OO=.OOZOOOO...
# ...........OOOOOO+=OOO.....OO$$OO...OOOZOO.....ZO?..:OOOO..OO,....~OO.OOOOOOO...
# ................................................................................
# ................................................................................
#     """)

    log.info("Version {1:s} installed at {0:s}".format(pckgdir, str(__version__)))
Example #13
def main(argv):
    # parse initial arguments to init basic switches and modes
    parser = config_parser.basic_parser(argv)
    options, _ = parser.parse_known_args(argv)

    caracal.init_console_logging(boring=options.boring, debug=options.debug)
    stimela.logger().setLevel(logging.DEBUG if options.debug else logging.INFO)

    # user requests worker help
    if options.worker_help:
        if not print_worker_help(options.worker_help):
            parser.error("unknown worker '{}'".format(options.worker_help))
        return

    # User requests default config => dump and exit
    if options.get_default:
        sample_config = SAMPLE_CONFIGS.get(options.get_default_template)
        if sample_config is None:
            parser.error("unknown default template '{}'".format(options.get_default_template))
        sample_config_path = os.path.join(pckgdir, "sample_configurations", sample_config)
        if not os.path.exists(sample_config_path):
            raise RuntimeError("Missing sample config file {}. This is a bug, please report".format(sample_config))
        # validate the file
        try:
            parser = config_parser.config_parser()
            _, version = parser.validate_config(sample_config_path)
            if version != SCHEMA_VERSION:
                log.warning("Sample config file {} version is {}, current CARACal version is {}.".format(sample_config,
                                                                                                         version,
                                                                                                         SCHEMA_VERSION))
                log.warning("Proceeding anyway, but please notify the CARACal team to ship a newer sample config!")
        except config_parser.ConfigErrors as exc:
            log.error("{}, list of errors follows:".format(exc))
            for section, errors in exc.errors.items():
                print("  {}:".format(section))
                for err in errors:
                    print("    - {}".format(err))
            sys.exit(1)  # indicate failure
        log.info("Initializing {1} from config template '{0}' (schema version {2})".format(options.get_default_template,
                                                                                           options.get_default, version))
        shutil.copy2(sample_config_path, options.get_default)
        return

    if options.print_calibrator_standard:
        cdb = mkct.calibrator_database()
        log.info("Found the following reference calibrators (in CASA format):")
        log.info(cdb)
        return

    # if config was not specified (i.e. stayed default), print help and exit
    config_file = options.config
    if config_file == caracal.DEFAULT_CONFIG:
        parser.print_help()
        sys.exit(1)

    try:
        parser = config_parser.config_parser()
        config, version = parser.validate_config(config_file)
        if version != SCHEMA_VERSION:
            log.warning("Config file {} schema version is {}, current CARACal version is {}".format(config_file,
                                    version, SCHEMA_VERSION))
            log.warning("Will try to proceed anyway, but please be advised that configuration options may have changed.")
        # populate parser with items from config
        parser.populate_parser(config)
        # reparse arguments
        caracal.log.info("Loading pipeline configuration from {}".format(config_file), extra=dict(color="GREEN"))
        options, config = parser.update_config_from_args(config, argv)
        # raise warning on schema version
    except config_parser.ConfigErrors as exc:
        log.error("{}, list of errors follows:".format(exc))
        for section, errors in exc.errors.items():
            print("  {}:".format(section))
            for err in errors:
                print("    - {}".format(err))
        sys.exit(1)  # indicate failure
    except Exception as exc:
        traceback.print_exc()
        log.error("Error parsing arguments or configuration: {}".format(exc))
        if options.debug:
            log.warning("you are running with -debug enabled, dropping you into pdb. Use Ctrl+D to exit.")
            pdb.post_mortem(sys.exc_info()[2])
        sys.exit(1)  # indicate failure

    if options.report and options.no_reports:
        log.error("-report contradicts --no-reports")
        sys.exit(1)

    log_logo()
    # Very good idea to print user options into the log before running:
    parser.log_options(config)

    execute_pipeline(options, config, block=True)
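A minimal sketch of the two-pass argument parsing used in main(): parse the basic switches first with parse_known_args, then build the full parser and reparse once the configuration is known. The flags here are illustrative, not the real CARACal CLI or config_parser API.

import argparse
import sys

basic = argparse.ArgumentParser(add_help=False)
basic.add_argument("--debug", action="store_true")
basic.add_argument("--config", default="default.yml")
options, remaining = basic.parse_known_args(sys.argv[1:])

full = argparse.ArgumentParser(parents=[basic])
# ... options derived from the config file would be added to `full` here ...
options = full.parse_args(sys.argv[1:])
print(options)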