Example #1
def main():

    log.basicConfig(format="[%(asctime)s %(levelname)s] %(message)s",
                    level=log.DEBUG)
    args = parse_arguments()

    log.info('Creating bias report for radiometer "%s"', args.polarimeter_name)
    log.info('Reading data from file "%s"', args.input_url)
    log.info('Writing the report into "%s"', args.output_path)

    # Create the directory that will contain the report
    os.makedirs(args.output_path, exist_ok=True)

    # Load the metadata associated with this test
    log.info('Loading metadata for test "%s"', args.input_url)
    metadata = load_metadata(args.input_url)
    params = build_dict_from_results(pol_name=args.polarimeter_name,
                                     metadata=metadata)
    params["data_url"] = args.input_url

    save_parameters_to_json(
        params=dict(params, **get_code_version_params()),
        output_file_name=os.path.join(args.output_path,
                                      "bias_configuration.json"),
    )

    create_report(
        params=params,
        md_template_file="biases.md",
        md_report_file="bias_report.md",
        html_report_file="bias_report.html",
        output_path=args.output_path,
    )
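
The helper functions above are defined elsewhere in the project. As a rough sketch of what save_parameters_to_json could look like (only the name and call signature come from the example; the body is an assumption):

import json

def save_parameters_to_json(params, output_file_name):
    # Serialize the parameter dictionary to a human-readable JSON file.
    # default=str is a fallback for values that are not natively
    # JSON-serializable; the real helper may handle them differently.
    with open(output_file_name, "wt") as json_file:
        json.dump(params, json_file, indent=4, default=str)
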
Example #2
def main():
    "Entry point of the program"

    log.basicConfig(format="[%(asctime)s %(levelname)s] %(message)s",
                    level=log.DEBUG)
    args = parse_arguments()

    log.info('Tuning radiometer "%s"', args.polarimeter_name)
    log.info('Reading data from "%s"', args.input_path)
    log.info('Writing the report into "%s"', args.output_path)

    # Create the directory that will contain the report
    os.makedirs(args.output_path, exist_ok=True)

    with open(args.settings_file_path, "rt") as settings_file:
        settings = load_settings_from_file(settings_file)

    # Create a dictionary associating names like "q1" with
    # a HemtProperties object
    hemt_dict = {
        "q{0}".format(q): HemtProperties(q_index=q,
                                         input_path=args.input_path,
                                         output_path=args.output_path)
        for q in (1, 2, 3, 4, 5, 6)
    }
    balances = tune(hemt_dict, settings)

    params = build_dict_from_results(
        pol_name=args.polarimeter_name,
        hemt_dict=hemt_dict,
        balances=balances,
        settings=settings,
    )
    save_parameters_to_json(
        params=dict(params, **get_code_version_params()),
        output_file_name=os.path.join(args.output_path,
                                      "striptun_results.json"),
    )

    create_plots(hemt_dict.values())
    create_report(
        params=params,
        md_template_file="striptun.md",
        md_report_file="striptun_report.md",
        html_report_file="striptun_report.html",
        output_path=args.output_path,
    )
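
Every example merges get_code_version_params() into the saved parameters. A plausible minimal sketch (hypothetical body; the real helper may record different fields, for instance a hard-coded version string):

import subprocess

def get_code_version_params():
    # Record which revision of the analysis code produced the results.
    # Hypothetical sketch: querying git is an assumption.
    try:
        commit = subprocess.check_output(
            ["git", "rev-parse", "HEAD"]).decode("ascii").strip()
    except (OSError, subprocess.CalledProcessError):
        commit = "unknown"
    return {"code_version": commit}
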
Example #3
def main():

    log.basicConfig(format="[%(asctime)s %(levelname)s] %(message)s",
                    level=log.INFO)
    args = parse_arguments()

    log.info('Tuning radiometer "%s"', args.polarimeter_name)
    log.info('Reading data from file "%s"', args.input_file_path)
    log.info('Writing the report into "%s"', args.output_path)

    # Create the directory that will contain the report
    os.makedirs(args.output_path, exist_ok=True)

    # Load the output of the four detectors [ADU]
    log.info('Loading file from "%s"', args.input_file_path)
    metadata, data = load_timestream(args.input_file_path)
    g, dataDEM, dataPWR, gains, delta_gains = get_data(metadata,
                                                       args.gains_file_path,
                                                       data)
    IQU = get_stokes(dataPWR, dataDEM)
    duration = get_duration(dataDEM, dataPWR, SAMPLING_FREQUENCY_HZ)

    if args.n_chunks is None:
        # By default each chunk lasts 5 minutes (12 chunks per hour)
        args.n_chunks = int(duration / 60 / 60 * 12)

    log.info("File loaded, {} samples found".format(duration *
                                                    SAMPLING_FREQUENCY_HZ))

    # Calculate the covariance matrix and Pearson's correlation matrix
    cov_matrix_pwr = wn_cov(dataPWR)
    cov_matrix_dem = wn_cov(dataDEM)
    cov_matrix_stokes = wn_cov(IQU)

    corr_matrix_pwr = cov_to_corrcoef(cov_matrix_pwr)
    corr_matrix_dem = cov_to_corrcoef(cov_matrix_dem)
    corr_matrix_stokes = cov_to_corrcoef(cov_matrix_stokes)

    # Calculate the PSD
    log.info("Computing PSD with number-of-chunks={}, 1/f-upper-frequency={},".
             format(args.n_chunks, args.left_freq) +
             " WN-lower-frequency={}, detrend={}".format(
                 args.right_freq, args.detrend))

    freq, fftDEM, spectrogramDEM = get_fft(SAMPLING_FREQUENCY_HZ,
                                           dataDEM,
                                           args.n_chunks,
                                           detrend=args.detrend)
    (
        fit_parDEM,
        fkneeDEM,
        delta_fkneeDEM,
        alphaDEM,
        delta_alphaDEM,
        WN_levelDEM,
        delta_WN_levelDEM,
    ) = get_noise_characteristics(freq, fftDEM, args.left_freq,
                                  args.right_freq)
    for nam in DEM:
        log.info("Computed fknee, alpha, WN_level for %s outputs", nam)

    fftPWR, spectrogramPWR = get_fft(SAMPLING_FREQUENCY_HZ,
                                     dataPWR,
                                     args.n_chunks,
                                     detrend=args.detrend)[1:]
    (
        fit_parPWR,
        fkneePWR,
        delta_fkneePWR,
        alphaPWR,
        delta_alphaPWR,
        WN_levelPWR,
        delta_WN_levelPWR,
    ) = get_noise_characteristics(freq,
                                  fftPWR,
                                  args.left_freq,
                                  args.right_freq,
                                  totalPWR=True)
    [log.info("Computed alpha for " + pwr + " outputs") for pwr in PWR]

    # Calculate the PSD for the combinations of the 4 detector outputs that return I, Q, and U
    fftIQU, spectrogramIQU = get_fft(SAMPLING_FREQUENCY_HZ,
                                     IQU,
                                     args.n_chunks,
                                     detrend=args.detrend)[1:]
    (
        fit_parIQU,
        fkneeIQU,
        delta_fkneeIQU,
        alphaIQU,
        delta_alphaIQU,
        WN_levelIQU,
        delta_WN_levelIQU,
    ) = get_noise_characteristics(freq,
                                  fftIQU,
                                  args.left_freq,
                                  args.right_freq,
                                  totalPWR="stokes")
    log.info("Computed fknee, alpha, WN_level for I, Q, U")

    fix_I_fknee(fkneeIQU, fit_parIQU, WN_levelIQU)

    # Get an approximate estimation of the 1/f reduction factor
    reduction1_f = get_y_intercept_1_f_reduction(freq, fit_parIQU)

    # Produce the plots
    create_plots(
        args.polarimeter_name,
        corr_matrix_pwr,
        corr_matrix_dem,
        corr_matrix_stokes,
        freq,
        fftDEM,
        fit_parDEM,
        DEM,
        fftPWR,
        fit_parPWR,
        PWR,
        fftIQU,
        fit_parIQU,
        STOKES,
        spectrogramDEM,
        spectrogramPWR,
        spectrogramIQU,
        args.output_path,
        g,
    )

    params = build_dict_from_results(
        pol_name=args.polarimeter_name,
        input_file_path=args.input_file_path,
        gains_file_path=args.gains_file_path,
        cov_matrix_pwr=cov_matrix_pwr,
        cov_matrix_dem=cov_matrix_dem,
        cov_matrix_stokes=cov_matrix_stokes,
        g=g,
        duration=duration,
        left_freq=args.left_freq,
        right_freq=args.right_freq,
        n_chuncks=args.n_chunks,
        detrend=args.detrend,
        reduction1_f=reduction1_f,
        fkneeDEM=fkneeDEM,
        fit_parDEM=fit_parDEM,
        delta_fkneeDEM=delta_fkneeDEM,
        alphaDEM=alphaDEM,
        delta_alphaDEM=delta_alphaDEM,
        WN_levelDEM=WN_levelDEM,
        delta_WN_levelDEM=delta_WN_levelDEM,
        fkneePWR=fkneePWR,
        fit_parPWR=fit_parPWR,
        delta_fkneePWR=delta_fkneePWR,
        alphaPWR=alphaPWR,
        delta_alphaPWR=delta_alphaPWR,
        WN_levelPWR=WN_levelPWR,
        delta_WN_levelPWR=delta_WN_levelPWR,
        fkneeIQU=fkneeIQU,
        fit_parIQU=fit_parIQU,
        delta_fkneeIQU=delta_fkneeIQU,
        alphaIQU=alphaIQU,
        delta_alphaIQU=delta_alphaIQU,
        WN_levelIQU=WN_levelIQU,
        delta_WN_levelIQU=delta_WN_levelIQU,
        gains=gains,
        delta_gains=delta_gains,
    )

    save_parameters_to_json(
        params=dict(params, **get_code_version_params()),
        output_file_name=os.path.join(args.output_path,
                                      "noise_characteristics_results.json"),
    )

    create_report(
        params=params,
        md_template_file="noise_characteristics.md",
        md_report_file="noise_characteristics_report.md",
        html_report_file="noise_characteristics_report.html",
        output_path=args.output_path,
    )
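
The cov_to_corrcoef step above converts a covariance matrix into Pearson's correlation matrix, which is a standard operation. A minimal NumPy sketch (only the name comes from the example; the body is an assumption):

import numpy as np

def cov_to_corrcoef(cov_matrix):
    # Pearson correlation: corr[i, j] = cov[i, j] / (sigma[i] * sigma[j]),
    # where sigma[i] is the square root of the i-th diagonal element.
    sigma = np.sqrt(np.diag(cov_matrix))
    return cov_matrix / np.outer(sigma, sigma)
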
Example #4
def main():
    log.basicConfig(format="[%(asctime)s %(levelname)s] %(message)s",
                    level=log.DEBUG)
    args = parse_arguments()
    if args.analysis_method not in ANALYSIS_METHODS:
        log.fatal(
            'unknown analysis method "%s", available options are: %s',
            args.analysis_method,
            ", ".join('"{0}"'.format(x) for x in ANALYSIS_METHODS),
        )
        sys.exit(1)

    # Create the directory that will contain the report
    os.makedirs(args.output_path, exist_ok=True)

    do_analysis_fn, make_report_fn = ANALYSIS_METHODS[args.analysis_method]

    log.info('reading file "%s"', args.tnoise1_results)
    with open(args.tnoise1_results, "rt") as json_file:
        tnoise1_results = json.load(json_file)

    log.info('reading file "%s"', args.raw_file)
    metadata, data = load_timestream(args.raw_file)
    if metadata:
        phsw_state = metadata["phsw_state"]
    else:
        phsw_state = args.phsw_state

    offsets = parse_offsets(args.offsets)

    temperatures_a, temperatures_b = extract_temperatures(metadata)
    log.info("temperatures for load A: %s", str(temperatures_a))
    log.info("temperatures for load B: %s", str(temperatures_b))

    voltages, voltage_std, wn_level, wn_err, nsamples, offsets = extract_average_values(
        data.power,
        data.demodulated,
        metadata,
        tnoise1_results,
        num=len(temperatures_a),
        offsets=offsets,
    )
    for idx, arr in enumerate(voltages):
        log.info("voltages for PWR%d: %s", idx,
                 ", ".join(["{0:.1f}".format(x) for x in arr]))
        log.info(
            "voltage RMS for PWR%d: %s",
            idx,
            ", ".join(["{0:.1f}".format(x) for x in voltage_std[idx]]),
        )
        log.info(
            "WN for PWR%d: %s",
            idx,
            ", ".join(["{0:.1f}".format(x) for x in wn_level[idx]]),
        )

    log_ln = LogLikelihood(
        voltages=voltages,
        voltage_std=voltage_std,
        wn_level=wn_level,
        wn_error=wn_err,
        nsamples=nsamples,
        temperatures_a=temperatures_a,
        temperatures_b=temperatures_b,
        phsw_state=phsw_state,
    )

    params = do_analysis_fn(args, log_ln)
    params["analysis_method"] = args.analysis_method
    params["test_file_name"] = args.raw_file
    params["offsets"] = offsets

    save_parameters_to_json(
        params=dict(params, **get_code_version_params()),
        output_file_name=os.path.join(args.output_path,
                                      "tnoise_step2_results.json"),
    )

    make_report_fn(log_ln, params, args.output_path)
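
The unpacking do_analysis_fn, make_report_fn = ANALYSIS_METHODS[args.analysis_method] implies a dispatch table mapping each method name to an (analysis, report) pair. A sketch under that assumption, with placeholder names and bodies:

def do_mcmc_analysis(args, log_ln):
    # Placeholder: the real routine would explore the likelihood and
    # return a dictionary of fitted parameters.
    return {}

def make_mcmc_report(log_ln, params, output_path):
    # Placeholder: the real routine would render the report files.
    pass

# Each entry pairs an analysis routine with its report generator,
# matching how main() unpacks and calls them.
ANALYSIS_METHODS = {
    "mcmc": (do_mcmc_analysis, make_mcmc_report),
}
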
Example #5
def main():

    log.basicConfig(format="[%(asctime)s %(levelname)s] %(message)s",
                    level=log.DEBUG)
    args = parse_arguments()

    log.info('Tuning radiometer "%s"', args.polarimeter_name)
    log.info('Reading data from file "%s"', args.input_url)
    log.info('Writing the report into "%s"', args.output_path)

    # Create the directory that will contain the report
    os.makedirs(args.output_path, exist_ok=True)

    # Load from the text file only the columns containing the output of the four detectors
    log.info('Loading data from "%s"', args.input_url)
    timestream = load_timestream(args.input_url)[1]
    power = timestream.power

    log.info("File loaded, {0} samples found".format(len(power[:, 0])))

    time = np.arange(len(power[:, 0])) / SAMPLING_FREQUENCY_HZ
    slopes = [
        slope(time, power[:, i], chunk_len=25 * 60, step=25 * 3)
        for i in range(4)
    ]

    # Find the blind channel
    blind_channel, slope_threshold = find_blind_channel(slopes)
    log.info("The blind channel is PWR{0}".format(blind_channel))
    log.info("The maximum threshold on the slope is {0:.1f} ADU/s".format(
        slope_threshold))

    # Look for the places where the signal seems to be stable enough
    regions = {}
    num_of_regions = None
    for curve_idx in range(4):
        if curve_idx == blind_channel:
            continue

        regions[curve_idx] = find_stable_regions(
            slopes=slopes[curve_idx],
            slope_threshold_adu_s=1.5 * slope_threshold,
            duration_threshold_s=60.0,
            clipping_s=15.0,
            first_region_length=args.first_region_length,
            regions=parse_region_list_str(args.regions),
        )
        # Use "is None" as the sentinel check: a detector with zero quiet
        # regions must not reset the reference count
        if num_of_regions is None:
            num_of_regions = len(regions[curve_idx])
        elif num_of_regions != len(regions[curve_idx]):
            log.warning(
                "Mismatch in the number of quiet regions across the detectors: %d against %d",
                num_of_regions,
                len(regions[curve_idx]),
            )

    # Produce the plots
    for curve_idx in range(4):
        output_file_name = os.path.join(args.output_path,
                                        "plot_pwr{0}.svg".format(curve_idx))
        if curve_idx == blind_channel:
            curve_regions = []
        else:
            curve_regions = regions[curve_idx]

        save_plot(
            time,
            power[:, curve_idx],
            slopes[curve_idx],
            curve_regions,
            slope_threshold,
            output_file_name,
        )

    params = build_dict_from_results(
        pol_name=args.polarimeter_name,
        blind_channel=blind_channel,
        time=time,
        data=power,
        regions=regions,
    )
    params["data_url"] = args.input_url

    region_str = []
    for curve_idx in range(4):
        if curve_idx in regions and regions[curve_idx]:
            region_str.append(",".join([
                "{0:.0f}-{1:.0f}".format(x.time0_s, x.time1_s)
                for x in regions[curve_idx]
            ]))
        else:
            region_str.append("")
    params["region_str"] = region_str

    save_parameters_to_json(
        params=dict(params, **get_code_version_params()),
        output_file_name=os.path.join(args.output_path,
                                      "tnoise_step1_results.json"),
    )

    create_report(
        params=params,
        md_template_file="tnoise_step1.md",
        md_report_file="tnoise_step1_report.md",
        html_report_file="tnoise_step1_report.html",
        output_path=args.output_path,
    )
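
The slope helper is only called here, never shown. From the call slope(time, power[:, i], chunk_len=25 * 60, step=25 * 3) it appears to estimate the local drift over sliding windows, with chunk_len and step expressed in samples. A minimal sketch under that assumption:

import numpy as np

def slope(time, values, chunk_len, step):
    # Estimate the local drift [ADU/s] with a least-squares linear fit
    # over sliding windows of chunk_len samples, advanced by step samples.
    # Hypothetical implementation: only the signature comes from main().
    slopes = []
    for start in range(0, len(values) - chunk_len + 1, step):
        stop = start + chunk_len
        coeffs = np.polyfit(time[start:stop], values[start:stop], deg=1)
        slopes.append(coeffs[0])  # leading coefficient is the slope
    return np.array(slopes)
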
Example #6
def main():
    log.basicConfig(format="[%(asctime)s %(levelname)s] %(message)s",
                    level=log.DEBUG)
    args = parse_arguments()

    log.info('Tuning radiometer "%s"', args.polarimeter_name)

    log.info('Writing the report into "%s"', args.output_path)

    # Creating the directory that will contain the report
    os.makedirs(args.output_path, exist_ok=True)

    norm_data_list, central_nu_det, bandwidth_det, PSStatus = [], [], [], []
    low_nu, high_nu = np.zeros(1), np.zeros(1)

    for i, file_name in enumerate(args.file_list):

        # Loading file
        log.info('Loading file "%s"', file_name)

        # Analyzing bandpass test for this file
        (
            duration,
            low_nu,
            high_nu,
            pss,
            new_nu,
            new_data,
            norm_data,
            cf_det,
            bw_det,
        ) = AnalyzeBandTest(args.polarimeter_name, file_name, args.output_path)

        # Producing preliminary plots
        preliminary_plots(args.polarimeter_name, new_nu, new_data,
                          args.output_path, pss, i)

        # Saving normalized data for both phase-switch states
        central_nu_det.append(cf_det)
        bandwidth_det.append(bw_det)
        norm_data_list.append(norm_data)
        PSStatus.append(pss)

        # Saving bandpass data of all detectors to .txt file
        np.savetxt(
            os.path.join(args.output_path,
                         "bandpass_data_{0}_{1}.txt".format(pss, i)),
            np.column_stack([new_nu, new_data]),
            header="\t\t".join(["nu", "PW0/Q1", "PW1/U1", "PW2/U2", "PW3/Q2"]),
        )

    log.info(
        "Computed bandwidth and central frequency for each detector for both phase-switch states"
    )

    central_nu_det = np.array(central_nu_det)
    bandwidth_det = np.array(bandwidth_det)
    norm_data_All = np.column_stack(norm_data_list)

    All_central_nu, All_bandwidth = get_central_nu_bandwidth(
        new_nu, norm_data_All)

    # Computing the final band
    final_band = np.median(norm_data_All, axis=1)
    final_band_err = (np.percentile(norm_data_All, 97.7, axis=1) -
                      np.percentile(norm_data_All, 2.7, axis=1)) / 2

    # Producing final plots
    final_plots(
        args.polarimeter_name,
        new_nu,
        norm_data_All,
        final_band,
        final_band_err,
        args.output_path,
    )

    # Computing final central frequency and final bandwidth
    final_central_nu, final_bandwidth = get_central_nu_bandwidth(
        new_nu, final_band[:, None])

    # Computing errors for central frequency and bandwidth
    final_central_nu_err = (np.percentile(All_central_nu, 97.7) -
                            np.percentile(All_central_nu, 2.7)) / 2
    final_bandwidth_err = (np.percentile(All_bandwidth, 97.7) -
                           np.percentile(All_bandwidth, 2.7)) / 2

    log.info("Computed final bandwidth and final central frequency")

    # Creating the report
    params = build_dict_from_results(
        args.polarimeter_name,
        duration,
        low_nu,
        high_nu,
        PSStatus,
        central_nu_det,
        bandwidth_det,
        new_nu,
        final_band,
        final_central_nu,
        final_central_nu_err,
        final_bandwidth,
        final_bandwidth_err,
    )

    save_parameters_to_json(
        params=dict(params, **get_code_version_params()),
        output_file_name=os.path.join(args.output_path,
                                      "bandwidth_results.json"),
    )

    create_report(
        params=params,
        md_template_file="bandwidth.md",
        md_report_file="bandwidth_report.md",
        html_report_file="bandwidth_report.html",
        output_path=args.output_path,
    )
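
get_central_nu_bandwidth is applied both to the per-file bands and to the final median band. If it follows the standard radiometer definitions (an assumption; only the name and call signature come from the example), a column-wise NumPy sketch would be:

import numpy as np

def get_central_nu_bandwidth(nu, bands):
    # bands has shape (len(nu), n_bands); each column is a band profile B(nu).
    # Standard definitions (assumed here, not taken from the project):
    #   central frequency:    int(nu * B) / int(B)
    #   equivalent bandwidth: (int(B))**2 / int(B**2)
    norm = np.trapz(bands, nu, axis=0)
    central_nu = np.trapz(nu[:, None] * bands, nu, axis=0) / norm
    bandwidth = norm ** 2 / np.trapz(bands ** 2, nu, axis=0)
    return central_nu, bandwidth
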