Example no. 1
def test_single_chunk(caplog):
    """Check that Level2File copes with reading a file containing a single chunk."""
    # Need to override the test level set above
    caplog.set_level(logging.WARNING, 'metpy.io.nexrad')
    f = Level2File(get_test_data('Level2_KLBB_single_chunk'))
    assert len(f.sweeps) == 1
    assert 'Unable to read volume header' in caplog.text

    # Make sure the warning is not present if we pass the right kwarg.
    caplog.clear()
    Level2File(get_test_data('Level2_KLBB_single_chunk'),
               has_volume_header=False)
    assert 'Unable to read volume header' not in caplog.text
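The has_volume_header keyword exercised above is useful outside of tests too: real-time chunk files lack the volume header, and passing has_volume_header=False suppresses the 'Unable to read volume header' warning, as the assertions verify. A minimal sketch, assuming a hypothetical local chunk file:

from metpy.io import Level2File

# 'chunk_001' is a hypothetical real-time chunk file with no volume header
f = Level2File('chunk_001', has_volume_header=False)
print(len(f.sweeps))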
Example no. 2
def test_level2(fname, voltime, num_sweeps, mom_first, mom_last):
    """Test reading NEXRAD level 2 files from the filename."""
    f = Level2File(get_test_data(fname, as_file_obj=False))
    assert f.dt == voltime
    assert len(f.sweeps) == num_sweeps
    assert len(f.sweeps[0][0][-1]) == mom_first
    assert len(f.sweeps[-1][0][-1]) == mom_last
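The chained indexing in these assertions reflects Level2File's layout: f.sweeps is a list of sweeps, each sweep is a list of rays, and the last element of each ray is a dict mapping moment names (byte strings) to (header, data) tuples. A minimal inspection sketch, assuming a local copy of one of the test files:

from metpy.io import Level2File

f = Level2File('Level2_KFTG_20150430_1419.ar2v')  # assumes a local copy
first_ray = f.sweeps[0][0]
print(first_ray[0].az_angle)  # the ray header carries the azimuth angle
print(list(first_ray[-1]))    # moment names, e.g. b'REF'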
Example no. 3
def test_msg15():
    """Check proper decoding of message type 15."""
    f = Level2File(
        get_test_data('KTLX20130520_201643_V06.gz', as_file_obj=False))
    data = f.clutter_filter_map['data']
    assert isinstance(data[0][0], list)
    assert f.clutter_filter_map['datetime'] == datetime(
        2013, 5, 19, 0, 0, 0, 315000)
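The clutter filter map decoded from message type 15 is exposed as a plain dict; a minimal sketch of inspecting it, assuming a local copy of the same file:

from metpy.io import Level2File

f = Level2File('KTLX20130520_201643_V06.gz')  # assumes a local copy
cfm = f.clutter_filter_map
print(cfm['datetime'])   # generation time of the map
print(len(cfm['data']))  # outer dimension of the map data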
Example no. 4
def test_doubled_file():
    """Test for #489 where doubled-up files didn't parse at all."""
    with contextlib.closing(
            get_test_data('Level2_KFTG_20150430_1419.ar2v')) as infile:
        data = infile.read()
    fobj = BytesIO(data + data)
    f = Level2File(fobj)
    assert len(f.sweeps) == 12
Example no. 5
def test_level2(fname, voltime, num_sweeps, mom_first, mom_last, expected_logs,
                caplog):
    """Test reading NEXRAD level 2 files from the filename."""
    caplog.set_level(logging.WARNING, 'metpy.io.nexrad')
    f = Level2File(get_test_data(fname, as_file_obj=False))
    assert f.dt == voltime
    assert len(f.sweeps) == num_sweeps
    assert len(f.sweeps[0][0][-1]) == mom_first
    assert len(f.sweeps[-1][0][-1]) == mom_last
    assert len(caplog.records) == expected_logs
Example no. 6
def getFile(fileName):
    s3 = boto3.resource('s3',
                        config=Config(signature_version=botocore.UNSIGNED,
                                      user_agent_extra='Resource'))

    bucket = s3.Bucket('noaa-nexrad-level2')
    for obj in bucket.objects.filter(Prefix=fileName):
        print(obj.key)

        # Use MetPy to read the file
        return Level2File(obj.get()['Body'])
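Note that getFile returns a Level2File for the first object matching the prefix. A hypothetical call, using a key prefix of the form seen in Example no. 11 below:

# hypothetical usage; the prefix must match a real key in the bucket
radar = getFile('2019/06/26/KVWX/KVWX20190626_221105_V06')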
Example no. 7
def read_nexRad(filename):

    # Open the file
    # name = get_test_data('PHWA20201031_000332_V06.gz', as_file_obj=False)
    f = Level2File(filename)

    # print(f.sweeps[0][0])
    # Pull data out of the file
    sweep = 0

    # First item in ray is header, which has azimuth angle
    az = np.array([ray[0].az_angle for ray in f.sweeps[sweep]])

    # 5th item is a dict mapping a var name (byte string) to a tuple
    # of (header, data array)
    ref_hdr = f.sweeps[sweep][0][4][b'REF'][0]
    ref_range = np.arange(
        ref_hdr.num_gates) * ref_hdr.gate_width + ref_hdr.first_gate
    ref = np.array([ray[4][b'REF'][1] for ray in f.sweeps[sweep]])

    # rho_hdr = f.sweeps[sweep][0][4][b'RHO'][0]
    # rho_range = (np.arange(rho_hdr.num_gates + 1) - 0.5) * rho_hdr.gate_width + rho_hdr.first_gate
    # rho = np.array([ray[4][b'RHO'][1] for ray in f.sweeps[sweep]])

    fig, axes = plt.subplots(1, 1, figsize=(15, 8))

    # reflectivity plot
    data = np.ma.array(ref)
    data[np.isnan(data)] = np.ma.masked

    # Convert az,range to x,y
    xlocs = ref_range * np.sin(np.deg2rad(az[:, np.newaxis]))
    ylocs = ref_range * np.cos(np.deg2rad(az[:, np.newaxis]))

    # Plot the data
    axes.pcolormesh(xlocs, ylocs, data, cmap='viridis')
    axes.set_aspect('equal', 'datalim')
    axes.set_xlim(-150, 150)
    axes.set_ylim(-150, 150)
    #plt.axis('off')
    plt.show()

    # Redraw the plot so the canvas buffer is populated
    fig.canvas.draw()

    # Now we can save it to a numpy array. (np.fromstring is deprecated
    # for binary data, so np.frombuffer is used instead.)
    width, height = fig.get_size_inches() * fig.get_dpi()
    data = np.frombuffer(fig.canvas.tostring_rgb(),
                         dtype=np.uint8).reshape(int(height), int(width), 3)
    print(data.shape)
    plt.imshow(data)
    plt.savefig('test.png')
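As an aside, the two-step masking above (wrap the array with np.ma.array, then assign np.ma.masked to the NaN gates) can be collapsed into a single call with np.ma.masked_invalid:

data = np.ma.masked_invalid(ref)  # masks NaN gates in one step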
Example no. 8
def save_as_image(d, nexrad):
    LAYER1 = b"REF"
    LAYER2 = b"VEL"
    f = Level2File(str(nexrad))

    # Pull data out of the file
    for sweep in range(0, 21):
        try:
            print(f"rendering sweep {sweep}")
            # First item in ray is header, which has azimuth angle
            az = np.array([ray[0].az_angle for ray in f.sweeps[sweep]])

            # 5th item is a dict mapping a var name (byte string) to a tuple
            # of (header, data array)
            ref_hdr = f.sweeps[sweep][0][4][LAYER1][0]
            ref_range = np.arange(
                ref_hdr.num_gates) * ref_hdr.gate_width + ref_hdr.first_gate
            ref = np.array([ray[4][LAYER1][1] for ray in f.sweeps[sweep]])
            try:
                rho_hdr = f.sweeps[sweep][0][4][LAYER2][0]
                rho_range = (np.arange(rho_hdr.num_gates + 1) -
                             0.5) * rho_hdr.gate_width + rho_hdr.first_gate
                rho = np.array([ray[4][LAYER2][1] for ray in f.sweeps[sweep]])
            except KeyError:  # fall back to RHO when the VEL moment is absent
                rho_hdr = f.sweeps[sweep][0][4][b"RHO"][0]
                rho_range = np.arange(
                    rho_hdr.num_gates) * rho_hdr.gate_width + rho_hdr.first_gate
                rho = np.array([ray[4][b"RHO"][1] for ray in f.sweeps[sweep]])

            fig, axes = plt.subplots(1, 2, figsize=(15, 8))
            for var_data, var_range, ax in zip((ref, rho),
                                               (ref_range, rho_range), axes):
                # Turn into an array, then mask
                data = np.ma.array(var_data)
                data[np.isnan(data)] = np.ma.masked

                # Convert az,range to x,y
                xlocs = var_range * np.sin(np.deg2rad(az[:, np.newaxis]))
                ylocs = var_range * np.cos(np.deg2rad(az[:, np.newaxis]))

                # Plot the data
                ax.pcolormesh(xlocs, ylocs, data, cmap='viridis')
                ax.set_aspect('equal', 'datalim')
                ax.set_xlim(-275, 275)
                ax.set_ylim(-275, 275)
                add_timestamp(ax, f.dt, y=0.02, high_contrast=True)

            plt.savefig(str(d / OUT_PREFIX.format(f.dt.timestamp(), sweep)))
        except (KeyError, IndexError):  # missing moment or no such sweep
            print(f"sweep {sweep} failed, skipping")
Example no. 9
def test_level2_fobj(filename, use_seek):
    """Test reading NEXRAD level2 data from a file object."""
    f = get_test_data(filename)
    if not use_seek:

        class SeeklessReader:
            """Simulate file-like object access without seek."""
            def __init__(self, f):
                self._f = f

            def read(self, n=None):
                """Read bytes."""
                return self._f.read(n)

        f = SeeklessReader(f)
    Level2File(f)
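The same wrapper can be used outside the test to feed Level2File any read-only stream, since coping with seekless readers is exactly what this test verifies. A minimal sketch reusing the SeeklessReader class above, assuming a local copy of the test file:

with open('Level2_KFTG_20150430_1419.ar2v', 'rb') as src:
    f = Level2File(SeeklessReader(src))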
Example no. 10
def main():
    manager = mp.Manager()
    results = manager.dict()
    pool = TPool(12)
    jobs = []

    startDateTime = datetime.datetime.strptime(args.convTime, '%Y%m%d%H%M')
    intervalDateTime = datetime.timedelta(
        hours=2, minutes=0
    )  # hours=int(args.convInterval[:2]), minutes=int(args.convInterval[2:])

    station = args.sensor

    # Query all L2 files for the sensor
    totalRadarObjects = []
    totalSweepDateTimes = []
    hrIter = datetime.timedelta(hours=0)
    while True:  # grab a specific interval of files
        radarObjects, sweepDateTimes = pull_data(startDateTime=(startDateTime + hrIter),
                                                 station=station)
        totalRadarObjects.extend(radarObjects[:-1])
        totalSweepDateTimes.extend(
            sweepDateTimes[:-1])  # remove trailing *_MDM file
        if totalSweepDateTimes[-1] - startDateTime >= intervalDateTime:
            break
        else:
            hrIter += datetime.timedelta(hours=1)
    fileDict = {'L2File': totalRadarObjects, 'Time': totalSweepDateTimes}
    fileDF = pd.DataFrame(fileDict)
    print(
        f'Start time: {startDateTime}, Interval: {intervalDateTime}, End Time: {startDateTime + intervalDateTime}'
    )

    filesToStream = fileDF[(fileDF['Time'] >= startDateTime)
                           & (fileDF['Time'] <= startDateTime
                              + intervalDateTime)]['L2File'].tolist()  # bitwise &; each condition wrapped in parentheses for precedence
    logging.info(f'files: {[obj.key for obj in filesToStream]}')
    if len(filesToStream) < 8:
        warnings.warn("n of radar inputs is not sufficent for curve smoothing",
                      UserWarning)

    # --- Stream files ahead of time to avoid error with multiprocessing and file handles ---
    filesToWorkers = []

    for L2FileStream in tqdm(filesToStream, desc="Streaming L2 Files"):
        try:
            if datetime.datetime.strptime(
                    L2FileStream.key[20:35],
                    '%Y%m%d_%H%M%S') >= datetime.datetime(2016, 1, 1):
                filesToWorkers.append(Level2File(L2FileStream.get()['Body']))
            else:
                bytestream = BytesIO(L2FileStream.get()['Body'].read())
                with gzip.open(bytestream, 'rb') as f:
                    filesToWorkers.append(Level2File(f))
        except ValueError:
            print("ValueError, most likely in parsing header")

    # --- Create pool for workers ---
    for file in filesToWorkers:
        job = pool.apply_async(calculate_radar_stats, (results, file))
        jobs.append(job)

    # --- Commit pool to workers ---
    for job in tqdm(jobs, desc="Bounding & Searching Data"):
        job.get()

    pool.close()
    pool.join()

    columns = [
        'sweepDateTime', 'metadata', 'sensorData', 'indices', 'xlocs', 'ylocs',
        'data', 'polyVerts', 'offset', 'areaValue', 'refValue', 'varRefValue'
    ]
    print(
        'Creating Dataframe... (This may take a while if plotting significant data)'
    )
    resultsDF = pd.DataFrame.from_dict(results,
                                       orient='index',
                                       columns=columns)  #SUPER slow
    print('Converting datetimes...')
    resultsDF['sweepDateTime'] = pd.to_datetime(resultsDF.sweepDateTime)
    print('Sorting...')
    resultsDF.sort_values(by='sweepDateTime', inplace=True)
    #resultsDF.to_csv(args.output + '.csv', index = False)
    print(resultsDF[['areaValue', 'refValue']].head(5))

    # --- Plot time series---
    fig, axes = plt.subplots(8, 8, figsize=(30, 30))
    date_format = mpl_dates.DateFormatter('%H:%Mz')

    for i, (dt, record) in tqdm(enumerate(resultsDF.iterrows()),
                                desc='Plotting Slices'):
        plotx = i % 8
        ploty = int(i / 8)

        negXLim = -.5
        posXLim = 1.5
        negYLim = -1.0
        posYLim = 1.0
        norm, cmap = ctables.registry.get_with_steps('NWSReflectivity', 5, 5)
        tempdata = record[
            'data'].copy()  # copy the data so plotting tweaks don't mutate the DataFrame
        tempdata[tempdata == 0] = np.ma.masked  # mask out 0s for plotting

        axes[ploty][plotx].pcolormesh(record['xlocs'],
                                      record['ylocs'],
                                      tempdata,
                                      norm=norm,
                                      cmap=cmap,
                                      shading='auto')
        axes[ploty][plotx].set_aspect(aspect='equal')
        axes[ploty][plotx].set_xlim(negXLim, posXLim)
        axes[ploty][plotx].set_ylim(negYLim, posYLim)
        pVXs, pVYs = zip(
            *record['polyVerts']
        )  # create lists of x and y values for transformed polyVerts
        axes[ploty][plotx].plot(pVXs, pVYs)
        if negXLim < record['offset'][1] < posXLim and \
        negYLim < record['offset'][0] < posYLim:
            axes[ploty][plotx].plot(record['offset'][1], record['offset'][0],
                                    'o')  # Location of the radar
            axes[ploty][plotx].text(record['offset'][1], record['offset'][0],
                                    record['sensorData']['siteID'])

        axes[ploty][plotx].plot(0.0, 0.0, 'bx')  # Location of the convection
        axes[ploty][plotx].text(0.0, 0.0, str(args.convLatLon))
        add_timestamp(axes[ploty][plotx],
                      record['sweepDateTime'],
                      y=0.02,
                      high_contrast=True)
        axes[ploty][plotx].tick_params(axis='both', which='both')

    print('Calculating Statistics...')

    # pull data out of DF to make code cleaner
    datetimes = resultsDF['sweepDateTime'].tolist()
    #elapsedtimes = list(map(lambda x: x - min(datetimes), datetimes))						# not currently used, need to get this working
    areaValues = resultsDF['areaValue'].tolist()  # area ≥ 35 dBZ within ROI
    refValues = np.array(
        resultsDF['refValue'].tolist()
    )  # mean reflectivity ≥ 35 dBZ within ROI (conversion: (val-65)*0.5) [https://mesonet.agron.iastate.edu/GIS/rasters.php?rid=2]
    if np.isnan(refValues).any():  # 'np.nan in refValues' never matches, since nan != nan
        warnings.warn(
            "Radar inputs contain an instance with no ref values >= threshold",
            UserWarning)
    varValues = resultsDF['varRefValue'].tolist(
    )  # variance of mean reflectivity ≥ 35 dBZ within ROI
    cvValues = np.array([
        a / b for a, b in zip(varValues, refValues)
    ]) * 0.5  # coeff. of variation for mean reflectivity ≥ 35 dBZ within ROI

    # Frequency
    N = len(refValues)
    T = 1.0 / N
    yf = fft(refValues)
    w = blackman(N)
    ywf = fft(refValues * w)

    # Normalization
    areaNorm = areaValues / np.max(areaValues)
    xf = np.linspace(0, 1.0 / (2.0 * T), N // 2)
    cvNorm = cvValues / np.max(cvValues)
    areaCVValuesNormalized = np.multiply(areaNorm, cvNorm)

    # Curve Smoothing
    window = len(
        resultsDF.index
    ) // 8  # ~2 hours/8 = ~15 mins ----> number of samples in moving average ( helps counteract more visible noise in higher temporal resolution data)
    yAreaAvg = movingaverage(
        areaValues, window)[window // 2:-window //
                            2]  # create moving averages for time series'
    yRefAvg = movingaverage(refValues, window)[window // 2:-window // 2]
    yCVAvg = movingaverage(cvValues, window)[window // 2:-window // 2]
    yAreaCVNormAvg = movingaverage(areaCVValuesNormalized,
                                   window)[window // 2:-window // 2]

    # local minima & maxima on smoothed curves
    minTemporalWindow = window * 2

    areaLocalMax = argrelmax(yAreaAvg)
    areaLocalMin = argrelmin(yAreaAvg)
    endpoints = []
    # an endpoint counts as an extreme if it bounds its neighboring window
    if np.all(yAreaAvg[0] <= yAreaAvg[1:window + 1]) or \
            np.all(yAreaAvg[0] >= yAreaAvg[1:window + 1]):
        endpoints.append(0)
    if np.all(yAreaAvg[-1] <= yAreaAvg[-window - 1:-1]) or \
            np.all(yAreaAvg[-1] >= yAreaAvg[-window - 1:-1]):
        endpoints.append(len(yAreaAvg) - 1)
    #print(f'Area: Endpoints: {yAreaAvg[endpoints]}, Local Maxes: {yAreaAvg[areaLocalMax]}, Local Mins: {yAreaAvg[areaLocalMin]}')
    areaExtremaRaw = sorted(
        areaLocalMax[0].tolist() + areaLocalMin[0].tolist() + endpoints
    )  # combine mins, maxes, and endpoints (if endpoints are an extreme) then sort
    areaExtrema = [
        x for x in areaExtremaRaw[1:]
        if x - areaExtremaRaw[0] >= minTemporalWindow
    ]  # remove maxima that are within threshold of first one
    areaExtrema = [areaExtremaRaw[0]
                   ] + areaExtrema  # add the first one back to the beginning
    logging.info(f'Area Values: {yAreaAvg}')
    logging.info(f'Area Extrema: {yAreaAvg[areaExtrema]}')

    refLocalMax = argrelmax(yRefAvg)
    refLocalMin = argrelmin(yRefAvg)
    endpoints = []
    if np.all(yRefAvg[0] <= yRefAvg[1:window + 1]) or \
            np.all(yRefAvg[0] >= yRefAvg[1:window + 1]):
        endpoints.append(0)
    if np.all(yRefAvg[-1] <= yRefAvg[-window - 1:-1]) or \
            np.all(yRefAvg[-1] >= yRefAvg[-window - 1:-1]):
        endpoints.append(len(yRefAvg) - 1)
    refExtremaRaw = sorted(refLocalMax[0].tolist() + refLocalMin[0].tolist() +
                           endpoints)
    refExtrema = [
        x for x in refExtremaRaw[1:]
        if x - refExtremaRaw[0] >= minTemporalWindow
    ]
    refExtrema = [refExtremaRaw[0]] + refExtrema
    logging.info(f'Ref Values: {yRefAvg}')
    logging.info(f'Ref Extrema: {yRefAvg[refExtrema]}')

    #cvLocalMax = argrelmax(yCVAvg)
    #cvLocalMin = argrelmin(yCVAvg)
    #endpoints = []
    #if yCVAvg[0] <= np.all(yCVAvg[1:window+1]) or\
    # yCVAvg[0] >= np.all(yCVAvg[1:window+1]):
    #	endpoints.append(0)
    #if yCVAvg[-1] <= np.all(yCVAvg[len(yCVAvg-1)-window+1:-2]) or\
    # yCVAvg[-1] >= np.all(yCVAvg[len(yCVAvg-1)-window+1:-2]):
    #	endpoints.append(len(yCVAvg)-1)
    #cvExtremaRaw = sorted(cvLocalMax[0].tolist()+cvLocalMin[0].tolist()+endpoints)
    #cvExtrema = [x for x in cvExtremaRaw[1:] if x-cvExtremaRaw[0]>=minTemporalWindow]
    #cvExtrema = [cvExtremaRaw[0]]+cvExtrema
    #logging.info(f'CV Values: {yCVAvg}')
    #logging.info(f'CV Extrema: {yCVAvg[cvExtrema]}')

    yAreaCVNormLocalMax = argrelmax(yAreaCVNormAvg)
    yAreaCVNormLocalMin = argrelmin(yAreaCVNormAvg)
    endpoints = []
    if np.all(yAreaCVNormAvg[0] <= yAreaCVNormAvg[1:window + 1]) or \
            np.all(yAreaCVNormAvg[0] >= yAreaCVNormAvg[1:window + 1]):
        endpoints.append(0)
    if np.all(yAreaCVNormAvg[-1] <= yAreaCVNormAvg[-window - 1:-1]) or \
            np.all(yAreaCVNormAvg[-1] >= yAreaCVNormAvg[-window - 1:-1]):
        endpoints.append(len(yAreaCVNormAvg) - 1)
    yAreaCVNormExtremaRaw = sorted(yAreaCVNormLocalMax[0].tolist() +
                                   yAreaCVNormLocalMin[0].tolist() + endpoints)
    yAreaCVNormExtrema = [
        x for x in yAreaCVNormExtremaRaw[1:]
        if x - yAreaCVNormExtremaRaw[0] >= minTemporalWindow
    ]
    yAreaCVNormExtrema = [yAreaCVNormExtremaRaw[0]] + yAreaCVNormExtrema
    logging.info(f'AreaCVNorm Extrema: {yAreaCVNormAvg[yAreaCVNormExtrema]}')

    # Find slopes of Build-up Lines
    # 	Area
    xArea = np.array(datetimes[window // 2:-window // 2])[np.array(
        [areaExtrema[0], areaExtrema[1]]
    )]  # grab the datetimes (x component) of the first two extrema on the smoothed curve (the curve is already bound by the window, so the same bounds are applied to datetimes)
    xAreaDiff = xArea[1] - xArea[
        0]  # subtract the later value from the former to get our delta x
    yArea = yAreaAvg[np.array(
        [areaExtrema[0], areaExtrema[1]]
    )]  # grab the values (y component) of the smoothed curve at the beginning and at the first extreme
    yAreaDiff = yArea[1] - yArea[0]  # subtract to find delta y
    slopeArea = np.arctan(yAreaDiff / xAreaDiff.total_seconds()
                          )  # slope angle; total_seconds() avoids the day-wrapping of .seconds
    logging.info(f'Slope of Area: {slopeArea}')

    #   Reflectivity
    xRef = np.array(datetimes[window // 2:-window // 2])[np.array(
        [refExtrema[0], refExtrema[1]])]
    xRefDiff = xRef[1] - xRef[0]
    yRef = yRefAvg[np.array([refExtrema[0], refExtrema[1]])]
    yRefDiff = yRef[1] - yRef[0]
    slopeRef = np.arctan(yRefDiff / xRefDiff.total_seconds())
    print(f'Slope of Reflectivity: {slopeRef}')

    # 	Product of Area and Coefficent of Variation of Reflectivity
    xProduct = np.array(datetimes[window // 2:-window // 2])[np.array(
        [yAreaCVNormExtrema[0], yAreaCVNormExtrema[1]])]
    xProductDiff = xProduct[1] - xProduct[0]
    yProduct = yAreaCVNormAvg[np.array(
        [yAreaCVNormExtrema[0], yAreaCVNormExtrema[1]])]
    yProductDiff = yProduct[1] - yProduct[0]
    slopeProduct = np.arctan(yProductDiff / xProductDiff.total_seconds())
    print(f'Slope of Product: {slopeProduct}')

    print('Plotting Additional Data and Saving Output...')
    # Area for Reflectivity ≥ 35 dBZ
    axes[-1][-5].plot_date(datetimes, areaValues, linestyle='solid', ms=2)
    axes[-1][-5].plot_date(datetimes[window // 2:-window // 2],
                           yAreaAvg,
                           linestyle='solid',
                           ms=2)
    axes[-1][-5].plot_date(
        np.array(datetimes[window // 2:-window // 2])[np.array(
            [areaExtrema[0], areaExtrema[1]])],
        yAreaAvg[np.array([areaExtrema[0], areaExtrema[1]])],
        linestyle="solid",
        ms=2)
    axes[-1][-5].legend(['Area Delta', 'Sm. Area Delta', 'Build-up Rate'])
    axes[-1][-5].xaxis.set_major_formatter(date_format)
    plt.setp(axes[-1][-5].xaxis.get_majorticklabels(),
             rotation=45,
             ha="right",
             rotation_mode="anchor")
    axes[-1][-5].set_title('Area of Reflectivity ≥ 35 dBZ (km^2)')

    # Mean of Reflectivity ≥ 35 dBZ
    axes[-1][-4].plot_date(datetimes, refValues, linestyle='solid', ms=2)
    #axes[-1][-4].plot_date(datetimes[window//2:-window//2], yRefAvg, linestyle='solid', ms=2)
    #axes[-1][-4].plot_date(np.array(datetimes[window//2:-window//2])[np.array([0,refLocalMax[0][0]])], yRefAvg[np.array([0,refLocalMax[0][0]])], linestyle="solid", ms=2)
    axes[-1][-4].plot_date(datetimes[window // 2:-window // 2],
                           yRefAvg,
                           linestyle='solid',
                           ms=2)
    axes[-1][-4].plot_date(np.array(
        datetimes[window // 2:-window // 2])[np.array(
            [refExtrema[0], refExtrema[1]])],
                           yRefAvg[np.array([refExtrema[0], refExtrema[1]])],
                           linestyle="solid",
                           ms=2)
    axes[-1][-4].legend(['Ref Delta', 'Sm. Ref Delta', 'Build-up Rate'])
    axes[-1][-4].xaxis.set_major_formatter(date_format)
    plt.setp(axes[-1][-4].xaxis.get_majorticklabels(),
             rotation=45,
             ha="right",
             rotation_mode="anchor")
    axes[-1][-4].set_title('Mean of Reflectivity ≥ 35 dBZ')

    # Product of cv reflectivity and area
    axes[-1][-3].plot_date(datetimes,
                           areaCVValuesNormalized,
                           linestyle='solid',
                           ms=2)
    axes[-1][-3].plot_date(datetimes[window // 2:-window // 2],
                           yAreaCVNormAvg,
                           linestyle='solid',
                           ms=2)
    axes[-1][-3].plot_date(
        np.array(datetimes[window // 2:-window // 2])[np.array(
            [yAreaCVNormExtrema[0], yAreaCVNormExtrema[1]])],
        yAreaCVNormAvg[np.array([yAreaCVNormExtrema[0],
                                 yAreaCVNormExtrema[1]])],
        linestyle="solid",
        ms=2)
    axes[-1][-3].legend(
        ['Area*cv_Ref Delta', 'Sm. Area*cv_Ref Delta', 'Build-up Rate'])
    axes[-1][-3].xaxis.set_major_formatter(date_format)
    plt.setp(axes[-1][-3].xaxis.get_majorticklabels(),
             rotation=45,
             ha="right",
             rotation_mode="anchor")
    axes[-1][-3].set_title('Norm Product:\nCV Reflectivity * Area ≥ 35 dBZ')

    # Coeff. of Variation of Reflectivity ≥ 35 dBZ
    axes[-1][-2].plot_date(datetimes, cvValues, linestyle='solid', ms=2)
    axes[-1][-2].plot_date(datetimes[window // 2:-window // 2],
                           yCVAvg,
                           linestyle='solid',
                           ms=2)
    axes[-1][-2].legend(['CV Delta', 'Sm. CV Delta'])
    axes[-1][-2].xaxis.set_major_formatter(date_format)
    plt.setp(axes[-1][-2].xaxis.get_majorticklabels(),
             rotation=45,
             ha="right",
             rotation_mode="anchor")
    axes[-1][-2].set_title('CV of Reflectivity ≥ 35 dBZ')

    # Testing plot
    axes[-1][-1].semilogy(xf[1:N // 2], 2.0 / N * np.abs(yf[1:N // 2]), '-b')
    axes[-1][-1].semilogy(xf[1:N // 2], 2.0 / N * np.abs(ywf[1:N // 2]), '-r')
    axes[-1][-1].legend(['FFT', 'FFT w. Window'])
    #axes[-1][-1].plot(xf, 2.0/N * np.abs(yf[0:N//2]),linestyle='solid', ms=2)
    #axes[-1][-1].plot_date(datetimes, yCVAvg, linestyle='solid')
    #axes[-1][-1].xaxis.set_major_formatter(date_format)
    plt.setp(axes[-1][-1].xaxis.get_majorticklabels(),
             rotation=45,
             ha="right",
             rotation_mode="anchor")
    axes[-1][-1].set_title('Testing Plot (Frequency)')

    plt.tight_layout()
    plt.savefig(args.output + 'Nexrad.png')  # Set the output file name
    #plt.show()

    f_o = open(args.output + 'log_stats_area_nexrad.txt', 'a')
    f_o.write(datetimes[0].strftime("%Y%m%d%H%M%S") + '\t' +
              str(args.convLatLon) + '\t' + str(args.convBearing) + '\t' +
              str(args.scaleFactor) + '\t' + str(np.max(areaValues)) + '\t' +
              str(np.max(refValues)) + '\t' +
              str(slopeArea)  # std dev of LIS aligned data
              + '\t' + str(slopeRef) + '\t' + str(slopeProduct) + '\n')
    f_o.close()
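main() leans on a movingaverage helper that is not shown in this excerpt; a minimal boxcar sketch with the same call signature (an assumption, not the author's implementation):

import numpy as np

def movingaverage(values, window):
    """Boxcar moving average; output has the same length as the input."""
    weights = np.ones(window) / window
    return np.convolve(values, weights, 'same')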
Example no. 11
######################################################################
# Access the data in the AWS cloud. In this example, we're plotting data
# from the Evansville, IN radar, which had convection within its
# domain on 06/26/2019.
#

s3 = boto3.resource('s3',
                    config=Config(signature_version=botocore.UNSIGNED,
                                  user_agent_extra='Resource'))
bucket = s3.Bucket('noaa-nexrad-level2')
for obj in bucket.objects.filter(
        Prefix='2019/06/26/KVWX/KVWX20190626_221105_V06'):
    print(obj.key)

    # Use MetPy to read the file
    f = Level2File(obj.get()['Body'])

######################################################################
# Subset Data
# -----------
#
# With the file comes a lot of data, including multiple elevations and products.
# In the next block, we'll pull out the specific data we want to plot.
#

sweep = 0
# First item in ray is header, which has azimuth angle
az = np.array([ray[0].az_angle for ray in f.sweeps[sweep]])

ref_hdr = f.sweeps[sweep][0][4][b'REF'][0]
ref_range = np.arange(ref_hdr.num_gates) * ref_hdr.gate_width + ref_hdr.first_gate
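The snippet is truncated here; the full gallery example continues by pulling out the moment data and converting from polar to Cartesian coordinates, exactly as in Examples no. 6 and no. 8 above:

ref = np.array([ray[4][b'REF'][1] for ray in f.sweeps[sweep]])
xlocs = ref_range * np.sin(np.deg2rad(az[:, np.newaxis]))
ylocs = ref_range * np.cos(np.deg2rad(az[:, np.newaxis]))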
Example no. 12
def load_file(filename):
    f = Level2File(str(directory / filename))
    return f
Example no. 13
def subset(s3_bucket, prefix):
    s3 = boto3.resource('s3',
                        config=Config(signature_version=botocore.UNSIGNED,
                                      user_agent_extra='Resource'))
    bucket = s3.Bucket(s3_bucket)
    for obj in bucket.objects.filter(Prefix=prefix):
        print(obj.key)

        # Use MetPy to read the file
        f = Level2File(obj.get()['Body'])

        sweep = 0
        # First item in ray is header, which has azimuth angle
        az = np.array([ray[0].az_angle for ray in f.sweeps[sweep]])

        # ref_hdr = f.sweeps[sweep][0][4][b'REF'][0]
        # ref_range = np.arange(ref_hdr.num_gates) * ref_hdr.gate_width + ref_hdr.first_gate
        # ref = np.array([ray[4][b'REF'][1] for ray in f.sweeps[sweep]])

        rho_hdr = f.sweeps[sweep][0][4][b'RHO'][0]
        rho_range = (np.arange(rho_hdr.num_gates + 1) -
                     0.5) * rho_hdr.gate_width + rho_hdr.first_gate
        rho = np.array([ray[4][b'RHO'][1] for ray in f.sweeps[sweep]])

        # phi_hdr = f.sweeps[sweep][0][4][b'PHI'][0]
        # phi_range = (np.arange(phi_hdr.num_gates + 1) - 0.5) * phi_hdr.gate_width + phi_hdr.first_gate
        # phi = np.array([ray[4][b'PHI'][1] for ray in f.sweeps[sweep]])

        zdr_hdr = f.sweeps[sweep][0][4][b'ZDR'][0]
        zdr_range = (np.arange(zdr_hdr.num_gates + 1) -
                     0.5) * zdr_hdr.gate_width + zdr_hdr.first_gate
        zdr = np.array([ray[4][b'ZDR'][1] for ray in f.sweeps[sweep]])
        ref_norm, ref_cmap = ctables.registry.get_with_steps(
            'NWSReflectivity', 5, 5)

        # Plot the data!
        fig, axes = plt.subplots(1, 2, figsize=(15, 15))
        for var_data, var_range, colors, lbl, ax in zip(
            (rho, zdr), (rho_range, zdr_range), ('plasma', 'viridis'),
            ('RHO', 'ZDR (dB)'), axes.flatten()):
            # Turn into an array, then mask
            data = np.ma.array(var_data)
            data[np.isnan(data)] = np.ma.masked

            # Convert az,range to x,y
            xlocs = var_range * np.sin(np.deg2rad(az[:, np.newaxis]))
            ylocs = var_range * np.cos(np.deg2rad(az[:, np.newaxis]))

            # Define norm for reflectivity (colors holds colormap names, so
            # compare by name; neither RHO nor ZDR uses the NWS norm here)
            norm = ref_norm if colors == 'NWSReflectivity' else None

            # Plot the data
            a = ax.pcolormesh(xlocs, ylocs, data, cmap=colors, norm=norm)

            divider = make_axes_locatable(ax)
            cax = divider.append_axes('right', size='5%', pad=0.05)
            fig.colorbar(a, cax=cax, orientation='vertical', label=lbl)

            ax.set_aspect('equal', 'datalim')
            ax.set_xlim(-100, 100)
            ax.set_ylim(-100, 100)
            add_timestamp(ax, f.dt, y=0.02, high_contrast=False)
        plt.suptitle('KVWX Level 2 Data', fontsize=20)
        plt.tight_layout()
        plt.plot()
        file_name = 'foo_' + str(int(time.time())) + '.png'
        plt.savefig(file_name)
        return send_imgur(file_name)
        # with open("foo.png", "rb") as imageFile:
        # 	str = base64.b64encode(imageFile.read())
        # 	return str
        # return bytes(az.tostring()+b'@'+rho_range.tostring()
        # +b'@'+rho.tostring())
        # return bytes(az.tostring())
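send_imgur is defined elsewhere in the source project; a hypothetical stub that keeps the same interface for local testing:

def send_imgur(file_name):
    """Hypothetical stand-in: return the local path instead of uploading."""
    return file_name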
Example no. 14
def test_conditional_radconst(fname, has_v2):
    """Test whether we're using the right volume constants."""
    f = Level2File(get_test_data(fname, as_file_obj=False))
    assert hasattr(f.sweeps[0][0][3], 'calib_dbz0_v') == has_v2
Example no. 15
def test_doubled_file():
    """Test for #489 where doubled-up files didn't parse at all."""
    data = get_test_data('Level2_KFTG_20150430_1419.ar2v').read()
    fobj = BytesIO(data + data)
    f = Level2File(fobj)
    assert len(f.sweeps) == 12
Example no. 16
def test_level2_fobj():
    """Test reading NEXRAD level2 data from a file object."""
    Level2File(get_test_data('Level2_KFTG_20150430_1419.ar2v'))
Example no. 17
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER

from metpy.cbook import get_test_data
from metpy.io import Level2File
from metpy.plots import add_metpy_logo, add_timestamp, colortables
from metpy.calc import azimuth_range_to_lat_lon


###########################################

z_norm, z_cmap = colortables.get_with_range('NWSStormClearReflectivity', -30, 80)
v_norm, v_cmap = colortables.get_with_range('NWSVelocity', -30, 30)

# Open the file
name = get_test_data('KTLX20130520_201643_V06.gz', as_file_obj=False)
radar_file = Level2File(name)

# extract "constant" rda properties such as lon/lat.
# This is needed to map radar data to lon/lat.S
rda_name = radar_file.stid.decode('utf-8')
rda_info = radar_file.sweeps[0][0]
rda_lon = rda_info.vol_consts.lon
rda_lat = rda_info.vol_consts.lat

crs = ccrs.PlateCarree(central_longitude=rda_lon)

###########################################

def make_ticks(this_min,this_max):
    """
    Determines range of tick marks to plot based on a provided range of
Example no. 18
"""
===================
NEXRAD Level 2 File
===================

Use MetPy to read information from a NEXRAD Level 2 (volume) file and plot
"""
import matplotlib.pyplot as plt
import numpy as np

from metpy.cbook import get_test_data
from metpy.io import Level2File
from metpy.plots import add_metpy_logo, add_timestamp

###########################################

# Open the file
name = get_test_data('KTLX20130520_201643_V06.gz', as_file_obj=False)
f = Level2File(name)

print(f.sweeps[0][0])

###########################################

# Pull data out of the file
sweep = 0

# First item in ray is header, which has azimuth angle
az = np.array([ray[0].az_angle for ray in f.sweeps[sweep]])

# 5th item is a dict mapping a var name (byte string) to a tuple
# of (header, data array)
ref_hdr = f.sweeps[sweep][0][4][b'REF'][0]
ref_range = np.arange(ref_hdr.num_gates) * ref_hdr.gate_width + ref_hdr.first_gate
Example no. 19
def test_level2(fname, voltime, num_sweeps):
    """Test reading NEXRAD level 2 files from the filename."""
    f = Level2File(get_test_data(fname, as_file_obj=False))
    assert f.dt == voltime
    assert len(f.sweeps) == num_sweeps
Example no. 20
def produce(data, conn, client):
    Year = data['inputData']['Year']
    Month = data['inputData']['Month']
    Day = data['inputData']['Day']
    Radar = data['inputData']['Radar']
    uid = data['uid']
    inputData = data['inputData']
    userID = data["userID"]

    numberOfPlots = 1
    scans = conn.get_avail_scans(Year, Month, Day,
                                 Radar)  # year, month and day
    results = conn.download(scans[numberOfPlots - 1], 'templocation')

    # fig = plt.figure(figsize=(16,12))
    for i, scan in enumerate(results.iter_success(), start=1):
        #         ax = fig.add_subplot(1,1,i)
        #         radar = scan.open_pyart()

        #         display = pyart.graph.RadarDisplay(radar)
        #         display.plot('reflectivity',0,ax=ax,title="{} {}".format(scan.radar_id,scan.scan_time))
        #         display.set_limits((-150, 150), (-150, 150), ax=ax)

        sweep = 0
        name = scan.open()
        f = Level2File(name)
        # First item in ray is header, which has azimuth angle
        az = np.array([ray[0].az_angle for ray in f.sweeps[sweep]])

        # 5th item is a dict mapping a var name (byte string) to a tuple
        # of (header, data array)
        ref_hdr = f.sweeps[sweep][0][4][b'REF'][0]
        ref_range = np.arange(
            ref_hdr.num_gates) * ref_hdr.gate_width + ref_hdr.first_gate
        ref = np.array([ray[4][b'REF'][1] for ray in f.sweeps[sweep]])

        rho_hdr = f.sweeps[sweep][0][4][b'RHO'][0]
        rho_range = (np.arange(rho_hdr.num_gates + 1) -
                     0.5) * rho_hdr.gate_width + rho_hdr.first_gate
        rho = np.array([ray[4][b'RHO'][1] for ray in f.sweeps[sweep]])

        fig, axes = plt.subplots(1, 2, figsize=(15, 8))
        add_metpy_logo(fig, 190, 85, size='large')
        for var_data, var_range, ax in zip((ref, rho), (ref_range, rho_range),
                                           axes):
            # Turn into an array, then mask
            data = np.ma.array(var_data)
            data[np.isnan(data)] = np.ma.masked

            # Convert az,range to x,y
            xlocs = var_range * np.sin(np.deg2rad(az[:, np.newaxis]))
            ylocs = var_range * np.cos(np.deg2rad(az[:, np.newaxis]))

            # Plot the data
            ax.pcolormesh(xlocs, ylocs, data, cmap='viridis')
            ax.set_aspect('equal', 'datalim')
            ax.set_xlim(-40, 20)
            ax.set_ylim(-30, 30)
            add_timestamp(ax, f.dt, y=0.02, high_contrast=True)

        pltName = 'images/' + uid + str(i) + '.png'
        plt.savefig(pltName)
        plt.close(fig)
        uploadImage = im.upload_image(pltName, title="Uploaded with PyImgur")
        link = str(uploadImage.link)

        body = {
            "inputData": inputData,
            "outputData": link,
            "uid": uid,
            "userID": userID
        }

        with (client.topics['dataAnalysisConsumerF']
              ).get_sync_producer() as producer:
            producer.produce(bytes(json.dumps(body), 'utf-8'))
Example no. 21
def test_build19_level2_additions():
    """Test handling of new additions in Build 19 level2 data."""
    f = Level2File(get_test_data('Level2_KDDC_20200823_204121.ar2v'))
    assert f.vcp_info.vcp_version == 1
    assert f.sweeps[0][0].header.az_spacing == 0.5
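The same Build 19 additions can be inspected on any post-Build-19 volume; a minimal sketch, assuming a local copy of the test file:

from metpy.io import Level2File

f = Level2File('Level2_KDDC_20200823_204121.ar2v')  # assumes a local copy
print(f.vcp_info.vcp_version)            # 1 for this volume
print(f.sweeps[0][0].header.az_spacing)  # 0.5 degrees here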