Example #1
    def add_datapt(self, value):
        '''
        Add a single value to the data bin

        >>> bd = BinnedData()
        >>> bd.add_datapt(3.2)
        >>> print(bd.databin_dict)
        {'4.0': 1}
        >>> bd.add_datapt(1.3)
        >>> bd.databin_dict == {'2.0': 1, '4.0': 1}
        True
        '''

        #Firstly figure out which bin it should go in.
        binvalue = self.calc_bin(value)

        #Avoid precision errors in the dictionary keywords by using a consistent
        #format string based on the number of decimal points in the required
        #binsize.
        if self.binsize_fmtstr is None:
            binsize_ndp = abs(decimal.Decimal(str(self.binsize)).as_tuple().exponent)
            if binsize_ndp < 6:
                self.binsize_fmtstr = '{:.' + str(binsize_ndp)+'f}'
            else:
                self.binsize_fmtstr = '{:.3e}'

        #Then add to dictionary of data, using the formatted string as the key
        #so that repeat additions to the same bin are counted correctly
        binkey = self.binsize_fmtstr.format(binvalue)
        if binkey not in self.databin_dict:
            self.databin_dict[binkey] = 1
        else:
            self.databin_dict[binkey] += 1
Example #2
 def findOrganismById(self, id_number):
     orgs = self.findAllOrganisms()
     orgs = [x for x in orgs if str(x['id']) == str(id_number)]
     if len(orgs) == 0:
         raise Exception("Unknown ID")
     else:
         return orgs[0]
Example #3
 def findKeyById(self, id_number):
     keys = self.findAllKeys()
     keys = [x for x in keys if str(x['id']) == str(id_number)]
     if len(keys) == 0:
         raise Exception("Unknown ID")
     else:
         return keys[0]
Example #4
 def findValueById(self, id_number):
     values = self.findAllValues()
     values = [x for x in values if str(x['id']) == str(id_number)]
     if len(values) == 0:
         raise Exception("Unknown ID")
     else:
         return values[0]
Example #5
 def findCommentById(self, id_number):
     comments = self.findAllComments()
     comments = [x for x in comments if str(x['id']) == str(id_number)]
     if len(comments) == 0:
         raise Exception("Unknown ID")
     else:
         return comments[0]
Example #6
 def findStatusById(self, id_number):
     statuses = self.findAllStatuses()
     statuses = [x for x in statuses if str(x['id']) == str(id_number)]
     if len(statuses) == 0:
         raise Exception("Unknown ID")
     else:
         return statuses[0]
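
The five finder methods above are identical apart from the accessor they call. A minimal refactoring sketch (the _find_by_id helper name is hypothetical; it assumes the same findAll* accessors shown above):

 def _find_by_id(self, items, id_number):
     #Shared lookup - compare ids as strings, exactly as the originals do
     matches = [x for x in items if str(x['id']) == str(id_number)]
     if not matches:
         raise Exception("Unknown ID")
     return matches[0]

 def findOrganismById(self, id_number):
     return self._find_by_id(self.findAllOrganisms(), id_number)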
Example #7
    def __str__(self):
        """
        Output from 'print'
        """
        string = str(type(self)) + '\n'
        keys = sorted(self.__dict__.keys())
        for key in keys:
            string += key + ': ' + str(self.__dict__[key]) + '\n'

        return string
Example #8
def yearmonthname_from_yearmonth(coord, point):
    """
    Category function to calculate year and month name (month Year eg
    'Feb 2014') given yearmonth (integer in format YYYYmm, eg 201402),
    for use in add_categorised_coord.
    """
    year = int(str(point)[:4])
    month = int(str(point)[4:6])
    #Use arbitrary date (1st) within month as these are not used

    return datetime.datetime(year, month, 1).strftime('%b %Y')
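
A quick check of the category function (the coord argument is unused, so None suffices); attaching it to a cube via iris.coord_categorisation.add_categorised_coord is sketched as an assumption about how it gets used:

>>> yearmonthname_from_yearmonth(None, 201402)
'Feb 2014'

#Hypothetical usage, for a cube with a 'yearmonth' coordinate
import iris.coord_categorisation
iris.coord_categorisation.add_categorised_coord(
    cube, 'yearmonthname', cube.coord('yearmonth'),
    yearmonthname_from_yearmonth)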
Example #9
    def calculate_dates(self):
        """
        Calculate unknown dates from known variables
        (range_days, start_date, end_date), also convert to date-time formats.
        If none of range_days, start_date or end_date are given a warning is
        raised and no date information is added to dictionary.
        Otherwise, requires at least two from range_days, start_date, end_date
        """

        if 'start_date' in self:
            self['start_date'] = str(self['start_date'])
            self['start_datetime'] = datetime.strptime(self['start_date'],
                                                       "%Y%m%d")
        if 'end_date' in self:
            self['end_date'] = str(self['end_date'])
            self['end_datetime'] = datetime.strptime(self['end_date'], "%Y%m%d")

        if 'range_days' in self:
            if 'start_date' in self:
                #Check for consistency - calculate expected end date first
                end_datetime = self['start_datetime'] \
                               + timedelta(days=self['range_days'])
                if 'end_date' in self:
                    if end_datetime != self['end_datetime']:
                        raise ValueError("end_date is not consistent with "
                                         "start_date+range_days")
                else:
                    #Set end_date, end_datetime
                    self['end_datetime'] = end_datetime
                    self['end_date'] = end_datetime.strftime("%Y%m%d")

            elif 'end_date' in self:
                #Calculate start_date:
                start_datetime = self['end_datetime'] \
                                 - timedelta(days=self['range_days'])
                self['start_datetime'] = start_datetime
                self['start_date'] = start_datetime.strftime("%Y%m%d")

            else:
                warnings.warn("start and end dates not set: Need 2 of "
                              "range_days, start_date and end_date")

        elif 'start_date' in self:
            if 'end_date' in self:
                self['range_days'] = (self['end_datetime'] \
                                      - self['start_datetime']).days
            else:
                warnings.warn("end date not set: Need 2 of "
                              "range_days, start_date and end_date")

        elif 'end_date' in self:
            warnings.warn("start date not set: Need 2 of "
                          "range_days, start_date and end_date")
Example #10
    def __str__(self):
        """
        Output from 'print'
        """
        string = str(type(self)) + '\n'
        keys = sorted(self.__dict__.keys())
        for key in keys:
            value = self.__dict__[key]
            if key == 'cmap':
                value = value.name + ' (' + str(value) + ')'
            string += key + ': ' + str(value) + '\n'

        return string
Example #11
    def add_text_file(self, filename, width='100%', height='50%'):
        """
        Include a text file in webpage in a frame (allows scrolling).

        :param filename: Filename of text file to be added
        :param width: Width of frame on page. Can be in pixels (eg 500),
                      or as percentage of the page width (eg 100%).
        :param height: Height of frame on page. Can be in pixels (eg 500),
                       or as percentage of the page height (eg 100%).

        """

        self.body += '<iframe width=' + str(width) + ' height=' + str(
            height) + ' '
        self.body += 'src="' + filename + '"></iframe>'
Example #12
    def get_histarray(self):
        '''
        Converts databin dictionary into an array of bin edges
        and histogram totals

        >>> bd = BinnedData()
        >>> bd.databin_dict = {'1.0':2, '3.0':1, '4.0':1}
        >>> binedges, hist = bd.get_histarray()
        >>> np.set_printoptions(formatter={'float':lambda x: '{:5.2f}'.format(x)})
        >>> print(binedges)
        [ 1.00  2.00  3.00  4.00]
        >>> print(hist)
        [ 2.00  0.00  1.00  1.00]
        >>> np.set_printoptions()
        '''

        #Generate full array of all possible bin edges in range
        minval = min([float(k) for k in self.databin_dict.keys()])
        maxval = max([float(k) for k in self.databin_dict.keys()])
        binedges = np.arange(minval,
                             maxval+self.binsize,
                             self.binsize)

        hist = np.zeros(len(binedges))
        for i, binedge in enumerate(binedges):
            binedge = str(binedge)
            if binedge in self.databin_dict:
                hist[i] = self.databin_dict[binedge]

        return binedges, hist
Example #13
def retrieve_yearly_web(year, sites_data, yearly_dir):
    """
    Retrieve yearly observation file from AURN website, UK-AIR.
    This retrieves all available species.

    :param year: int, year to retrieve
    :param sites_data: numpy ndarray containing site information data
                       from a :class:`sites_info.SitesInfo` object.
    :param yearly_dir: string, directory to retrieve into

    """

    if not os.path.isdir(yearly_dir):
        print('Creating output yearly directory:', yearly_dir)
        os.makedirs(yearly_dir)

    for abbrev in sites_data['abbrev']:
        print('Retrieving ' + abbrev + '...')
        url = 'http://uk-air.defra.gov.uk/datastore/data_files/site_data/'
        filename = abbrev + '_' + str(year) + '.csv'
        #Delete pre-existing file first
        full_filename = yearly_dir + '/' + filename
        if os.path.isfile(full_filename):
            os.remove(full_filename)
        #Issue wget command to retrieve.
        #Note -q is used to quiet the output - if trying to debug, then
        #remove this option!
        cmd = ('/usr/bin/wget -q --no-check-certificate --directory-prefix=' +
               yearly_dir + ' ' + url + filename)
        return_code = adaqcode.shell_commands.call_shell(cmd)
        if return_code == 0:
            print('File retrieved: ' + full_filename)
        else:
            print('Error retrieving: ' + filename + ' (file may not exist)')
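
A hedged usage sketch; sites_data would come from a sites_info.SitesInfo object loaded elsewhere, and the output directory is illustrative:

retrieve_yearly_web(2014, sites_data, './aurn_yearly')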
Example #14
def read_regime_txt(filein):
    """
    Read in dates and regimes from text file into numpy 2D array.

    :param filein: .txt file with columns year, month, day, regime.
    :return: numpy 2D array of dates and weather regimes.

    >>> import config
    >>> sample_data_path = config.SAMPLE_DATADIR+'weather_regimes/'
    >>> wr = sample_data_path + 'daily_30regimes_since2010.txt'
    >>> regime_dates = read_regime_txt(wr) # doctest: +ELLIPSIS
    Getting regime data from  .../daily_30regimes_since2010.txt
    >>> print(regime_dates.shape) # doctest: +ELLIPSIS
    (..., 2)
    >>> print(regime_dates[0,0])
    2010-01-01
    >>> print(regime_dates[0,1])
    19
    """

    print('Getting regime data from ', filein)

    #Read in just first 4 columns of .txt file - year, month, day, regime
    data = np.genfromtxt(filein,
                         dtype=None,
                         usecols=(0, 1, 2, 3),
                         names='year, month, day, regime')

    #Initialise empty 2D array for date and weather regime
    regime_dates = np.empty((len(data), 2), dtype=object)

    #Collate year, month, day into a zero-padded yyyymmdd date string
    for i, datum in enumerate(data):
        date_string = '{:04d}{:02d}{:02d}'.format(datum['year'],
                                                  datum['month'],
                                                  datum['day'])

        #Convert yyyymmdd date string to date format
        #Populate array with formatted date and regime number
        regime_dates[i, 0] = ffdate2date(date_string)
        regime_dates[i, 1] = datum['regime']

    return regime_dates
Example #15
def PlumeandMet(workdir, metDir):
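    """
    Plot precipitation (filled contours) overlaid with air
    concentration (line contours) for the hard-coded time,
    loading cubes from text files in workdir and metDir.
    """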

    times = ['201303060600']

    for time in times:
        filenames = glob.glob(workdir + '*' + time + '.txt')
        filename = filenames[0]
        attConstraint = iris.AttributeConstraint(Name='TotalAC')
        cube = iris.load_cube(filename, attConstraint)
        conc = cube

        filename = metDir + '*' + time + '.txt'
        precip = iris.load_cube(filename)

        colorscale = ('#ffffff', '#b4dcff', '#04fdff', '#00ff00', '#fdff00',
                      '#ffbd02', '#ff6a00', '#fe0000', '#0000FF', '#800080',
                      '#008000')

        # Set up axes
        ax = plt.axes(projection=ccrs.PlateCarree())
        ax.set_extent([-23, -13, 63, 67])

        # Set up country outlines
        countries = cfeature.NaturalEarthFeature(category='cultural',
                                                 name='admin_0_countries',
                                                 scale='10m',
                                                 facecolor='none')
        ax.add_feature(countries, edgecolor='black', zorder=2)

        # Set-up the gridlines
        gl = ax.gridlines(draw_labels=True, linewidth=0.8, alpha=0.9)

        gl.xlabels_top = False
        gl.ylabels_right = False
        gl.xlocator = mticker.FixedLocator(
            [-23, -22, -21, -20, -19, -18, -17, -16, -15, -14])
        gl.ylocator = mticker.FixedLocator([63, 64, 65, 66, 67])
        gl.xformatter = LONGITUDE_FORMATTER
        gl.yformatter = LATITUDE_FORMATTER

        # Plot
        cf1 = iplt.contourf(precip,
                            levels=[0.0, 0.01, 0.1, 1.0, 10],
                            colors=colorscale)

        cf = iplt.contour(conc,
                          levels=[
                              1e-9, 3.16e-9, 1e-8, 3.16e-8, 1e-7, 3.16e-7,
                              1e-6, 3.16e-6, 1e-5
                          ])

        cb = plt.colorbar(cf1, orientation='horizontal', shrink=0.9)
        cb.set_label(str(precip.units))
        plt.title(
            'Precipitation (coloured) and Air Concentration (contoured) \n' +
            time,
            fontsize=12)

        plt.show()
Example #16
def units_str(units):
    """
    Converts units to nice formatted units string.
    """
    unitsstr = str(units)
    if unitsstr in LATEX:
        unitsstr = LATEX[unitsstr]
    return unitsstr
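
LATEX is a module-level lookup table not shown here; a sketch of its likely shape, with illustrative entries only:

#Illustrative only - the real LATEX mapping is defined elsewhere
LATEX = {'ug/m3': r'$\mu g\ m^{-3}$',
         'm s-1': r'$m\ s^{-1}$'}

print(units_str('ug/m3'))   #$\mu g\ m^{-3}$
print(units_str('K'))       #K - returned unchanged, no LATEX entry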
Example #17
    def testPersist(self):

        # create file
        file1 = CacheMetaData(TestCacheMetaData.TEST_CACHE_PATH + "file1", 10,
                              0x2345, 1024)
        self.assertEqual(TestCacheMetaData.TEST_CACHE_PATH +
                         "file1", file1.metaDataFile)
        self.assertEqual(10, file1.bitmap.length())
        file1.setReadBlocks(2, 5)
        self.assertEqual(4, file1.bitmap.count_bits())
        self.assertEqual("0011110000", str(file1.bitmap))
        self.assertEqual(0x2345, file1.md5sum)
        self.assertEqual(1024, file1.size)

        file1.persist()

        file2 = CacheMetaData(TestCacheMetaData.TEST_CACHE_PATH + "file1", 10,
                              0x2345, 1024)
        self.assertEqual(4, file2.bitmap.count_bits())
        self.assertEqual("0011110000", str(file2.bitmap))
        self.assertEqual(0x2345, file2.md5sum)
        self.assertEqual(1024, file2.size)

        # same file name, different md5 => reset bitmap
        file3 = CacheMetaData(TestCacheMetaData.TEST_CACHE_PATH + "file1", 10,
                              0x23456, 1025)
        self.assertEqual(0, file3.bitmap.count_bits())
        self.assertEqual("0000000000", str(file3.bitmap))
        self.assertEqual(0x23456, file3.md5sum)
        self.assertEqual(1025, file3.size)

        CacheMetaData.deleteCacheMetaData(
            TestCacheMetaData.TEST_CACHE_PATH + "file1")
        file4 = CacheMetaData(TestCacheMetaData.TEST_CACHE_PATH + "file1", 10,
                              0x2345, 1025)
        self.assertEqual(0, file4.bitmap.count_bits())
        self.assertEqual("0000000000", str(file4.bitmap))
        self.assertEqual(0x2345, file4.md5sum)
        self.assertEqual(1025, file4.size)

        file4.setReadBlocks(-8, -5)
        self.assertEqual("0011110000", str(file4.bitmap))
Example #18
    def testPersist(self):

        # create file
        file1 = CacheMetaData(TestCacheMetaData.TEST_CACHE_PATH + "file1", 10,
                              0x2345, 1024)
        self.assertEqual(TestCacheMetaData.TEST_CACHE_PATH + "file1",
                         file1.metaDataFile)
        self.assertEqual(10, file1.bitmap.length())
        file1.setReadBlocks(2, 5)
        self.assertEqual(4, file1.bitmap.count_bits())
        self.assertEqual("0011110000", str(file1.bitmap))
        self.assertEqual(0x2345, file1.md5sum)
        self.assertEqual(1024, file1.size)

        file1.persist()

        file2 = CacheMetaData(TestCacheMetaData.TEST_CACHE_PATH + "file1", 10,
                              0x2345, 1024)
        self.assertEqual(4, file2.bitmap.count_bits())
        self.assertEqual("0011110000", str(file2.bitmap))
        self.assertEqual(0x2345, file2.md5sum)
        self.assertEqual(1024, file2.size)

        # same file name, different md5 => reset bitmap
        file3 = CacheMetaData(TestCacheMetaData.TEST_CACHE_PATH + "file1", 10,
                              0x23456, 1025)
        self.assertEqual(0, file3.bitmap.count_bits())
        self.assertEqual("0000000000", str(file3.bitmap))
        self.assertEqual(0x23456, file3.md5sum)
        self.assertEqual(1025, file3.size)

        CacheMetaData.deleteCacheMetaData(TestCacheMetaData.TEST_CACHE_PATH +
                                          "file1")
        file4 = CacheMetaData(TestCacheMetaData.TEST_CACHE_PATH + "file1", 10,
                              0x2345, 1025)
        self.assertEqual(0, file4.bitmap.count_bits())
        self.assertEqual("0000000000", str(file4.bitmap))
        self.assertEqual(0x2345, file4.md5sum)
        self.assertEqual(1025, file4.size)

        file4.setReadBlocks(-8, -5)
        self.assertEqual("0011110000", str(file4.bitmap))
Example #19
def parse_bool(s):
    if s is None:
        return False

    val = str(s).lower().strip()
    if "#" in val:
        val = val.split("#")[0].strip()
    if val in ("t", "true", "yes", "1"):
        return True
    elif val in ("f", "false", "no", "0"):
        return False
    else:
        raise ValueError("Can't parse '%s' as boolean" % s)
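
Example calls, including the comment-stripping branch:

>>> parse_bool('Yes')
True
>>> parse_bool(' false  # disabled in config')
False
>>> parse_bool(None)
False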
Example #20
    def __get_species_dict(self, data, dt_array, site_id_array):
        """
        Step 3:
        Set up an array of size nsites x ntimes for each species
        Loop through all data again to populate this array.
        Any missing data is left as np.nan
        (Not all sites have data for all species)

        :returns: species_dict which is a dictionary whose keys are
                  species.
                  Each value is an array of size nsites x ntimes,
                  which is filled with data values.
        """
        species_dict = {}
        for line in data:

            if self.short_name_list is not None:
                short_name = CAMSAQOBS_2_SHORTNAME[line['PARAMETER']]
                if short_name not in self.short_name_list:
                    #Don't need this line
                    continue

            if self.abbrev_prefix_list is not None:
                if not str(line['STATION']).startswith( \
                    tuple(self.abbrev_prefix_list)):
                    #Don't need this site/line
                    continue

            if self.sites_data is not None:
                #Limit by abbreviation/station name
                if line['STATION'] not in self.sites_data['abbrev']:
                    #Don't need this site/line
                    continue

            if line['PARAMETER'] not in species_dict:
                species_dict[line['PARAMETER']] = np.zeros(
                    (len(site_id_array), len(dt_array)))
                species_dict[line['PARAMETER']][:] = np.nan

            site_id = adaq_data.generate_siteids([line['LON']],
                                                 [line['LAT']])[0]
            i_site_id = np.where(site_id_array == site_id)[0]

            dt = datetime.datetime(line['YEAR'], line['MONTH'], line['DAY'],
                                   line['HOUR'])
            i_dt = np.where(dt_array == dt)[0]

            species_dict[line['PARAMETER']][i_site_id, i_dt] = \
                                                       line['CONCENTRATIONkgm3']

        return species_dict
Example #21
    def add_footer(self,
                   operational_warning=False,
                   name=None,
                   desk_number=None,
                   telephone=None,
                   email=None):
        """
        Add footer to website.
        This must be included at the end of any webpage.

        :param operational_warning: Display warning in red, "WARNING: This page
                                    is not supported operationally."
        :param name: Real name to add to address section and to
                     who modified last.
        :param desk_number: Desk number to add to address section
        :param telephone: Telephone number to add to address section
        :param email: Email address to add to address section.


        .. Note:: This can be overwritten by setting self.footer, but should
                  still end with '</body>\\n</html>'

        """

        footer = '\n<p><hr><p>\n'

        if operational_warning:
            footer += ('<font color="#990000"> WARNING: This page is not '
                       'supported operationally.</font>\n')

        footer += ('<br><I>Modified:</I> <B><!--#config timefmt="%d %b %Y"'
                   '--><!--#echo var="LAST_MODIFIED"--> </B>')
        if name is not None:
            footer += '<i>by</i><b> ' + name + '</b>'
        footer += '\n<p><hr><p>\n'

        footer += '<address>\n'
        if name is not None:
            footer += name + '<br>'
        if desk_number is not None:
            footer += 'Location: ' + desk_number + '<br>'
        if telephone is not None:
            footer += 'Telephone: ' + str(telephone) + '<br>'
        if email is not None:
            footer += 'Email: <a href="mailto:' + email + '">'
            footer += email + '</a>'
        footer += '</address>\n'

        footer += '</body>\n</html>'

        self.footer = footer
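
A usage sketch; page stands in for an instance of the (unnamed here) webpage-builder class that owns this method:

#page is a hypothetical instance of the page-builder class
page.add_footer(operational_warning=True,
                name='A. Person',
                telephone=12345,
                email='a.person@example.com')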
Example #22
    def __read_header(self, filename):
        """
        Read in file to generate required headers and to alter the dtype of
        the data to ensure strings are read in as unicode data instead of bytes.

        :param filename: filename to read
        :returns: (headers, dtypes) - Where headers is a list of required header
                  names with those from the file reduced to lower case and some
                  key ones renamed for consistency (eg from 'lat' to
                  'latitude'). dtypes is a list of tuples - one tuple for each
                  header, where the first value is the header name and the
                  second value is the data type, where strings are modified to
                  be unicode (instead of bytes, which is the default for
                  python3).

        """

        data = np.genfromtxt(filename, dtype=None, names=True)

        headers = []
        dtypes = []

        for name in data.dtype.names:

            #Convert header names to lowercase and modify to required names
            #where obvious
            header = name.lower()
            if header == 'lat':
                header = 'latitude'
            elif header == 'lon':
                header = 'longitude'
            elif header == 'altit':
                header = 'site_altitude'
            elif header == 'name':
                header = 'site_name'
            elif header == 'type':
                header = 'site_type'
            headers.append(header)

            #Check if loading a string - if so, will need to convert to unicode
            dtype = str(data[name].dtype)
            if dtype[:2] == '|S':
                #Set up new dtype, which is a tuple for each header name,
                #(header, dtype). Replace any strings str of same length.
                #Note str is bytes for python2 and unicode for python3.
                dtypes.append((header, str, int(dtype[2:])))
            else:
                dtypes.append((header, dtype))

        return headers, dtypes
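
The bytes-versus-str behaviour this method works around can be reproduced directly; a small self-contained illustration (on numpy versions where genfromtxt defaults to byte strings under python3):

import io
import numpy as np

data = np.genfromtxt(io.StringIO('name lat\nSiteA 51.5'),
                     dtype=None, names=True)
#data['name'] is typically b'SiteA' (bytes), hence the dtype rebuild above
print(data['name'])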
Example #23
    def __read_sites_classes(self):
        """
        Read site classifications file into self.classes_data.
        Also set up self.abbrev2latlon as a dictionary to convert between
        site codes (abbrevs) and (lat,lon) - this will be used to avoid having
        two sites with same site code, but slightly different locations.

        >>> od = CAMSAQObsData()
        >>> od._CAMSAQObsData__read_sites_classes()
        >>> print(len(list(od.abbrev2latlon.keys())))
        4298
        >>> print(od.abbrev2latlon['AL0201A'])
        ('41.3303', '19.8218')
        """
        if self.classes_file is None:
            self.classes_file = config.SAMPLE_DATADIR + \
                                'CAMSAQ_obs/classes_MACC2013.txt'

        #Read data in initially to determine data types
        data = np.genfromtxt(self.classes_file, dtype=None, names=True)

        #Convert string data types to str
        dtypes = []
        for name in data.dtype.names:
            dtype = data.dtype[name]
            if str(dtype)[:2] == '|S':
                dtypes.append((name, str, int(str(dtype)[2:])))
            else:
                dtypes.append((name, dtype))

        #Read in a second time with corrected dtypes
        self.classes_data = np.genfromtxt(self.classes_file,
                                          dtype=dtypes,
                                          names=True)

        for site in self.classes_data:
            self.abbrev2latlon[site['code']] = (site['lat'], site['lon'])
Example #24
def readAURNffobs(cube_dt, species, sitelist=None, obsdir=None):
    '''Read in observations from reformatted AURN text file'''

    if sitelist is None:
        sitelist = np.genfromtxt(
            '/home/h03/apdg/AQUM/AQcases_fcm/code/aq_sites_all.txt',
            dtype=None,
            names=True)
    if obsdir is None:
        obsdir = '/data/nwp1/apdl/AQobs/' + str(cube_dt.year) + '_reformatted/'

    lats = []
    lons = []
    values = []
    #Loop through sites
    for iabbrev, abbrev in enumerate(sitelist['Abbrev']):
        files = glob.glob(obsdir + abbrev + '_*')
        #Check if file exists - extract data if so
        if len(files) == 1:
            obsdata = np.genfromtxt(files[0], dtype=None, names=True)
            #Find date that matches cube
            for idata, data in enumerate(obsdata['date']):
                date = str(obsdata['date'][idata])
                time = str(obsdata['time'][idata])
                dt = fixfmt2dt(date, time)
                if dt == cube_dt and np.isfinite(obsdata[species][idata]):
                    #Pull out data value if not nan
                    print(abbrev, sitelist['Lat'][iabbrev],
                          sitelist['Lon'][iabbrev])
                    print(obsdata[species][idata])
                    lats.append(sitelist['Lat'][iabbrev])
                    lons.append(sitelist['Lon'][iabbrev])
                    values.append(obsdata[species][idata])
                    break

    return lats, lons, values
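
A hedged usage sketch; it relies on the default site list and observation directory hard-coded above being available:

import datetime
lats, lons, values = readAURNffobs(datetime.datetime(2014, 2, 1, 12), 'O3')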
Example #25
    def add_image(self, plotfilename, width=None):
        """
        Add an image - clicking on it will open it up larger

        :param plotfilename: Filename of plot to display.
                             Note this should be as a web address, or
                             as a relative link to self.html_dir
        :param width: Width of image on page. Can be in pixels (eg 500),
                      or as percentage of the page width (eg 100%).
        """

        self.body += '<a href="' + plotfilename + '"><img '
        if width is not None:
            self.body += 'width=' + str(width) + ' '
        self.body += 'src="' + plotfilename + '"> </a>'
Example #26
    def add_cube(self, cube):
        '''
        Add entire iris cube of data to the data bin

        >>> import iris
        >>> cube = iris.cube.Cube(np.arange(10)/2.)
        >>> cube.data[3:6] = 0.
        >>> np.set_printoptions(formatter={'float':lambda x: '{:5.2f}'.format(x)})
        >>> print(cube.data)
        [ 0.00  0.50  1.00  0.00  0.00  0.00  3.00  3.50  4.00  4.50]
        >>> np.set_printoptions()
        >>> bd = BinnedData()
        >>> bd.add_cube(cube)
        >>> bd.databin_dict == {'0.0': 4, '1.0': 2, '3.0': 1,
        ... '4.0': 2, '5.0': 1}
        True
        '''


        #for value in cube.data:
        #    self.add_datapt(value)


        mindata = np.nanmin(cube.data)
        maxdata = np.nanmax(cube.data)
        #Figure out which bin these should go in.
        minbin = self.calc_bin(mindata)
        maxbin = self.calc_bin(maxdata)

        #Now generate all possible bins in this range
        bin_edges = np.arange(minbin-self.binsize, maxbin+self.binsize,
                              self.binsize)

        #Now use pandas cut and value_counts to generate binned data histogram
        #where the right-hand edge of each bin is included and the left is not.
        out = pd.cut(cube.data.flatten(), bin_edges, labels=bin_edges[1:])
        counts = pd.value_counts(out)
        for index, value in zip(counts.index, counts.values):
            binvalue = str(index)
            #Only store data in bin if actually any counts
            #(values) for this binvalue
            if value > 0:
                if binvalue not in self.databin_dict:
                    self.databin_dict[binvalue] = value
                else:
                    self.databin_dict[binvalue] += value
Example #27
    def write_to_file(self, outputfile):
        """
        Write information from sites cube out to a sites file.
        Will write out in the same (or very similar) format as the file
        that is read in.

        :param outputfile: File to write out to.
        """

        if self.data is None:
            raise IOError("Object does not have any data")

        #Get headers
        headers = list(self.data.dtype.names)
        header_str = ("%s " * len(headers)) % tuple(headers)

        #Get format string for each column
        format_str = ""
        for header in headers:
            dtype_str = self.data.dtype.fields[header][0].str[1:]

            if header in [
                    'latitude', 'longitude', 'grid_latitude', 'grid_longitude'
            ]:
                format_str += '%14.9f'
            elif header == 'site_id':
                format_str += '%21.10f'
            elif (dtype_str[0] == 'S') or (dtype_str[0] == 'U'):
                format_str += '%-' + dtype_str[1:] + 's'
            elif dtype_str[0] == 'f':
                format_str += '%.' + str(2 + int(dtype_str[1:])) + 'f'
            elif dtype_str[0] == 'i':
                format_str += '%' + dtype_str[1:] + 'd'

            format_str += ' '
        format_str += '\n'

        #Now output to file
        with open(outputfile, 'w') as fout:

            fout.write(header_str + "\n")

            for site in self.data:
                fout.write(format_str % tuple(site))

        print("Written to file " + outputfile)
Example #28
 def get_or_create_calendar_for_object(self, obj, distinction=None, name=None):
     """
     >>> user = User(username="******")
     >>> user.save()
     >>> calendar = Calendar.objects.get_or_create_calendar_for_object(user, name = "Jeremy's Calendar")
     >>> calendar.name
     "Jeremy's Calendar"
     """
     try:
         return self.get_calendar_for_object(obj, distinction)
     except Calendar.DoesNotExist:
         if name is None:
             name = str(obj)
         slug = slugify(name)
         calendar = self.create(name=name, slug=slug)
         calendar.create_relation(obj, distinction)
         return calendar
Example #29
 def get_params(self):
     """
     >>> rule = Rule(params = "count:1;bysecond:1;byminute:1,2,4,5")
     >>> rule.get_params()
     {'count': 1, 'byminute': [1, 2, 4, 5], 'bysecond': 1}
     """
     if self.params is None:
         return {}
     params = self.params.split(';')
     param_dict = []
     for param in params:
         param = param.split(':')
         if len(param) == 2:
             param = (str(param[0]), [int(p) for p in param[1].split(',')])
             if len(param[1]) == 1:
                 param = (param[0], param[1][0])
             param_dict.append(param)
     return dict(param_dict)
Example #31
def generate_abbrevs(sitesdict):
    '''Generates unique site abbreviations for each site and returns them as a dictionary'''
    siteabbrevs = {}
    for site in sitesdict.keys():
        #Skip blank placeholder site names
        if site.strip():
            abbrev = site[0:4]
            abbrev = abbrev.replace(' ', '')
            abbrev = abbrev.replace('.', '')
            abbrev = abbrev.replace('-', '')
            #Prefix with 'm' before the collision check, so the comparison
            #is against the keys as they are actually stored
            abbrev = 'm' + abbrev
            count = 0
            while abbrev in siteabbrevs:
                if count == 0:
                    abbrev = abbrev[0:4]
                else:
                    abbrev = abbrev[0:4] + str(count)
                count += 1
            siteabbrevs[abbrev] = site
    return siteabbrevs
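
A short collision demo of the logic above (illustrative site names; output order assumes insertion-ordered dicts, python 3.7+):

>>> generate_abbrevs({'Aberdeen': 1, 'Aberporth': 2})
{'mAber': 'Aberdeen', 'mAbe': 'Aberporth'}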
Example #32
 def get_or_create_calendar_for_object(self, obj, distinction=None, name=None):
     """
     >>> user = User(username="******")
     >>> user.save()
     >>> calendar = Calendar.objects.get_or_create_calendar_for_object(user, name = "Jeremy's Calendar")
     >>> calendar.name
     "Jeremy's Calendar"
     """
     try:
         return self.get_calendar_for_object(obj, distinction)
     except Calendar.DoesNotExist:
         if name is None:
             calendar = Calendar(name=str(obj))
         else:
             calendar = Calendar(name=name)
         calendar.slug = slugify(calendar.name)
         calendar.save()
         calendar.create_relation(obj, distinction)
         return calendar
Example #33
def parse_params(paramstring):
    """
    Returns a dictionary parsed from a semicolon-delimited set of rrules

    "count:1;bysecond:1;byminute:1,2,4,5" returns
    {'count': 1, 'byminute': [1, 2, 4, 5], 'bysecond': 1}
    """
    if paramstring.strip() == "":
        return {}
    params = paramstring.split(";")
    param_dict = []
    for param in params:
        if param.strip() == "":
            continue  # skip blanks
        param = param.split(":")
        if len(param) == 2:
            param = (str(param[0]), [int(p) for p in param[1].split(",")])
            if len(param[1]) == 1:
                param = (param[0], param[1][0])
            param_dict.append(param)
    return dict(param_dict)
Example #34
def parse_params(paramstring):
    """
    Returns a dictionary parsed from a semicolon-delimited set of rrules

    "count:1;bysecond:1;byminute:1,2,4,5" returns
    {'count': 1, 'byminute': [1, 2, 4, 5], 'bysecond': 1}
    """
    if paramstring.strip() == "":
        return {}
    params = paramstring.split(';')
    param_dict = []
    for param in params:
        if param.strip() == "":
            continue  # skip blanks
        param = param.split(':')
        if len(param) == 2:
            param = (str(param[0]), [int(p) for p in param[1].split(',')])
            if len(param[1]) == 1:
                param = (param[0], param[1][0])
            param_dict.append(param)
    return dict(param_dict)
Example #35
    def testRange(self):
        """ To test getRange functionality """

        # first file
        file1 = CacheMetaData(
            TestCacheMetaData.TEST_CACHE_PATH + "file1", 10, 0x2345, 1025)
        self.assertEqual(TestCacheMetaData.TEST_CACHE_PATH +
                         "file1", file1.metaDataFile)
        self.assertEqual(10, file1.bitmap.length())
        self.assertEqual(0x2345, file1.md5sum)
        self.assertEqual(1025, file1.size)

        # bitvector should be "0000000000"
        self.assertEqual(0, file1.getNumReadBlocks())
        self.assertEqual(10, file1.bitmap.length())

        file1.setReadBlock(1)
        self.assertEqual(1, file1.getBit(1))
        self.assertEqual(1, file1.bitmap.count_bits())
        self.assertEqual("0100000000", str(file1.bitmap))

        file1.setReadBlock(5)
        self.assertEqual(1, file1.getBit(5))
        self.assertEqual(2, file1.bitmap.count_bits())
        self.assertEqual(5, file1.getNextReadBlock(2))
        self.assertEqual(5, file1.getNextReadBlock(5))
        self.assertEqual("0100010000", str(file1.bitmap))

        # perform some tests on 0100010000
        self.assertEqual((0, 0), file1.getRange(0, 0))
        self.assertEqual((2, 4), file1.getRange(2, 5))
        # same thing using negative index
        self.assertEqual((2, 4), file1.getRange(2, -5))
        self.assertEqual((2, 4), file1.getRange(-8, -5))
        self.assertEqual((2, 4), file1.getRange(-8, 5))

        file1.setReadBlock(2)
        file1.setReadBlock(4)
        self.assertEqual("0110110000", str(file1.bitmap))
        self.assertEqual((3, 3), file1.getRange(2, 5))

        file1.setReadBlock(3)
        self.assertEqual("0111110000", str(file1.bitmap))
        self.assertEqual((None, None), file1.getRange(2, 5))

        expected = 0
        try:
            file1.getRange(8, -7)
        except ValueError:
            expected = 1

        self.assertEqual(1, expected)

        expected = 0
        try:
            file1.setReadBlocks(8, -7)
        except ValueError:
            expected = 1

        file1_repr = ("CacheMetaData(metaDataFile='{}file1', "
                      "blocks=10, md5sum=9029, size=1025)".
                      format(TestCacheMetaData.TEST_CACHE_PATH))
        # avoid repr limiting the size of the return representation
        aRepr.maxother = len(file1_repr)
        self.assertEqual(file1_repr, repr(file1))

        self.assertEqual(1, expected)
        file1.persist()
        self.assertTrue(os.path.exists(
            TestCacheMetaData.TEST_CACHE_PATH + "file1"))
        file1.delete()
        self.assertTrue(not os.path.exists(
            TestCacheMetaData.TEST_CACHE_PATH + "file1"))

        file2 = CacheMetaData(
            TestCacheMetaData.TEST_CACHE_PATH + "/test/file1",
            10, 0x2345, 1024)
        self.assertEqual(TestCacheMetaData.TEST_CACHE_PATH +
                         "/test/file1", file2.metaDataFile)
        self.assertEqual(10, file2.bitmap.length())
        self.assertEqual(0x2345, file2.md5sum)
        file2.persist()
        file3 = CacheMetaData(
            TestCacheMetaData.TEST_CACHE_PATH + "/test/file1",
            10, 0x2345, 1025)
        self.assertEqual(TestCacheMetaData.TEST_CACHE_PATH +
                         "/test/file1", file3.metaDataFile)
        self.assertEqual(10, file3.bitmap.length())
        self.assertEqual(0x2345, file3.md5sum)
Example #36
from six.moves import builtins

c1 = complex()
d1 = dict()
f1 = float()
i1 = int()
l1 = list()
s1 = str()
t1 = tuple()

c2 = builtins.complex()
d2 = builtins.dict()
f2 = builtins.float()
i2 = builtins.int()
l2 = builtins.list()
s2 = builtins.str()
t2 = builtins.tuple()
Example #37
 def write_link_construct(self, handler, link):
     if 'length' in link:
         link['length'] = str(link['length'])
     handler.addQuickElement(u'link', None, link)
Example #38
 def item_summary(self, item):
     return str(item)
Example #39
 def item_uid(self, item):
     return str(item.id)