Example #1
def get_lytaf_event_types(lytaf_path=None, print_event_types=True):
    """Prints the different event types in the each of the LYTAF databases.

    Parameters
    ----------
    lytaf_path : `str`
        Path location where LYTAF files are stored.
        Default = Path stored in config file.

    print_event_types : `bool`
        If True, prints the artifacts in each lytaf database to screen.

    Returns
    -------
    all_event_types : `list`
        List of all event types in all lytaf databases.

    """
    # Set lytaf_path if not set by user
    if not lytaf_path:
        lytaf_path = get_and_create_download_dir()
    suffixes = ["lyra", "manual", "ppt", "science"]
    all_event_types = []
    # For each database file extract the event types and print them.
    if print_event_types:
        print("\nLYTAF Event Types\n-----------------\n")
    for suffix in suffixes:
        dbname = "annotation_{0}.db".format(suffix)
        # Check database file exists, else download it.
        check_download_file(dbname, LYTAF_REMOTE_PATH, lytaf_path)
        # Open SQLITE3 LYTAF files
        connection = sqlite3.connect(os.path.join(lytaf_path, dbname))
        # Create cursor to manipulate data in annotation file
        cursor = connection.cursor()
        cursor.execute("select type from eventType;")
        event_types = cursor.fetchall()
        all_event_types.append(event_types)
        if print_event_types:
            print("----------------\n{0} database\n----------------".format(
                suffix))
            for event_type in event_types:
                print(str(event_type[0]))
            print(" ")
    # Unpack event types in all_event_types into single list
    all_event_types = [
        event_type[0] for event_types in all_event_types
        for event_type in event_types
    ]
    return all_event_types
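A brief usage sketch, not part of the original example: the import path is an assumption based on the docstring examples elsewhere on this page (historically this function lived in sunpy.instr.lyra), and the output depends on the current contents of the remote LYTAF databases.

# Hedged usage sketch; the import path is an assumption based on the
# docstring examples further down this page.
from sunpy.instr.lyra import get_lytaf_event_types

event_types = get_lytaf_event_types(print_event_types=False)
print(len(event_types))          # total number of event types found
print(sorted(set(event_types)))  # unique event-type labels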
Example #2
def get_lytaf_event_types(lytaf_path=None, print_event_types=True):
    """Prints the different event types in the each of the LYTAF databases.

    Parameters
    ----------
    lytaf_path : `str`
        Path location where LYTAF files are stored.
        Default = LYTAF_PATH defined above.

    print_event_types : `bool`
        If True, prints the artifacts in each lytaf database to screen.

    Returns
    -------
    all_event_types : `list`
        List of all event types in all lytaf databases.

    """
    # Set lytaf_path if not set by user
    if not lytaf_path:
        lytaf_path = LYTAF_PATH
    suffixes = ["lyra", "manual", "ppt", "science"]
    all_event_types = []
    # For each database file extract the event types and print them.
    if print_event_types:
        print("\nLYTAF Event Types\n-----------------\n")
    for suffix in suffixes:
        dbname = "annotation_{0}.db".format(suffix)
        # Check database file exists, else download it.
        check_download_file(dbname, LYTAF_REMOTE_PATH, lytaf_path)
        # Open SQLITE3 LYTAF files
        connection = sqlite3.connect(os.path.join(lytaf_path, dbname))
        # Create cursor to manipulate data in annotation file
        cursor = connection.cursor()
        cursor.execute("select type from eventType;")
        event_types = cursor.fetchall()
        all_event_types.append(event_types)
        if print_event_types:
            print("----------------\n{0} database\n----------------"
                  .format(suffix))
            for event_type in event_types:
                print(str(event_type[0]))
            print(" ")
    # Unpack event types in all_event_types into single list
    all_event_types = [event_type[0] for event_types in all_event_types
                       for event_type in event_types]
    return all_event_types
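The closing comprehension flattens the per-database lists returned by fetchall(), which yields rows as 1-tuples, into a single flat list of strings. The same pattern in isolation, with made-up event-type names:

# fetchall() returns rows as 1-tuples; two databases' worth of rows:
all_event_types = [[("LAR",), ("UV occ.",)], [("GOES flare",)]]
flat = [event_type[0] for event_types in all_event_types
        for event_type in event_types]
print(flat)  # ['LAR', 'UV occ.', 'GOES flare']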
Example #3
def get_lytaf_events(start_time, end_time, lytaf_path=None,
                     combine_files=("lyra", "manual", "ppt", "science"),
                     csvfile=None, force_use_local_lytaf=False):
    """
    Extracts combined lytaf file for given time range.

    Given a time range defined by start_time and end_time, this function
    extracts the segments of each LYRA annotation file and combines them.

    Parameters
    ----------
    start_time : `datetime.datetime` or `str`
        Start time of period for which annotation file is required.

    end_time : `datetime.datetime` or `str`
        End time of period for which annotation file is required.

    lytaf_path : `str`
        Directory path where the LYRA annotation files are stored.

    combine_files : `tuple` of strings
        States which LYRA annotation files are to be combined.
        Default is all four, i.e. lyra, manual, ppt, science.
        See Notes section for an explanation of each.

    force_use_local_lytaf : `bool`
        Ensures the current local versions of the LYTAF files are not
        replaced by up-to-date online versions, even if the local files
        do not cover the entire input time range.
        Default=False

    Returns
    -------
    lytaf : `numpy.recarray`
        Containing the various parameters stored in the LYTAF files.

    Notes
    -----
    There are four LYRA annotation files which mark different types of events
    or artifacts in the data.  They are named annotation_suffix.db where
    suffix is a variable equalling either lyra, manual, ppt, or science.

    annotation_lyra.db : contains entries regarding possible effects to
        the data due to normal operation of LYRA instrument.

    annotation_manual.db : contains entries regarding possible effects
        to the data due to unusual or manually logged events.

    annotation_ppt.db : contains entries regarding possible effects to
        the data due to pointing or positioning of PROBA2.

    annotation_science.db : contains events in the data that are
        scientifically interesting, e.g. GOES flares.

    References
    ----------
    Further documentation: http://proba2.oma.be/data/TARDIS

    Examples
    --------
    Get all events in the LYTAF files for January 2014
        >>> from sunpy.instr.lyra import get_lytaf_events
        >>> lytaf = get_lytaf_events('2014-01-01', '2014-02-01')

    """
    # Check inputs
    # Check lytaf path
    if not lytaf_path:
        lytaf_path = LYTAF_PATH
    # Check start_time and end_time are date strings or datetime objects
    start_time = parse_time(start_time)
    end_time = parse_time(end_time)
    # Check combine_files contains correct inputs
    if not all(suffix in ["lyra", "manual", "ppt", "science"]
               for suffix in combine_files):
        raise ValueError("Elements in combine_files must be strings equalling "
                         "'lyra', 'manual', 'ppt', or 'science'.")
    # Remove any duplicates from combine_files input
    combine_files = list(set(combine_files))
    combine_files.sort()
    # Convert input times to UNIX timestamp format since this is the
    # time format in the annotation files
    start_time_uts = (start_time - datetime.datetime(1970, 1, 1)).total_seconds()
    end_time_uts = (end_time - datetime.datetime(1970, 1, 1)).total_seconds()

    # Define numpy record array which will hold the information from
    # the annotation file.
    lytaf = np.empty((0,), dtype=[("insertion_time", object),
                                  ("begin_time", object),
                                  ("reference_time", object),
                                  ("end_time", object),
                                  ("event_type", object),
                                  ("event_definition", object)])
    # Access annotation files
    for suffix in combine_files:
        # Check database files are present
        dbname = "annotation_{0}.db".format(suffix)
        check_download_file(dbname, LYTAF_REMOTE_PATH, lytaf_path)
        # Open SQLITE3 annotation files
        connection = sqlite3.connect(os.path.join(lytaf_path, dbname))
        # Create cursor to manipulate data in annotation file
        cursor = connection.cursor()
        # Check if lytaf file spans the start and end times defined by
        # user.  If not, download newest version.
        # First get start time of first event and end time of last
        # event in lytaf.
        cursor.execute("select begin_time from event order by begin_time asc "
                       "limit 1;")
        db_first_begin_time = cursor.fetchone()[0]
        db_first_begin_time = datetime.datetime.fromtimestamp(db_first_begin_time)
        cursor.execute("select end_time from event order by end_time desc "
                       "limit 1;")
        db_last_end_time = cursor.fetchone()[0]
        db_last_end_time = datetime.datetime.fromtimestamp(db_last_end_time)
        # If lytaf does not include entire input time range...
        if not force_use_local_lytaf:
            if end_time > db_last_end_time or start_time < db_first_begin_time:
                # ...close lytaf file...
                cursor.close()
                connection.close()
                # ...Download latest lytaf file...
                check_download_file(dbname, LYTAF_REMOTE_PATH, lytaf_path,
                                    replace=True)
                # ...and open new version of lytaf database.
                connection = sqlite3.connect(os.path.join(lytaf_path, dbname))
                cursor = connection.cursor()
        # Select and extract the data from event table within file within
        # given time range
        cursor.execute("select insertion_time, begin_time, reference_time, "
                       "end_time, eventType_id from event where end_time >= "
                       "{0} and begin_time <= "
                       "{1}".format(start_time_uts, end_time_uts))
        event_rows = cursor.fetchall()
        # Select and extract the event types from eventType table
        cursor.row_factory = sqlite3.Row
        cursor.execute("select * from eventType")
        eventType_rows = cursor.fetchall()
        eventType_id = []
        eventType_type = []
        eventType_definition = []
        for eventType_row in eventType_rows:
            eventType_id.append(eventType_row["id"])
            eventType_type.append(eventType_row["type"])
            eventType_definition.append(eventType_row["definition"])
        # Enter desired information into the lytaf numpy record array
        for event_row in event_rows:
            id_index = eventType_id.index(event_row[4])
            lytaf = np.append(lytaf,
                              np.array((datetime.datetime.utcfromtimestamp(event_row[0]),
                                        datetime.datetime.utcfromtimestamp(event_row[1]),
                                        datetime.datetime.utcfromtimestamp(event_row[2]),
                                        datetime.datetime.utcfromtimestamp(event_row[3]),
                                        eventType_type[id_index],
                                        eventType_definition[id_index]), dtype=lytaf.dtype))
        # Close file
        cursor.close()
        connection.close()
    # Sort lytaf in ascending order of begin time
    np.recarray.sort(lytaf, order="begin_time")

    # If csvfile kwarg is set, write out lytaf to csv file
    if csvfile:
        # Open and write data to csv file.
        with open(csvfile, 'w') as openfile:
            csvwriter = csv.writer(openfile, delimiter=';')
            # Write header.
            csvwriter.writerow(lytaf.dtype.names)
            # Write data.
            for row in lytaf:
                new_row = []
                new_row.append(row[0].strftime("%Y-%m-%dT%H:%M:%S"))
                new_row.append(row[1].strftime("%Y-%m-%dT%H:%M:%S"))
                new_row.append(row[2].strftime("%Y-%m-%dT%H:%M:%S"))
                new_row.append(row[3].strftime("%Y-%m-%dT%H:%M:%S"))
                new_row.append(row[4])
                new_row.append(row[5])
                csvwriter.writerow(new_row)

    return lytaf
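A short sketch of working with the returned recarray and the csvfile kwarg; the event-type label used for filtering ("LAR", PROBA2 large-angle rotations) is illustrative and depends on the database contents:

from sunpy.instr.lyra import get_lytaf_events

lytaf = get_lytaf_events('2014-01-01', '2014-02-01')
# The recarray can be filtered by any named field, e.g. keep only
# events whose type matches a label of interest (label is illustrative).
lar_events = lytaf[lytaf["event_type"] == "LAR"]
# Setting csvfile also writes the combined annotation list to disk,
# semicolon-delimited as in the writer above.
get_lytaf_events('2014-01-01', '2014-02-01', csvfile='lytaf_jan2014.csv')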
Example #4
def get_iris_response(pre_launch=False,
                      response_file=None,
                      response_version=None,
                      force_download=False):
    """Returns IRIS response structure.

    One and only one of pre_launch, response_file and response_version must be set.

    Parameters
    ----------
    pre_launch: `bool`
        Equivalent to setting response_version=2.  Cannot be set
        simultaneously with the response_file or response_version kwargs.
        Default=False
    response_file: `str`
        Path to the effective area file to be used.  Cannot be set
        simultaneously with the pre_launch or response_version kwargs.
        Default=latest
    response_version : `int`
        Version number of effective area file to be used. Cannot be set
        simultaneously with response_file or pre_launch kwarg. Default=latest

    Returns
    -------
    iris_response: `dict`
        Various parameters regarding IRIS response, with the following keys:
        date_obs: `datetime.datetime`
        lambda: `astropy.units.Quantity`
        area_sg: `astropy.units.Quantity`
        name_sg: `str`
        dn2phot_sg: `tuple` of length 2
        area_sji: `astropy.units.Quantity`
        name_sji: `str`
        dn2phot_sji:  `tuple` of length 4
        comment: `str`
        version: `int`
        version_date: `datetime.datetime`

    Notes
    -----
    This routine does not calculate time dependent effective areas using
    version 3 and above of the response functions as is done in the SSW version
    of this code.  Therefore, asking it to read a version 3 or above response
    function will result in an error.  This code should be updated in future
    versions to calculate time dependent effective areas.

    """
    # Ensure the file exists at the given path.
    if response_file is not None:
        if not (os.path.isfile(response_file)):
            raise KeyError("Not a valid file path")

    # Ensure conflicting kwargs are not set.
    response_file_set = bool(response_file)
    response_version_set = bool(response_version)
    if response_file_set + pre_launch + response_version_set != 1:
        raise ValueError(
            "One and only one of kwargs pre_launch, response_file "
            "and response_version must be set.")
    # If pre_launch set, set response_version to 2.
    if pre_launch:
        response_version = 2
    # If response_file not set, define appropriate response file
    # based on version.
    if not response_file:
        try:
            response_filename = RESPONSE_VERSION_FILENAMES[str(
                response_version)]
        except KeyError:
            raise KeyError("Version number not recognized.")
        if response_version > 2:
            warnings.warn(
                "Effective areas are not available (i.e. set  to zero).  "
                "For response file versions > 2 time dependent effective "
                "areas must be calculated via fitting, which is not supported "
                "by this function at this time. "
                "Version of this response file = {0}".format(response_version))
        # Define the directory in which the response file should exist
        # to be the sunpy download directory.
        config = sunpy.util.config.load_config()
        download_dir = config.get('downloads', 'download_dir')
        # Check response file exists in download_dir.  If not, download it.
        check_download_file(response_filename,
                            IRIS_RESPONSE_REMOTE_PATH,
                            download_dir,
                            replace=force_download)
        # Define response file as path + filename.
        response_file = os.path.join(download_dir, response_filename)

    # Read response file and store in a dictionary.
    raw_response_data = scipy.io.readsav(response_file)
    iris_response = dict([(name, raw_response_data["p0"][name][0])
                          for name in raw_response_data["p0"].dtype.names])
    # Convert some properties to more convenient types.
    iris_response["LAMBDA"] = Quantity(iris_response["LAMBDA"], unit=u.nm)
    iris_response["AREA_SG"] = Quantity(iris_response["AREA_SG"], unit=u.cm**2)
    iris_response["AREA_SJI"] = Quantity(iris_response["AREA_SJI"],
                                         unit=u.cm**2)
    iris_response["GEOM_AREA"] = Quantity(iris_response["GEOM_AREA"],
                                          unit=u.cm**2)
    iris_response["VERSION"] = int(iris_response["VERSION"])
    # Convert some properties not found in versions below version 3 to
    # more convenient types.
    if iris_response["VERSION"] > 2:
        # If DATE_OBS has a value, convert to datetime, else set to
        # None.
        try:
            iris_response["DATE_OBS"] = parse_time(iris_response["DATE_OBS"])
        except Exception:
            iris_response["DATE_OBS"] = None
        # Convert C_F_TIME to array of datetime objects while
        # conserving shape.
        c_f_time = np.empty(iris_response["C_F_TIME"].shape, dtype=object)
        for i, row in enumerate(iris_response["C_F_TIME"]):
            for j, t in enumerate(row):
                c_f_time[i][j] = parse_time(float(t))
        iris_response["C_F_TIME"] = c_f_time
        # Convert C_F_LAMBDA to Quantity.
        iris_response["C_F_LAMBDA"] = Quantity(iris_response["C_F_LAMBDA"],
                                               unit="nm")
        # Convert C_N_TIME to array of datetime objects while
        # conserving shape.
        c_n_time = np.empty(iris_response["C_N_TIME"].shape, dtype=object)
        for i, row in enumerate(iris_response["C_N_TIME"]):
            for j, t in enumerate(row):
                c_n_time[i][j] = parse_time(float(t))
        iris_response["C_N_TIME"] = c_n_time
        # Convert C_N_LAMBDA to Quantity.
        iris_response["C_N_LAMBDA"] = Quantity(iris_response["C_N_LAMBDA"],
                                               unit="nm")
        # Convert C_S_TIME to array of datetime objects while
        # conserving shape.
        c_s_time = np.empty(iris_response["C_S_TIME"].shape, dtype=object)
        for i, row in enumerate(iris_response["C_S_TIME"]):
            for j, column in enumerate(row):
                for k, t in enumerate(column):
                    c_s_time[i][j][k] = parse_time(float(t))
        iris_response["C_S_TIME"] = c_s_time
        # Convert DATE in ELEMENTS array to array of datetime objects.
        for i, t in enumerate(iris_response["ELEMENTS"]["DATE"]):
            iris_response["ELEMENTS"]["DATE"][i] = parse_time(t.decode())
        # Convert VERSION_DATE to datetime object.
        iris_response["VERSION_DATE"] = parse_time(
            iris_response["VERSION_DATE"].decode())
    else:
        # Change DATE tag in data with version < 3 to VERSION_DATE to
        # be consistent with more recent versions.
        iris_response["VERSION_DATE"] = datetime.datetime(
            int(iris_response["DATE"][0:4]), int(iris_response["DATE"][4:6]),
            int(iris_response["DATE"][6:8]))
        del iris_response["DATE"]
    return iris_response
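A hedged usage sketch; it assumes the host module's constants (RESPONSE_VERSION_FILENAMES, IRIS_RESPONSE_REMOTE_PATH) are available and the response file can be downloaded:

# Exactly one of the three selector kwargs may be set.
resp = get_iris_response(response_version=2)   # equivalent to pre_launch=True
print(resp["VERSION"], resp["VERSION_DATE"])
print(resp["AREA_SG"].unit)                    # cm2, an astropy Quantity

# Setting two selectors at once raises ValueError:
# get_iris_response(pre_launch=True, response_version=2)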
Example #5
def get_lytaf_events(start_time,
                     end_time,
                     lytaf_path=None,
                     combine_files=("lyra", "manual", "ppt", "science"),
                     csvfile=None,
                     force_use_local_lytaf=False):
    """
    Extracts combined lytaf file for given time range.

    Given a time range defined by start_time and end_time, this function
    extracts the segments of each LYRA annotation file and combines them.

    Parameters
    ----------
    start_time : `astropy.time.Time` or `str`
        Start time of period for which annotation file is required.

    end_time : `astropy.time.Time` or `str`
        End time of period for which annotation file is required.

    lytaf_path : `str`
        Directory path where the LYRA annotation files are stored.

    combine_files : `tuple` of strings
        States which LYRA annotation files are to be combined.
        Default is all four, i.e. lyra, manual, ppt, science.
        See Notes section for an explanation of each.

    force_use_local_lytaf : `bool`
        Ensures the current local versions of the LYTAF files are not
        replaced by up-to-date online versions, even if the local files
        do not cover the entire input time range.
        Default=False

    Returns
    -------
    lytaf : `numpy.recarray`
        Containing the various parameters stored in the LYTAF files.

    Notes
    -----
    There are four LYRA annotation files which mark different types of events
    or artifacts in the data.  They are named annotation_suffix.db where
    suffix is a variable equalling either lyra, manual, ppt, or science.

    annotation_lyra.db : contains entries regarding possible effects to
        the data due to normal operation of LYRA instrument.

    annotation_manual.db : contains entries regarding possible effects
        to the data due to unusual or manually logged events.

    annotation_ppt.db : contains entries regarding possible effects to
        the data due to pointing or positioning of PROBA2.

    annotation_science.db : contains events in the data that are
        scientifically interesting, e.g. GOES flares.

    References
    ----------
    Further documentation: http://proba2.oma.be/data/TARDIS

    Examples
    --------
    Get all events in the LYTAF files for January 2014
        >>> from sunpy.instr.lyra import get_lytaf_events
        >>> lytaf = get_lytaf_events('2014-01-01', '2014-02-01')  # doctest: +REMOTE_DATA

    """
    # Check inputs
    # Check lytaf path
    if not lytaf_path:
        lytaf_path = get_and_create_download_dir()
    # Parse start_time and end_time
    start_time = parse_time(start_time)
    end_time = parse_time(end_time)
    # Check combine_files contains correct inputs
    if not all(suffix in ["lyra", "manual", "ppt", "science"]
               for suffix in combine_files):
        raise ValueError("Elements in combine_files must be strings equalling "
                         "'lyra', 'manual', 'ppt', or 'science'.")
    # Remove any duplicates from combine_files input
    combine_files = list(set(combine_files))
    combine_files.sort()
    # Convert input times to UNIX timestamp format since this is the
    # time format in the annotation files
    start_time_uts = (start_time - Time('1970-1-1')).sec
    end_time_uts = (end_time - Time('1970-1-1')).sec

    # Define numpy record array which will hold the information from
    # the annotation file.
    lytaf = np.empty((0, ),
                     dtype=[("insertion_time", object), ("begin_time", object),
                            ("reference_time", object), ("end_time", object),
                            ("event_type", object),
                            ("event_definition", object)])
    # Access annotation files
    for suffix in combine_files:
        # Check database files are present
        dbname = "annotation_{0}.db".format(suffix)
        check_download_file(dbname, LYTAF_REMOTE_PATH, lytaf_path)
        # Open SQLITE3 annotation files
        connection = sqlite3.connect(os.path.join(lytaf_path, dbname))
        # Create cursor to manipulate data in annotation file
        cursor = connection.cursor()
        # Check if lytaf file spans the start and end times defined by
        # user.  If not, download newest version.
        # First get start time of first event and end time of last
        # event in lytaf.
        cursor.execute("select begin_time from event order by begin_time asc "
                       "limit 1;")
        db_first_begin_time = cursor.fetchone()[0]
        db_first_begin_time = datetime.datetime.fromtimestamp(
            db_first_begin_time)
        cursor.execute("select end_time from event order by end_time desc "
                       "limit 1;")
        db_last_end_time = cursor.fetchone()[0]
        db_last_end_time = datetime.datetime.fromtimestamp(db_last_end_time)
        # If lytaf does not include entire input time range...
        if not force_use_local_lytaf:
            if end_time > db_last_end_time or start_time < db_first_begin_time:
                # ...close lytaf file...
                cursor.close()
                connection.close()
                # ...Download latest lytaf file...
                check_download_file(dbname,
                                    LYTAF_REMOTE_PATH,
                                    lytaf_path,
                                    replace=True)
                # ...and open new version of lytaf database.
                connection = sqlite3.connect(os.path.join(lytaf_path, dbname))
                cursor = connection.cursor()
        # Select and extract the data from event table within file within
        # given time range
        cursor.execute("select insertion_time, begin_time, reference_time, "
                       "end_time, eventType_id from event where end_time >= "
                       "{0} and begin_time <= "
                       "{1}".format(start_time_uts, end_time_uts))
        event_rows = cursor.fetchall()
        # Select and extract the event types from eventType table
        cursor.row_factory = sqlite3.Row
        cursor.execute("select * from eventType")
        eventType_rows = cursor.fetchall()
        eventType_id = []
        eventType_type = []
        eventType_definition = []
        for eventType_row in eventType_rows:
            eventType_id.append(eventType_row["id"])
            eventType_type.append(eventType_row["type"])
            eventType_definition.append(eventType_row["definition"])
        # Enter desired information into the lytaf numpy record array
        for event_row in event_rows:
            id_index = eventType_id.index(event_row[4])
            lytaf = np.append(
                lytaf,
                np.array(
                    (Time(datetime.datetime.utcfromtimestamp(event_row[0]),
                          format='datetime'),
                     Time(datetime.datetime.utcfromtimestamp(event_row[1]),
                          format='datetime'),
                     Time(datetime.datetime.utcfromtimestamp(event_row[2]),
                          format='datetime'),
                     Time(datetime.datetime.utcfromtimestamp(event_row[3]),
                          format='datetime'), eventType_type[id_index],
                     eventType_definition[id_index]),
                    dtype=lytaf.dtype))
        # Close file
        cursor.close()
        connection.close()
    # Sort lytaf in ascending order of begin time
    np.recarray.sort(lytaf, order="begin_time")

    # If csvfile kwarg is set, write out lytaf to csv file
    if csvfile:
        # Open and write data to csv file.
        with open(csvfile, 'w') as openfile:
            csvwriter = csv.writer(openfile, delimiter=';')
            # Write header.
            csvwriter.writerow(lytaf.dtype.names)
            # Write data.
            for row in lytaf:
                new_row = []
                new_row.append(row[0].strftime("%Y-%m-%dT%H:%M:%S"))
                new_row.append(row[1].strftime("%Y-%m-%dT%H:%M:%S"))
                new_row.append(row[2].strftime("%Y-%m-%dT%H:%M:%S"))
                new_row.append(row[3].strftime("%Y-%m-%dT%H:%M:%S"))
                new_row.append(row[4])
                new_row.append(row[5])
                csvwriter.writerow(new_row)

    return lytaf
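The SQL predicate used above (end_time >= start and begin_time <= end) is the standard interval-overlap test: an event is kept if it overlaps the query window at all, not only if it is fully contained. The same logic in plain Python, with hypothetical (begin, end) UNIX-timestamp pairs standing in for database rows:

# Hypothetical event intervals as (begin_time, end_time) pairs.
events = [(100, 200), (250, 300), (350, 400)]
start, end = 150, 260

overlapping = [(b, e) for (b, e) in events if e >= start and b <= end]
print(overlapping)  # [(100, 200), (250, 300)] - partial overlaps included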
Example #6
def _goes_get_chianti_em(longflux, temp, satellite=8, abundances="coronal",
                         download=False, download_dir=DATA_PATH):
    """
    Calculates emission measure from GOES 1-8A flux and temperature.

    This function calculates the emission measure of the solar
    soft X-ray emitting plasma observed by the GOES/XRS from the
    ratio of the isothermal temperature and observed long channel
    (1-8 angstrom) flux which scales with the emission measure.
    This function is not intended to be called directly but by
    goes_chianti_tem(), although it can be used independently.
    However, if used independently, data preparation, such as correctly
    rescaling fluxes for some satellites, will not be carried out.
    This is done in goes_chianti_tem().

    Parameters
    ----------
    longflux : ndarray or array-like which can be converted to float64 type, such as an np.array, tuple, list.
        Array containing the observed GOES/XRS long channel flux

    temp : ndarray or array-like which can be converted to float64 type, such as an np.array, tuple, list.
        Array containing the GOES temperature

    satellite : int (optional)
        Number of GOES satellite used to make observations.
        Important for correct calibration of data.
        Default=8

    abundances : (optional) string equalling either 'coronal' or 'photospheric'.
        States whether photospheric or coronal abundances should be assumed.
        Default='coronal'

    download : (optional) bool
        If True, the GOES emission measure data files are downloaded.
        It is important to do this if a new version of the files has been
        generated due to a new CHIANTI version being released or the launch of
        new GOES satellites since these files were originally downloaded.
        Default=False

    download_dir : (optional) string
        The directory to download the GOES temperature and emission measure
        data files to.
        Default=SunPy default download directory

    Returns
    -------
    em : numpy array
         Array of emission measure values of same length as longflux
         and temp.  [cm**-3]

    Notes
    -----
    This function uses csv files representing the modelled relationship
    between the temperature of the solar soft X-ray emitting plasma
    and the resulting observed flux in the GOES/XRS long channel
    (1-8 angstroms).  goes_chianti_em_cor.csv is used when coronal
    abundances are assumed while goes_chianti_em_pho.csv is used when
    photospheric abundances are assumed.
    (See make_goes_chianti_temp.py for more detail.)

    These files were calculated using the methods of White et al. (2005)
    who used the CHIANTI atomic physics database and GOES transfer
    functions to model the response of the long channel to the
    temperature of the emitting plasma for XRSs onboard various GOES
    satellites.  The emission measure can then be found by scaling the
    ratio of these two properties.  This method assumes an isothermal
    plasma, the ionisation equilibria of Mazzotta et al. (1998), and
    a constant density of 10**10 cm**-3.
    (See White et al. 2005 for justification of this last assumption.)
    This function is based on goes_get_chianti_temp.pro in
    SolarSoftWare written in IDL by Stephen White.

    For correct preparation of GOES data before calculating temperature
    see goes_chianti_tem() (Notes section of docstring).

    References
    ----------
    .. [1] White, S. M., Thomas, R. J., & Schwartz, R. A. 2005, Sol. Phys.,
       227, 231
    .. [2] Mazzotta, P., Mazzitelli, G., Colafrancesco, S., & Vittorio, N.
       1998, A&AS, 133, 339

    Examples
    --------
    >>> longflux = np.array([7e-6,7e-6])
    >>> temp = np.array([11,11])
    >>> em = _goes_get_chianti_em(longflux, temp, satellite=15,
                                  abundances="coronal")
    >>> em
    array([  3.45200672e+48,   3.45200672e+48])

    """
    # If download kwarg is True, or required data files cannot be
    # found locally, download required data files.
    check_download_file(FILE_EM_COR, GOES_REMOTE_PATH, download_dir,
                        replace=download)
    check_download_file(FILE_EM_PHO, GOES_REMOTE_PATH, download_dir,
                        replace=download)

    # Check inputs are of correct type
    longflux = np.asanyarray(longflux, dtype=np.float64)
    temp = np.asanyarray(temp, dtype=np.float64)
    satellite = int(satellite)
    if satellite < 1:
        raise ValueError("satellite must be the number of a "
                         "valid GOES satellite (>= 1).")
    # If abundances input is valid, select the corresponding data file
    # (suffix 'cor' or 'pho').
    if abundances == "coronal":
        data_file = FILE_EM_COR
    elif abundances == "photospheric":
        data_file = FILE_EM_PHO
    else:
        raise ValueError("abundances must be a string equalling "
                         "'coronal' or 'photospheric'.")
    # check input arrays are of same length
    if len(longflux) != len(temp):
        raise ValueError("longflux and temp must have same number of "
                         "elements.")

    # Initialize lists to hold model data of temperature - long channel
    # flux relationship read in from csv file.
    modeltemp = [] # modelled temperature is in log_10 space in units of MK
    modelflux = []
    # Determine name of column in csv file containing model ratio values
    # for relevant GOES satellite
    label = "longfluxGOES{0}".format(satellite)

    # Read data representing appropriate temperature--long flux
    # relationship depending on satellite number and assumed abundances.
    with open(os.path.join(DATA_PATH, data_file), "r") as csvfile:
        startline = dropwhile(lambda l: l.startswith("#"), csvfile)
        csvreader = csv.DictReader(startline, delimiter=";")
        for row in csvreader:
            modeltemp.append(float(row["log10temp_MK"]))
            modelflux.append(float(row[label]))
    modeltemp = np.asarray(modeltemp)
    modelflux = np.asarray(modelflux)

    # Ensure input values of temperature are within limits of model table
    if np.min(np.log10(temp)) < np.min(modeltemp) or \
      np.max(np.log10(temp)) > np.max(modeltemp) or \
      np.isnan(np.min(np.log10(temp))):
        raise ValueError("All values in temp must be within the range "
                         "{0} - {1} MK.".format(np.min(10**modeltemp),
                                                np.max(10**modeltemp)))

    # Perform spline fit to model data
    spline = interpolate.splrep(modeltemp, modelflux, s=0)
    denom = interpolate.splev(np.log10(temp), spline, der=0)
    em = longflux/denom * 1e55

    return em
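The final step fits a spline to the tabulated temperature-flux relation and scales the observed flux by the interpolated model flux. A self-contained sketch of that step with synthetic stand-in data (the real values come from the goes_chianti_em_*.csv files):

import numpy as np
from scipy import interpolate

# Synthetic stand-in for the model table: log10(T/MK) against long-channel
# flux per unit emission measure (illustrative values only).
modeltemp = np.linspace(0.0, 2.0, 50)   # log10 temperature in MK
modelflux = 1e-61 * 10**modeltemp       # made-up monotonic relation

spline = interpolate.splrep(modeltemp, modelflux, s=0)
temp = np.array([11.0, 11.0])           # MK
longflux = np.array([7e-6, 7e-6])       # W m**-2
denom = interpolate.splev(np.log10(temp), spline, der=0)
em = longflux / denom * 1e55            # emission measure [cm**-3]
print(em)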
Example #7
def _goes_get_chianti_em(longflux,
                         temp,
                         satellite=8,
                         abundances="coronal",
                         download=False,
                         download_dir=DATA_PATH):
    """
    Calculates emission measure from GOES 1-8A flux and temperature.

    This function calculates the emission measure of the solar
    soft X-ray emitting plasma observed by the GOES/XRS from the
    ratio of the isothermal temperature and observed long channel
    (1-8 angstrom) flux which scales with the emission measure.
    This function is not intended to be called directly but by
    goes_chianti_tem(), although it can be used independently.
    However, if used independently, data preparation, such as correctly
    rescaling fluxes for some satellites, will not be carried out.
    This is done in goes_chianti_tem().

    Parameters
    ----------
    longflux : ndarray or array-like which can be converted to float64 type, such as an np.array, tuple, list.
        Array containing the observed GOES/XRS long channel flux

    temp : ndarray or array-like which can be converted to float64 type, such as an np.array, tuple, list.
        Array containing the GOES temperature

    satellite : int (optional)
        Number of GOES satellite used to make observations.
        Important for correct calibration of data.
        Default=8

    abundances : (optional) string equalling either 'coronal' or 'photospheric'.
        States whether photospheric or coronal abundances should be assumed.
        Default='coronal'

    download : (optional) bool
        If True, the GOES emission measure data files are downloaded.
        It is important to do this if a new version of the files has been
        generated due to a new CHIANTI version being released or the launch of
        new GOES satellites since these files were originally downloaded.
        Default=False

    download_dir : (optional) string
        The directory to download the GOES temperature and emission measure
        data files to.
        Default=SunPy default download directory

    Returns
    -------
    em : numpy array
         Array of emission measure values of same length as longflux
         and temp.  [cm**-3]

    Notes
    -----
    This function uses csv files representing the modelled relationship
    between the temperature of the solar soft X-ray emitting plasma
    and the resulting observed flux in the GOES/XRS long channel
    (1-8 angstroms).  goes_chianti_em_cor.csv is used when coronal
    abundances are assumed while goes_chianti_em_pho.csv is used when
    photospheric abundances are assumed.
    (See make_goes_chianti_temp.py for more detail.)

    These files were calculated using the methods of White et al. (2005)
    who used the CHIANTI atomic physics database and GOES transfer
    functions to model the response of the long channel to the
    temperature of the emitting plasma for XRSs onboard various GOES
    satellites.  The emission measure can then be found by scaling the
    ratio of these two properties.  This method assumes an isothermal
    plasma, the ionisation equilibria of Mazzotta et al. (1998), and
    a constant density of 10**10 cm**-3.
    (See White et al. 2005 for justification of this last assumption.)
    This function is based on goes_get_chianti_temp.pro in
    SolarSoftWare written in IDL by Stephen White.

    For correct preparation of GOES data before calculating temperature
    see goes_chianti_tem() (Notes section of docstring).

    References
    ----------
    .. [1] White, S. M., Thomas, R. J., & Schwartz, R. A. 2005, Sol. Phys.,
       227, 231
    .. [2] Mazzotta, P., Mazzitelli, G., Colafrancesco, S., & Vittorio, N.
       1998, A&AS, 133, 339

    Examples
    --------
    >>> longflux = np.array([7e-6,7e-6])
    >>> temp = np.array([11,11])
    >>> em = _goes_get_chianti_em(longflux, temp, satellite=15,
                                  abundances="coronal")
    >>> em
    array([  3.45200672e+48,   3.45200672e+48])

    """
    # If download kwarg is True, or required data files cannot be
    # found locally, download required data files.
    check_download_file(FILE_EM_COR,
                        GOES_REMOTE_PATH,
                        download_dir,
                        replace=download)
    check_download_file(FILE_EM_PHO,
                        GOES_REMOTE_PATH,
                        download_dir,
                        replace=download)

    # Check inputs are of correct type
    longflux = np.asanyarray(longflux, dtype=np.float64)
    temp = np.asanyarray(temp, dtype=np.float64)
    satellite = int(satellite)
    if satellite < 1:
        raise ValueError("satellite must be the number of a "
                         "valid GOES satellite (>= 1).")
    # If abundances input is valid, select the corresponding data file
    # (suffix 'cor' or 'pho').
    if abundances == "coronal":
        data_file = FILE_EM_COR
    elif abundances == "photospheric":
        data_file = FILE_EM_PHO
    else:
        raise ValueError("abundances must be a string equalling "
                         "'coronal' or 'photospheric'.")
    # check input arrays are of same length
    if len(longflux) != len(temp):
        raise ValueError("longflux and temp must have same number of "
                         "elements.")

    # Initialize lists to hold model data of temperature - long channel
    # flux relationship read in from csv file.
    modeltemp = []  # modelled temperature is in log_10 space in units of MK
    modelflux = []
    # Determine name of column in csv file containing model ratio values
    # for relevant GOES satellite
    label = "longfluxGOES{0}".format(satellite)

    # Read data representing appropriate temperature--long flux
    # relationship depending on satellite number and assumed abundances.
    with open(os.path.join(DATA_PATH, data_file), "r") as csvfile:
        startline = dropwhile(lambda l: l.startswith("#"), csvfile)
        csvreader = csv.DictReader(startline, delimiter=";")
        for row in csvreader:
            modeltemp.append(float(row["log10temp_MK"]))
            modelflux.append(float(row[label]))
    modeltemp = np.asarray(modeltemp)
    modelflux = np.asarray(modelflux)

    # Ensure input values of temperature are within limits of model table
    if np.min(np.log10(temp)) < np.min(modeltemp) or \
      np.max(np.log10(temp)) > np.max(modeltemp) or \
      np.isnan(np.min(np.log10(temp))):
        raise ValueError("All values in temp must be within the range "
                         "{0} - {1} MK.".format(np.min(10**modeltemp),
                                                np.max(10**modeltemp)))

    # Perform spline fit to model data
    spline = interpolate.splrep(modeltemp, modelflux, s=0)
    denom = interpolate.splev(np.log10(temp), spline, der=0)
    em = longflux / denom * 1e55

    return em
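The dropwhile/DictReader pairing used to read the model files skips leading '#' comment lines before handing the rest of the file to the CSV parser. A self-contained illustration with an in-memory stand-in file:

import csv
import io
from itertools import dropwhile

# In-memory stand-in for a goes_chianti_em_*.csv file with a comment header.
content = io.StringIO(
    "# model data generated with CHIANTI\n"
    "log10temp_MK;longfluxGOES15\n"
    "1.0;2.5e-61\n"
    "1.1;3.1e-61\n")
startline = dropwhile(lambda line: line.startswith("#"), content)
for row in csv.DictReader(startline, delimiter=";"):
    print(row["log10temp_MK"], row["longfluxGOES15"])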
def create_lyra_time_series(start_time, end_time, level=3, channels=[1,2,3,4],
                            lytaf_path=LYTAF_PATH, lytaf_remote_path=LYTAF_REMOTE_PATH,
                            lyra_data_path=LYRA_DATA_PATH,
                            lyra_remote_data_path=LYRA_REMOTE_DATA_PATH,
                            exclude_occultation_season=False):
    """
    Creates a time series of LYRA standard Unit 2 data.

    Parameters
    ----------
    start_time : time format compatible with sunpy.time.parse_time()
        Start time of period for which time series is to be generated.

    end_time : time format compatible with sunpy.time.parse_time()
        End time of period for which time series is to be generated.

    level : `int` equal to 1, 2, or 3.
        LYRA data level.
        1: raw data
        2: calibrated data
        3: one-minute-averaged data

    channels : `list` of ints in range 1-4 inclusive.
        Channels for which time series are to be created.
        1: Lyman Alpha
        2: Herzberg
        3: Aluminium
        4: Zirconium

    lytaf_path : string
        Directory path where the LYRA annotation files are stored.

    lytaf_remote_path : string
        URL from which to download LYRA annotation files if not available locally.

    lyra_data_path : string
        Directory path where the LYRA data files are stored.

    lyra_remote_data_path : string
        URL from which to download LYRA data files if not available locally.

    exclude_occultation_season : bool
        Determines whether the LYRA UV occultation season is discarded from
        the input period.  Default=False.

    Returns
    -------
    data : `numpy.recarray`
        Time series for input period containing time and irradiance
        values for each channel.

    Examples
    --------

    """
    # Check that inputs are correct.
    if level not in range(1, 4):
        raise ValueError("level must be an int equal to 1, 2, or 3. "
                         "Value entered = {0}".format(level))
    if not all(channel in range(1, 5) for channel in channels):
        raise ValueError("Values in channels must be ints equal to 1, 2, 3, "
                         "or 4. Values entered = {0}".format(channels))
    # Ensure input start and end times are datetime objects
    start_time = parse_time(start_time)
    end_time = parse_time(end_time)
    # Create list of datetime objects for each day in time period.
    start_until_end = end_time-start_time
    dates = [start_time+datetime.timedelta(days=i)
             for i in range(start_until_end.days+1)]
    # Exclude dates during LYRA eclipse season if keyword set and raise
    # a warning for any dates that are skipped.
    if exclude_occultation_season:
        dates, skipped_dates = _remove_lyra_occultation_dates(dates)
        # Raise Warning here if dates are skipped
        for date in skipped_dates:
            warn("{0} has been skipped due to LYRA eclipse season.".format(date))
    # Raise Error if no valid dates remain
    if not dates:
        raise ValueError("No valid dates within input date range.")
    # Search for daily FITS files for input time period.
    # First, create empty arrays to hold entire time series of input
    # time range.
    data_dtypes = [("CHANNEL{0}".format(channel), float) for channel in channels]
    data_dtypes.insert(0, ("TIME", object))
    data = np.empty((0,), dtype=data_dtypes)
    for date in dates:
        fitsfile = "lyra_{0}-000000_lev{1}_std.fits".format(
            date.strftime("%Y%m%d"), level)
        # Check each fitsfile exists locally.  If not, download it.
        try:
            check_download_file(fitsfile,
                                "{0}/{1}".format(lyra_remote_data_path,
                                                 date.strftime("%Y/%m/%d/")),
                                lyra_data_path)
            # Append data in files to time series
            with fits.open(os.path.join(lyra_data_path, fitsfile)) as hdulist:
                n = len(hdulist[1].data)
                data = np.append(data, np.empty((n,), dtype=data_dtypes))
                data["TIME"][-n:] = _time_list_from_lyra_fits(hdulist)
                for channel in channels:
                    data["CHANNEL{0}".format(channel)][-n:] = \
                      hdulist[1].data["CHANNEL{0}".format(channel)]
        except HTTPError:
            warn("Skipping file as it could not be found: {0}".format(urlparse.urljoin(
                "{0}{1}".format(lyra_remote_data_path,
                                date.strftime("%Y/%m/%d/")), fitsfile)))
        # Truncate time series to match start and end input times.
        w = np.logical_and(data["TIME"] >= start_time, data["TIME"] < end_time)
        data = data[w]
    return data
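A hypothetical usage sketch; the function depends on module-level constants (LYTAF_PATH, LYRA_REMOTE_DATA_PATH, ...) and private helpers (_remove_lyra_occultation_dates, _time_list_from_lyra_fits), so it is only runnable inside its host module:

# Hypothetical call covering one week of Aluminium and Zirconium data.
data = create_lyra_time_series("2013-01-01", "2013-01-08",
                               level=3, channels=[3, 4],
                               exclude_occultation_season=True)
print(data.dtype.names)   # ('TIME', 'CHANNEL3', 'CHANNEL4')
print(data["TIME"][0], data["CHANNEL3"][0])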