Example #1
    def apply_filters(self, filters):

        if not filters or not len(filters):
            return self

        filtered_dataset = self.clone()

        # First filter by time to reduce array lengths
        time_filter = FltHelper.get_time_filter(filters)
        if time_filter:
            filtered_dataset = self.apply_time_filter(time_filter,
                                                      time_filter["table"])

        for filter in filters:
            table_id = filter["table"]
            # Exclude the time filter
            if table_id not in ["EVENTS", "RATE"] or filter["column"] != CONFIG.TIME_COLUMN:
                if table_id in filtered_dataset.tables:
                    filtered_dataset.tables[table_id] = \
                        filtered_dataset.tables[table_id].apply_filter(filter)
                else:
                    logging.error("dataset.apply_filters wrong table_id: %s" %
                                  table_id)

        return filtered_dataset
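
A minimal call sketch for apply_filters; the filter keys are inferred from the code above and the values are purely illustrative, assuming `dataset` is an existing DataSet instance:

# Hypothetical usage sketch; the filter values are made up
filters = [
    {"table": "EVENTS", "column": CONFIG.TIME_COLUMN, "from": 0.0, "to": 1000.0},
    {"table": "EVENTS", "column": "PI", "from": 50, "to": 200},
]
filtered_ds = dataset.apply_filters(filters)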
Example #2
def load_dataset_from_intermediate_file(fname):
    """Save Stingray object to intermediate file."""

    from stingray.lightcurve import Lightcurve
    from stingray.events import EventList
    from stingray.crossspectrum import Crossspectrum
    from hendrics.io import get_file_type
    from stingray.io import _retrieve_pickle_object

    # This will return an EventList, a light curve, a Powerspectrum, ...
    # depending on the contents of the file
    try:
        ftype, contents = get_file_type(fname)
    except:
        contents = _retrieve_pickle_object(fname)

    if isinstance(contents, Lightcurve):
        return DataSet.get_lightcurve_dataset_from_stingray_Lightcurve(
            contents)

    elif isinstance(contents, EventList):
        return DataSet.get_eventlist_dataset_from_stingray_Eventlist(contents)

    # This also works for Powerspectrum and AveragedCrossspectrum, which subclass Crossspectrum
    elif isinstance(contents, Crossspectrum):
        logging.error("Unsupported intermediate file type: Crossspectrum")

    else:
        logging.error("Unsupported intermediate file type: %s" %
                      type(stingray_object).__name__)

    return None
Example #3
def append_file_to_dataset(filename, nextfile, target):
    destination = get_destination(filename, target)
    if not destination:
        return common_error("Invalid file or cache key")

    if not nextfile:
        return common_error("No nextfile setted")

    if not SessionHelper.is_file_uploaded(nextfile):
        if not FileUtils.file_exist(target, nextfile):
            logging.error("Filename not uploaded for nextfile %s" % nextfile)
            return common_error("Nextfile not uploaded")

    next_destination = FileUtils.get_destination(target, nextfile)
    if not FileUtils.is_valid_file(next_destination):
        return common_error("Invalid next file")

    logging.debug("append_file_to_dataset, destination: %s" % destination)
    logging.debug("append_file_to_dataset, next_destination: %s" % next_destination)

    new_filename = DaveEngine.append_file_to_dataset(destination, next_destination)

    logging.debug("append_file_to_dataset, cache_key: %s" % new_filename)

    return json.dumps(new_filename)
Example #4
    def default(self, obj):
        try:
            if isinstance(obj, int):
                if obj > CONFIG.BIG_NUMBER:
                    return CONFIG.BIG_NUMBER
                if obj < -CONFIG.BIG_NUMBER:
                    return -CONFIG.BIG_NUMBER
                return int(obj)
            elif isinstance(obj, float):
                if obj > CONFIG.BIG_NUMBER:
                    return CONFIG.BIG_NUMBER
                if obj < -CONFIG.BIG_NUMBER:
                    return -CONFIG.BIG_NUMBER
                return float(obj)
            elif isinstance(obj, numpy.integer):
                if obj > CONFIG.BIG_NUMBER:
                    return CONFIG.BIG_NUMBER
                if obj < -CONFIG.BIG_NUMBER:
                    return -CONFIG.BIG_NUMBER
                return int(obj)
            elif isinstance(obj, numpy.floating):
                if obj > CONFIG.BIG_NUMBER:
                    return CONFIG.BIG_NUMBER
                if obj < -CONFIG.BIG_NUMBER:
                    return -CONFIG.BIG_NUMBER
                return float(obj)
            elif isinstance(obj, complex):
                return self.default(numpy.real(obj))
            elif isinstance(obj, numpy.ndarray):
                return obj.tolist()
            else:
                return super(NPEncoder, self).default(obj)
        except:
            logging.error(ExHelper.getException('NPEncoder'))
            return None
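
A short, hedged usage sketch for the encoder above, assuming NPEncoder subclasses json.JSONEncoder and CONFIG.BIG_NUMBER is a finite clamping threshold:

import json
import numpy

# json.dumps only calls default() for types it cannot serialize itself,
# e.g. numpy integers and ndarrays; the values below are illustrative.
payload = {"counts": numpy.arange(3), "max_pi": numpy.int64(255)}
print(json.dumps(payload, cls=NPEncoder))  # {"counts": [0, 1, 2], "max_pi": 255}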
Example #5
File: table.py Project: swapsha96/dave
    def apply_filter(self, filter):
        column_name = filter["column"]
        if column_name not in self.columns:
            logging.error("table.apply_filter wrong column: %s" % column_name)
            return self

        if filter["from"] > filter["to"]:
            logging.error("table.apply_filter wrong from-to: %s" % column_name)
            return self

        filtered_table = Table(self.id)
        for tmp_column_name in self.columns:
            filtered_table.columns[tmp_column_name] = Column(tmp_column_name)

        column = self.columns[column_name]

        values = np.array(column.values)
        filtered_indexes = np.where((values >= filter["from"])
                                    & (values <= filter["to"]))[0]

        for column_name in self.columns:
            col_values, col_error_values = self.columns[column_name].get_values(filtered_indexes)
            filtered_table.columns[column_name].add_values(col_values, col_error_values)

        return filtered_table
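
A hedged sketch of the filter dict this method expects; the keys are inferred from the code above and the values are illustrative, assuming `table` is a Table instance with a "TIME" column:

time_filter = {"table": "EVENTS", "column": "TIME", "from": 0.0, "to": 500.0}
filtered = table.apply_filter(time_filter)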
Example #6
def append_file_to_dataset(filename, nextfile, target):
    destination = get_destination(filename, target)
    if not destination:
        return common_error("Invalid file or cache key")

    if not nextfile:
        return common_error("No nextfile setted")

    if not SessionHelper.is_file_uploaded(nextfile):
        if not FileUtils.file_exist(target, nextfile):
            logging.error("Filename not uploaded for nextfile %s" % nextfile)
            return common_error("Nextfile not uploaded")

    next_destination = FileUtils.get_destination(target, nextfile)
    if not FileUtils.is_valid_file(next_destination):
        return common_error("Invalid next file")

    logging.debug("append_file_to_dataset, destination: %s" % destination)
    logging.debug("append_file_to_dataset, next_destination: %s" %
                  next_destination)

    new_filename = DaveEngine.append_file_to_dataset(destination,
                                                     next_destination)

    logging.debug("append_file_to_dataset, cache_key: %s" % new_filename)

    return json.dumps(new_filename)
Example #7
    def default(self, obj):
        try:
            if isinstance(obj, int):
                if obj > CONFIG.BIG_NUMBER:
                    return CONFIG.BIG_NUMBER
                if obj < -CONFIG.BIG_NUMBER:
                    return -CONFIG.BIG_NUMBER
                return int(obj)
            elif isinstance(obj, float):
                if obj > CONFIG.BIG_NUMBER:
                    return CONFIG.BIG_NUMBER
                if obj < -CONFIG.BIG_NUMBER:
                    return -CONFIG.BIG_NUMBER
                return float(obj)
            elif isinstance(obj, numpy.integer):
                if obj > CONFIG.BIG_NUMBER:
                    return CONFIG.BIG_NUMBER
                if obj < -CONFIG.BIG_NUMBER:
                    return -CONFIG.BIG_NUMBER
                return int(obj)
            elif isinstance(obj, numpy.floating):
                if obj > CONFIG.BIG_NUMBER:
                    return CONFIG.BIG_NUMBER
                if obj < -CONFIG.BIG_NUMBER:
                    return -CONFIG.BIG_NUMBER
                return float(obj)
            elif isinstance(obj, complex):
                return self.default(numpy.real(obj))
            elif isinstance(obj, numpy.ndarray):
                return obj.tolist()
            else:
                return super(NPEncoder, self).default(obj)
        except:
            logging.error(ExHelper.getException('NPEncoder'))
            return None
Example #8
def get_colors_lightcurve(src_destination, bck_destination, gti_destination, filters, axis, dt):

    if len(axis) != 2:
        logging.warn("Wrong number of axis")
        return None

    try:
        filters = FltHelper.apply_bin_size_to_filters(filters, dt)

        count_column_name = "PI"
        color_keys = FltHelper.get_color_keys_from_filters(filters)
        filtered_datasets = split_dataset_with_color_filters(src_destination, filters, color_keys, count_column_name, gti_destination)
        color_axis = get_color_axis_for_ds()

        # Creates lightcurves array applying bck and gtis from each color
        logging.debug("Create color lightcurves ....")
        lightcurves = get_lightcurves_from_datasets_array(filtered_datasets, color_keys, count_column_name, color_axis, bck_destination, filters, gti_destination, dt)
        filtered_datasets = None  # Dispose memory

        # Prepares the result
        logging.debug("Result color lightcurves ....")
        if len(lightcurves) == 4:
            if lightcurves[0]:
                result = push_to_results_array([], lightcurves[0].time)
                result = push_divided_values_to_results_array(result, lightcurves[0].countrate, lightcurves[1].countrate)
                result = push_divided_values_to_results_array(result, lightcurves[2].countrate, lightcurves[3].countrate)
                return result

    except:
        logging.error(str(sys.exc_info()))

    return None
Example #9
def load_dataset_from_intermediate_file(fname):
    """Save Stingray object to intermediate file."""

    from stingray.lightcurve import Lightcurve
    from stingray.events import EventList
    from stingray.crossspectrum import Crossspectrum
    from hendrics.io import get_file_type
    from stingray.io import _retrieve_pickle_object

    # This will return an EventList, a light curve, a Powerspectrum, ...
    # depending on the contents of the file
    try:
        ftype, contents = get_file_type(fname)
    except:
        contents = _retrieve_pickle_object(fname)

    if isinstance(contents, Lightcurve):
        return DataSet.get_lightcurve_dataset_from_stingray_Lightcurve(contents)

    elif isinstance(contents, EventList):
        return DataSet.get_eventlist_dataset_from_stingray_Eventlist(contents)

    # This also works for Powerspectrum and AveragedCrossspectrum, which subclass Crossspectrum
    elif isinstance(contents, Crossspectrum):
        logging.error("Unsupported intermediate file type: Crossspectrum")

    else:
        logging.error("Unsupported intermediate file type: %s" % type(stingray_object).__name__)

    return None
Example #10
def get_lightcurve_ds_from_events_ds(destination, axis, dt):

    try:

        if len(axis) != 2:
            logging.warn("Wrong number of axis")
            return ""

        dataset = DaveReader.get_file_dataset(destination)
        lc = get_lightcurve_from_dataset(dataset, axis, "", [], "", dt)

        if lc:
            #Changes lc format to stingray_addons format
            tmp_lc = {}
            tmp_lc['lc'] = lc.countrate
            tmp_lc['elc'] = []  # TODO: Get error from lightcurve
            tmp_lc['time'] = lc.time
            tmp_lc['GTI'] = lc.gti

            lc_dataset = DataSet.get_lightcurve_dataset_from_stingray_lcurve(tmp_lc, dataset.tables["EVENTS"].header, dataset.tables["EVENTS"].header_comments,
                                                                            "RATE", "TIME")
            dataset = None  # Dispose memory
            lc = None  # Dispose memory

            new_cache_key = DsCache.get_key(destination + "|ligthcurve")
            DsCache.add(new_cache_key, lc_dataset)  # Adds the new lightcurve dataset for the new key
            return new_cache_key

    except:
        logging.error(str(sys.exc_info()))

    return ""
Example #11
    def apply_filter(self, filter):
        column_name = filter["column"]
        if column_name not in self.columns:
            logging.error("table.apply_filter wrong column: %s" % column_name)
            return self

        if filter["from"] > filter["to"]:
            logging.error("table.apply_filter wrong from-to: %s" % column_name)
            return self

        filtered_table = Table(self.id)
        for tmp_column_name in self.columns:
            filtered_table.columns[tmp_column_name] = Column(tmp_column_name)

        column = self.columns[column_name]
        filtered_indexes = np.array([
            i for i in range(len(column.values))
            if ((column.values[i] >= filter["from"]) and (
                column.values[i] <= filter["to"]))
        ])

        for i in filtered_indexes:
            filtered_table.add_row(self.get_row(i))

        return filtered_table
Example #12
def get_intermediate_file(filepath, target):
    try:
        stingray_object = DaveReader.get_stingray_object(filepath)
        if stingray_object:
            filename = FileUtils.get_intermediate_filename(
                target, filepath, HEN_FILE_EXTENSION)
            if DaveReader.save_to_intermediate_file(stingray_object, filename):
                return filename
    except:
        logging.error(ExHelper.getException('get_intermediate_file'))

    return None
Example #13
def get_intermediate_files(filepaths, target):
    filenames = []

    for filepath in filepaths:
        if not FileUtils.is_valid_file(filepath):
            logging.error("Filepath not found or invalid: %s" % filepath)
        else:
            filename = DaveBulk.get_intermediate_file(filepath, target)
            logging.debug("get_intermediate_files filename: %s" % filename)
            if filename:
                filenames.append(filename)

    return json.dumps(filenames, cls=NPEncoder)
Example #14
def build_data_list(dataset, axis):
    data = []
    for i in range(len(axis)):
        table_name = axis[i]["table"]
        if table_name in dataset.tables:
            column = dataset.tables[table_name].columns[axis[i]["column"]]
            column_data = dict()
            column_data["values"] = column.values
            column_data["error_values"] = column.error_values
            data = np.append(data, [column_data])
        else:
            logging.error("Accessing unknown table: %s" % table_name)
    return data
Example #15
def get_intermediate_files(filepaths, target):
    filenames = []

    for filepath in filepaths:
        if not FileUtils.is_valid_file(filepath):
            logging.error("Filepath not found or invalid: %s" % filepath)
        else:
            filename = DaveBulk.get_intermediate_file(filepath, target)
            logging.debug("get_intermediate_files filename: %s" % filename)
            if filename:
                filenames.append(filename)

    return json.dumps(filenames, cls=NPEncoder)
Example #16
def get_destination(target, filename):
    try:
        if CONFIG.IS_LOCAL_SERVER:
            if filename.startswith('/') and os.path.isfile(filename):
                # This is supposed to be an absolute path
                return filename
            else:
                # Relative path
                return "/".join([target, filename])
        else:
            return "/".join([target, secure_filename(filename)])
    except:
        logging.error(ExHelper.getException('get_destination'))
        return ""
Example #17
def get_destination(target, filename):
    try:
        if CONFIG.IS_LOCAL_SERVER:
            if filename.startswith('/') and os.path.isfile(filename):
                # This is supposed to be an absolute path
                return filename
            else:
                # Relative path
                return "/".join([target, filename])
        else:
            return "/".join([target, secure_filename(filename)])
    except:
        logging.error(ExHelper.getException('get_destination'))
        return ""
Example #18
def get_lightcurve(src_destination, bck_destination, gti_destination, filters, axis, dt):

    time_vals = []
    count_rate = []
    error_values = []

    try:
        if len(axis) != 2:
            logging.warn("Wrong number of axis")
            return None

        filters = FltHelper.get_filters_clean_color_filters(filters)
        filters = FltHelper.apply_bin_size_to_filters(filters, dt)

        filtered_ds = get_filtered_dataset(src_destination, filters, gti_destination)
        if not DsHelper.is_events_dataset(filtered_ds) \
            and not DsHelper.is_lightcurve_dataset(filtered_ds):
            logging.warn("Wrong dataset type")
            return None

        if DsHelper.is_events_dataset(filtered_ds):
            # Creates lightcurves by gti and joins in one
            logging.debug("Create lightcurve ....Event count: " + str(len(filtered_ds.tables["EVENTS"].columns["TIME"].values)))

            lc = get_lightcurve_from_dataset(filtered_ds, axis, bck_destination, filters, gti_destination, dt)
            filtered_ds = None  # Dispose memory

            if lc:
                logging.debug("Result time: " + str(len(lc.time)))
                time_vals = lc.time
                count_rate = lc.countrate
                error_values = []  # TODO: Implement error values on Stingray
                #lc = None  # Dispose memory

        elif DsHelper.is_lightcurve_dataset(filtered_ds):
            #If dataset is LIGHTCURVE type
            time_vals = filtered_ds.tables["RATE"].columns["TIME"].values
            count_rate = filtered_ds.tables["RATE"].columns["RATE"].values
            error_values = filtered_ds.tables["RATE"].columns["ERROR"].values

    except:
        logging.error(str(sys.exc_info()))

    # Prepares the result
    logging.debug("Result lightcurve .... " + str(len(time_vals)))
    result = push_to_results_array([], time_vals)
    result = push_to_results_array(result, count_rate)
    result = push_to_results_array(result, error_values)
    return result
Example #19
def get_divided_lightcurve_ds(lc0_destination, lc1_destination):

    try:

        lc0_ds = DaveReader.get_file_dataset(lc0_destination)
        if not DsHelper.is_lightcurve_dataset(lc0_ds):
            logging.warn("Wrong dataset type for lc0")
            return ""

        count_rate_0 = np.array(lc0_ds.tables["RATE"].columns["RATE"].values)

        lc1_ds = DaveReader.get_file_dataset(lc1_destination)
        if not DsHelper.is_lightcurve_dataset(lc1_ds):
            logging.warn("Wrong dataset type for lc1")
            return ""

        count_rate_1 = np.array(lc1_ds.tables["RATE"].columns["RATE"].values)

        if count_rate_0.shape == count_rate_1.shape:

            ret_lc_ds = lc0_ds.clone(True)

            with np.errstate(all='ignore'): # Ignore divisions by 0 and others
                count_rate = np.nan_to_num(count_rate_0 / count_rate_1)
            count_rate[count_rate > BIG_NUMBER] = 0

            ret_lc_ds.tables["RATE"].columns["RATE"].clear()
            ret_lc_ds.tables["RATE"].columns["RATE"].add_values(count_rate) # TODO: Set error from lightcurve

            lc0_ds = None  # Dispose memory
            lc1_ds = None  # Dispose memory
            count_rate_1 = None  # Dispose memory
            count_rate_0 = None  # Dispose memory
            count_rate = None  # Dispose memory

            new_cache_key = DsCache.get_key(lc0_destination + "|" + lc1_destination + "|ligthcurve")
            DsCache.add(new_cache_key, ret_lc_ds)  # Adds new cached dataset for new key
            return new_cache_key

        else:
            logging.warn("Lightcurves have different shapes.")
            return None

    except:
        logging.error(str(sys.exc_info()))

    return ""
Example #20
def save_to_intermediate_file(stingray_object, fname):
    """Save Stingray object to intermediate file."""
    from stingray.lightcurve import Lightcurve
    from stingray.events import EventList
    from stingray.crossspectrum import Crossspectrum
    from hendrics.io import save_lcurve, save_events, save_pds
    if isinstance(stingray_object, Lightcurve):
        save_lcurve(stingray_object, fname)
    elif isinstance(stingray_object, EventList):
        save_events(stingray_object, fname)
    # This also works for Powerspectrum and AveragedCrossspectrum, which subclass Crossspectrum
    elif isinstance(stingray_object, Crossspectrum):
        save_pds(stingray_object, fname)
    else:
        logging.error("save_to_intermediate_file: Unknown object type: %s" % type(stingray_object).__name__)
        return False

    return True
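
A hedged round-trip sketch pairing this function with load_dataset_from_intermediate_file from Example #2; the EventList contents and the filename are illustrative only:

from stingray.events import EventList

events = EventList(time=[0.5, 1.2, 3.7])  # toy event times, for illustration
if save_to_intermediate_file(events, "example_ev.nc"):
    dataset = load_dataset_from_intermediate_file("example_ev.nc")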
Example #21
def save_to_intermediate_file(stingray_object, fname):
    """Save Stingray object to intermediate file."""
    from stingray.lightcurve import Lightcurve
    from stingray.events import EventList
    from stingray.crossspectrum import Crossspectrum
    from hendrics.io import save_lcurve, save_events, save_pds
    if isinstance(stingray_object, Lightcurve):
        save_lcurve(stingray_object, fname)
    elif isinstance(stingray_object, EventList):
        save_events(stingray_object, fname)
    # This also works for Powerspectrum and AveragedCrossspectrum, which subclass Crossspectrum
    elif isinstance(stingray_object, Crossspectrum):
        save_pds(stingray_object, fname)
    else:
        logging.error("save_to_intermediate_file: Unknown object type: %s" %
                      type(stingray_object).__name__)
        return False

    return True
Example #22
def get_joined_lightcurves_from_colors(src_destination, bck_destination, gti_destination, filters, axis, dt):

    if len(axis) != 2:
        logging.warn("Wrong number of axis")
        return None

    try:
        filters = FltHelper.apply_bin_size_to_filters(filters, dt)

        # Prepares SRC_LC
        clean_filters = FltHelper.get_filters_clean_color_filters(filters)
        filtered_ds = get_filtered_dataset(src_destination, clean_filters, gti_destination)

        # Creates src lightcurve applying bck and gtis
        src_lc = get_lightcurve_from_dataset(filtered_ds, axis, bck_destination, clean_filters, gti_destination, dt)
        if not src_lc:
            logging.warn("Cant create lc_src")
            return None

        count_column_name = "PI"
        color_keys = FltHelper.get_color_keys_from_filters(filters)
        filtered_datasets = split_dataset_with_color_filters(src_destination, filters, color_keys, count_column_name, gti_destination)
        color_axis = get_color_axis_for_ds()

        # Creates lightcurves array applying bck and gtis from each color
        logging.debug("Create color lightcurves ....")
        lightcurves = get_lightcurves_from_datasets_array(filtered_datasets, color_keys, count_column_name, color_axis, bck_destination, filters, gti_destination, dt)
        filtered_datasets = None  # Dispose memory

        if len(lightcurves) == 2:

            # Prepares the result
            logging.debug("Result joined lightcurves ....")
            result = push_to_results_array([], src_lc.countrate)
            result = push_divided_values_to_results_array(result, lightcurves[0].countrate, lightcurves[1].countrate)
            result = push_to_results_array(result, src_lc.time)
            return result

    except:
        logging.error(str(sys.exc_info()))

    return None
Example #23
def get_stingray_object(destination, time_offset=0):

    if not destination:
        return None

    filename = os.path.splitext(destination)[0]
    file_extension = magic.from_file(destination)
    logging.debug("File extension: %s" % file_extension)

    if file_extension.find("FITS") == 0:

        # Opening Fits
        hdulist = fits.open(destination, memmap=True)

        if 'EVENTS' in hdulist:
            # If EVENTS extension found, consider the Fits as EVENTS Fits
            fits_data = load_events_and_gtis(destination,
                                             additional_columns=['PI', "PHA"],
                                             gtistring=CONFIG.GTI_STRING,
                                             hduname='EVENTS',
                                             column=CONFIG.TIME_COLUMN)
            return substract_tstart_from_events(fits_data, time_offset)

        elif 'RATE' in hdulist:
            # If RATE extension found, consider the Fits as LIGHTCURVE Fits
            # Reads the lightcurve with hendrics
            outfile = lcurve_from_fits(destination,
                                       gtistring=get_hdu_string_from_hdulist(
                                           CONFIG.GTI_STRING, hdulist),
                                       timecolumn=CONFIG.TIME_COLUMN,
                                       ratecolumn=None,
                                       ratehdu=1,
                                       fracexp_limit=CONFIG.FRACEXP_LIMIT)[0]
            return substract_tstart_from_lcurve(load_lcurve(outfile),
                                                time_offset)

        else:
            logging.error("Unsupported FITS type!")

    else:
        logging.error("Unknown file extension: %s" % file_extension)
        return None
Example #24
    def apply_filters(self, filters):

        if not filters or not len(filters):
            return self

        filtered_dataset = self.clone()

        time_filter = FltHelper.get_time_filter(filters)  # First filter by time to reduce array lengths
        if time_filter:
            filtered_dataset = self.apply_time_filter(time_filter, time_filter["table"])

        for filter in filters:
            table_id = filter["table"]
            if table_id not in ["EVENTS", "RATE"] or filter["column"] != CONFIG.TIME_COLUMN:  # Exclude time filter
                if table_id in filtered_dataset.tables:
                    filtered_dataset.tables[table_id] = filtered_dataset.tables[table_id].apply_filter(filter)
                else:
                    logging.error("dataset.apply_filters wrong table_id: %s" % table_id)

        return filtered_dataset
Example #25
    def apply_filters(self, filters):

        if not filters or not len(filters):
            return self

        filtered_dataset = self.clone()

        for filter in filters:
            table_id = filter["table"]
            if table_id in filtered_dataset.tables:
                if table_id == "EVENTS" and filter["column"] == "TIME":
                    filtered_dataset = self.apply_time_filter(filter)
                else:
                    filtered_dataset.tables[table_id] = \
                        filtered_dataset.tables[table_id].apply_filter(filter)
            else:
                logging.error("dataset.apply_filters wrong table_id: %s" %
                              table_id)

        return filtered_dataset
Example #26
def get_joined_lightcurves(lc0_destination, lc1_destination, filters, axis, dt):

    try:

        if len(axis) != 2:
            logging.warn("Wrong number of axis")
            return None

        filters = FltHelper.get_filters_clean_color_filters(filters)
        filters = FltHelper.apply_bin_size_to_filters(filters, dt)

        lc0_ds = get_filtered_dataset(lc0_destination, filters)
        if not DsHelper.is_lightcurve_dataset(lc0_ds):
            logging.warn("Wrong dataset type for lc0")
            return None

        lc1_ds = get_filtered_dataset(lc1_destination, filters)
        if not DsHelper.is_lightcurve_dataset(lc1_ds):
            logging.warn("Wrong dataset type for lc1")
            return None

        # Probably a stronger check could be used here
        if len(lc0_ds.tables["RATE"].columns["TIME"].values) == len(lc1_ds.tables["RATE"].columns["TIME"].values):

            # Prepares the result
            logging.debug("Result joined lightcurves ....")
            result = push_to_results_array([], lc0_ds.tables["RATE"].columns["RATE"].values)
            result = push_to_results_array(result, lc1_ds.tables["RATE"].columns["RATE"].values)
            result = push_to_results_array(result, lc0_ds.tables["RATE"].columns["TIME"].values)
            return result

        else:
            logging.warn("Lightcurves have different durations.")
            return None

    except:
        logging.error(str(sys.exc_info()))

    return None
Example #27
    def apply_filter(self, filter):
        column_name = filter["column"]
        if column_name not in self.columns:
            logging.error("table.apply_filter wrong column: %s" % column_name)
            return self

        if filter["from"] > filter["to"]:
            logging.error("table.apply_filter wrong from-to: %s" % column_name)
            return self

        filtered_table = Table(self.id)
        for tmp_column_name in self.columns:
            filtered_table.columns[tmp_column_name] = Column(tmp_column_name)

        column = self.columns[column_name]

        values = np.array(column.values)
        filtered_indexes = np.where((values >= filter["from"]) & (values <= filter["to"]))[0]

        for column_name in self.columns:
            col_values, col_error_values = self.columns[column_name].get_values(filtered_indexes)
            filtered_table.columns[column_name].add_values(col_values, col_error_values)

        return filtered_table
Example #28
def get_stingray_object(destination, time_offset=0):

    if not destination:
        return None

    filename = os.path.splitext(destination)[0]
    file_extension = magic.from_file(destination)
    logging.debug("File extension: %s" % file_extension)

    if file_extension.find("FITS") == 0:

        # Opening Fits
        hdulist = fits.open(destination, memmap=True)

        if 'EVENTS' in hdulist:
            # If EVENTS extension found, consider the Fits as EVENTS Fits
            fits_data = load_events_and_gtis(destination,
                                             additional_columns=['PI', "PHA"],
                                             gtistring=CONFIG.GTI_STRING,
                                             hduname='EVENTS', column=CONFIG.TIME_COLUMN)
            return substract_tstart_from_events(fits_data, time_offset)

        elif 'RATE' in hdulist:
            # If RATE extension found, consider the Fits as LIGHTCURVE Fits
            # Reads the lightcurve with hendrics
            outfile = lcurve_from_fits(destination, gtistring=get_hdu_string_from_hdulist(CONFIG.GTI_STRING, hdulist),
                                     timecolumn=CONFIG.TIME_COLUMN, ratecolumn=None, ratehdu=1,
                                     fracexp_limit=CONFIG.FRACEXP_LIMIT)[0]
            return substract_tstart_from_lcurve(load_lcurve(outfile), time_offset)

        else:
            logging.error("Unsupported FITS type!")

    else:
        logging.error("Unknown file extension: %s" % file_extension)
        return None
Example #29
def get_destination(filename, target):
    if not filename:
        logging.error("No filename or cache key setted for filename %s" % filename)
        return None

    if not SessionHelper.is_file_uploaded(filename):
        if not DsCache.contains(filename):
            if not FileUtils.file_exist(target, filename):
                logging.error("Filename not uploaded or not found in cache for filename %s" % filename)
                return None

    destination = FileUtils.get_destination(target, filename)
    if not FileUtils.is_valid_file(destination):
        if not DsCache.contains(filename):
            logging.error("Invalid file or not found in cache filename %s" % filename)
            return None
        else:
            destination = filename # Filename represents only a joined dataset key, not a real file

    return destination
Example #30
def get_destination(filename, target):
    if not filename:
        logging.error("No filename or cache key setted for filename %s" %
                      filename)
        return None

    if not SessionHelper.is_file_uploaded(filename):
        if not DsCache.contains(filename):
            logging.error(
                "Filename not uploaded or not found in cache for filename %s" %
                filename)
            return None

    destination = FileUtils.get_destination(target, filename)
    if not FileUtils.is_valid_file(destination):
        if not DsCache.contains(filename):
            logging.error("Invalid file or not found in cache filename %s" %
                          filename)
            return None
        else:
            destination = filename  # Filename represents only a joined dataset key, not a real file

    return destination
Example #31
def bulk_analisys(filenames, plot_configs, outdir):
    try:

        results = dict()
        results["outdir"] = outdir
        results["plot_configs"] = []

        # For each plot config, run a bulk analysis
        for plot_config in plot_configs:

            plot_config_outdir = "/".join([outdir, plot_config["id"]])

            dt = plot_config["dt"]
            filters = FltHelper.get_filters_clean_color_filters(
                plot_config["filters"])
            filters = FltHelper.apply_bin_size_to_filters(filters, dt)

            if "class" in plot_config:
                if plot_config["class"] == "LcPlot":

                    args = ['--outdir', plot_config_outdir]
                    args.extend(['--bintime', str(dt)])

                    args = add_filter_to_args(args, filters,
                                              CONFIG.TIME_COLUMN,
                                              '--safe-interval')
                    args = add_filter_to_args(args, filters, "PI",
                                              '--pi-interval')
                    args = add_filter_to_args(args, filters, "E",
                                              '--e-interval')
                    #args = add_filter_to_args(args, filters, "PHA", '--pha-interval')
                    #args = add_filter_to_args(args, filters, "RATE", '--rate-interval')

                    args.extend(filenames)

                    logging.debug("Calling MPlcurve, args: " + str(args))

                    MPlcurve(args)

                    push_plotconfig_results(results["plot_configs"],
                                            plot_config["id"],
                                            plot_config_outdir)

                elif plot_config["class"] == "PDSPlot":

                    logging.error(
                        "PDSPlot not supported yet, still work in progress!!")
                    ''' TOO MANY OPEN QUESTIONS TO IMPLEMENT PDS IN BULK ANALYSIS
                    args = ['--outdir', plot_config_outdir]

                    args.extend(['--kind', "PDS"])
                    args.extend(['--bintime', str(dt)])

                    # Normalization: MaltPyn supports Leahy or rms, now DAVE sends leahy, frac, abs or none
                    args.extend(['--norm', str(plot_config["norm"])])

                    # Rebin: is this
                    args.extend(['--rebin', str(plot_config["norm"])])

                    args.extend(filenames)

                    MPfspec(args)

                    push_plotconfig_results(results["plot_configs"], plot_config["id"], plot_config_outdir)
                    '''

                else:
                    logging.error("PlotConfig.class not supported!!")
            else:
                logging.error("PlotConfig has no class key!!")

        return results

    except:
        logging.error(ExHelper.getException('bulk_analisys'))
        return None
Example #32
def get_file_dataset(destination, time_offset=0):

    dataset = None
    cache_key = ""

    try:

        if destination:

            cache_key = get_cache_key_for_destination(destination, time_offset)
            if DsCache.contains(cache_key):
                logging.debug("get_file_dataset: returned cached dataset, cache_key: " + str(cache_key))
                return DsCache.get(cache_key), cache_key

            logging.debug("get_file_dataset: reading destination: " + str(destination))
            filename = os.path.splitext(destination)[0]
            file_extension_from_file = os.path.splitext(destination)[1]
            file_extension = magic.from_file(destination)
            logging.debug("File extension: %s" % file_extension)

            if file_extension.find("ASCII") == 0:

                table_id = "EVENTS"
                header_names = [CONFIG.TIME_COLUMN, "PHA", "Color1", "Color2"]
                dataset = get_txt_dataset(destination, table_id, header_names)

                table = dataset.tables[table_id]
                table.add_columns(["AMPLITUDE"])
                numValues = len(table.columns[CONFIG.TIME_COLUMN].values)
                random_values = np.random.uniform(-1, 1, size=numValues)
                table.columns["AMPLITUDE"].values = random_values

            elif file_extension.find("FITS") == 0 \
                 or file_extension.find("gzip") > -1:

                # Opening Fits
                hdulist = fits.open(destination, memmap=True)

                if get_hdu_string_from_hdulist(CONFIG.EVENTS_STRING, hdulist) != "":
                    # If EVENTS extension found, consider the Fits as EVENTS Fits
                    dataset = get_events_fits_dataset_with_stingray(destination, hdulist, dsId='FITS',
                                                       hduname=get_hdu_string_from_hdulist(CONFIG.EVENTS_STRING, hdulist),
                                                       column=CONFIG.TIME_COLUMN, gtistring=CONFIG.GTI_STRING,
                                                       extra_colums=['PI', "PHA"], time_offset=time_offset)

                elif 'RATE' in hdulist:
                    # If RATE extension found, consider the Fits as LIGHTCURVE Fits
                    dataset = get_lightcurve_fits_dataset_with_stingray(destination, hdulist, hduname='RATE',
                                                                column=CONFIG.TIME_COLUMN, gtistring=CONFIG.GTI_STRING, time_offset=time_offset)

                elif 'EBOUNDS' in hdulist:
                    # If EBOUNDS extension found, consider the Fits as RMF Fits
                    dataset = get_fits_dataset(hdulist, "RMF", ["EBOUNDS"])

                elif get_hdu_string_from_hdulist(CONFIG.GTI_STRING, hdulist) != "":
                    # If not EVENTS or RATE extension found, check if is GTI Fits
                    dataset = get_gti_fits_dataset_with_stingray(hdulist,gtistring=CONFIG.GTI_STRING, time_offset=time_offset)

                else:
                    logging.warn("Unsupported FITS type! Any table found: " + CONFIG.EVENTS_STRING + ", RATE, EBOUNDS or " + CONFIG.GTI_STRING)

            elif file_extension == "data" and (file_extension_from_file in [".p", ".nc"]):

                # If file is pickle object, tries to parse it as dataset
                dataset = load_dataset_from_intermediate_file(destination)

            else:
                logging.warn("Unknown file extension: " + str(file_extension) + " , " + str(file_extension_from_file))

            if dataset:
                DsCache.add(cache_key, dataset)
                logging.debug("get_file_dataset, dataset added to cache, cache_key: " + str(cache_key))

        else:
            logging.error("get_file_dataset: Destination is empty")

    except:
        logging.error(ExHelper.getException('get_file_dataset'))

    return dataset, cache_key
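
A hedged usage sketch: get_file_dataset returns a (dataset, cache_key) pair, so callers should unpack both; the path and the column access below are illustrative only:

dataset, cache_key = get_file_dataset("/path/to/events.fits")
if dataset:
    times = dataset.tables["EVENTS"].columns[CONFIG.TIME_COLUMN].values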
Example #33
def get_file_dataset(destination, time_offset=0):

    dataset = None
    cache_key = ""

    try:

        if destination:

            cache_key = get_cache_key_for_destination(destination, time_offset)
            if DsCache.contains(cache_key):
                logging.debug(
                    "get_file_dataset: returned cached dataset, cache_key: " +
                    str(cache_key))
                return DsCache.get(cache_key), cache_key

            logging.debug("get_file_dataset: reading destination: " +
                          str(destination))
            filename = os.path.splitext(destination)[0]
            file_extension_from_file = os.path.splitext(destination)[1]
            file_extension = magic.from_file(destination)
            logging.debug("File extension: %s" % file_extension)

            if file_extension.find("ASCII") == 0:

                table_id = "EVENTS"
                header_names = [CONFIG.TIME_COLUMN, "PHA", "Color1", "Color2"]
                dataset = get_txt_dataset(destination, table_id, header_names)

                table = dataset.tables[table_id]
                table.add_columns(["AMPLITUDE"])
                numValues = len(table.columns[CONFIG.TIME_COLUMN].values)
                random_values = np.random.uniform(-1, 1, size=numValues)
                table.columns["AMPLITUDE"].values = random_values

            elif file_extension.find("FITS") == 0 \
                 or file_extension.find("gzip") > -1:

                # Opening Fits
                hdulist = fits.open(destination, memmap=True)

                if get_hdu_string_from_hdulist(CONFIG.EVENTS_STRING,
                                               hdulist) != "":
                    # If EVENTS extension found, consider the Fits as EVENTS Fits
                    dataset = get_events_fits_dataset_with_stingray(
                        destination,
                        hdulist,
                        dsId='FITS',
                        hduname=get_hdu_string_from_hdulist(
                            CONFIG.EVENTS_STRING, hdulist),
                        column=CONFIG.TIME_COLUMN,
                        gtistring=CONFIG.GTI_STRING,
                        extra_colums=['PI', "PHA"],
                        time_offset=time_offset)

                elif 'RATE' in hdulist:
                    # If RATE extension found, consider the Fits as LIGHTCURVE Fits
                    dataset = get_lightcurve_fits_dataset_with_stingray(
                        destination,
                        hdulist,
                        hduname='RATE',
                        column=CONFIG.TIME_COLUMN,
                        gtistring=CONFIG.GTI_STRING,
                        time_offset=time_offset)

                elif 'EBOUNDS' in hdulist:
                    # If EBOUNDS extension found, consider the Fits as RMF Fits
                    dataset = get_fits_dataset(hdulist, "RMF", ["EBOUNDS"])

                elif get_hdu_string_from_hdulist(CONFIG.GTI_STRING,
                                                 hdulist) != "":
                    # If not EVENTS or RATE extension found, check if is GTI Fits
                    dataset = get_gti_fits_dataset_with_stingray(
                        hdulist,
                        gtistring=CONFIG.GTI_STRING,
                        time_offset=time_offset)

                else:
                    logging.warn("Unsupported FITS type! Any table found: " +
                                 CONFIG.EVENTS_STRING + ", RATE, EBOUNDS or " +
                                 CONFIG.GTI_STRING)

            elif file_extension == "data" and (file_extension_from_file
                                               in [".p", ".nc"]):

                # If file is pickle object, tries to parse it as dataset
                dataset = load_dataset_from_intermediate_file(destination)

            else:
                logging.warn("Unknown file extension: " + str(file_extension) +
                             " , " + str(file_extension_from_file))

            if dataset:
                DsCache.add(cache_key, dataset)
                logging.debug(
                    "get_file_dataset, dataset added to cache, cache_key: " +
                    str(cache_key))

        else:
            logging.error("get_file_dataset: Destination is empty")

    except:
        logging.error(ExHelper.getException('get_file_dataset'))

    return dataset, cache_key