Code example #1
def test_table_from_segments():
    segs = DataQualityDict()
    segs["test1"] = DataQualityFlag(active=[(0, 1), (3, 4)], )
    segs["test2"] = DataQualityFlag(active=[(5, 6)], )
    assert_table_equal(
        utils.table_from_segments(segs),
        EventTable(
            rows=[(0., 100., 0., 1., 10., "test1"),
                  (3., 100., 3., 4., 10., "test1"),
                  (5., 100., 5., 6., 10., "test2")],
            names=("time", "frequency", "start_time", "end_time", "snr",
                   "channel"),
        ),
    )
    assert_table_equal(
        utils.table_from_segments(segs,
                                  frequency=1234.,
                                  snr=-4.,
                                  sngl_burst=True),
        EventTable(
            rows=[(0., 1234., -4., "test1"), (3., 1234., -4., "test1"),
                  (5., 1234., -4., "test2")],
            names=("peak", "peak_frequency", "snr", "channel"),
        ),
    )
Code example #2
    def calculate_confusion_matrices_per_classification(self):
        """Parameters
        ----------

        Returns
        -------
        A dict with keys of workflow IDs and values list
        of golden sets associated with that workflow
        """
        # Load classifications, and golden images from DB
        # Make sure choice is a valid index
        # Make sure to evaluate only logged in users
        # Ignore NONEOFTHEABOVE classifications when constructing the
        # confusion matrix
        # Make sure the subject classified was a golden image
        query = 'classificationsdev WHERE \"annotations_value_choiceINT\" != \
            -1 AND \"links_user\" != 0 AND \
            \"annotations_value_choiceINT\" != 12 AND \
            CAST(links_subjects AS FLOAT) IN \
            (SELECT \"links_subjects\" FROM goldenimages)'

        columns = [
            'id', 'links_user', 'links_subjects', 'links_workflow',
            'annotations_value_choiceINT'
        ]
        classifications = EventTable.fetch('gravityspy',
                                           query,
                                           columns=columns)

        classifications = classifications.to_pandas()
        classifications = classifications.sort_values('id')
        golden_images = EventTable.fetch('gravityspy', 'goldenimages')
        golden_images_df = golden_images.to_pandas()

        # From answers Dict determine number of classes
        numClasses = len(list(self.get_answers(workflow=7766).values())[0])

        # merge the golden image DF with the classifications (this merge is on
        # links_subjects, i.e. the zooID of the image classified)
        image_and_classification = classifications.merge(golden_images_df,
                                                         on=['links_subjects'])

        # group by users to get their gold classifications
        tmp = image_and_classification.groupby('links_user')[[
            'annotations_value_choiceINT', 'GoldLabel', 'id'
        ]]
        user_confusion_matrices = {}
        for key, item in tmp:
            user_confusion_matrices[key] = {}
            userlabels = tmp.get_group(key)
            rows = []
            cols = []
            entry = []
            for ilabel in userlabels.sort_values('id').itertuples():
                rows.append(ilabel[2])
                cols.append(ilabel[1])
                entry.append(1)
                user_confusion_matrices[key][ilabel[3]] = coo_matrix(
                    (entry, (rows, cols)), shape=(numClasses, numClasses))
        return user_confusion_matrices
Code example #3
def Time_Scatter(times, deps, xname, yname, title, fname, flags, flagnames,
                 s=10, c='b', marker='o', figsize=[12, 4]):
    """Make a scatter plot of some time-dependent data in lists, with
    arbitrary segment bars underneath.

    Arguments:
    times -- List of times to plot.
    deps -- List of time-dependent data values to plot.
    xname -- String describing x-axis information.
    yname -- String describing y-axis information.
    title -- String to use as the plot title.
    fname -- Filename to which the figure is saved.
    flags -- A list of DQflags to plot in segment bars under the plot.
    flagnames -- List of strings with which to label the segment bars.
                 Use an empty string to 'skip' an entry.
    s -- Integer denoting plot marker size.
    c -- String for plot marker colour.
    marker -- String for plot marker shape.
    figsize -- Standard figsize keyword argument.

    Returns:
    None"""
    toplot = EventTable(data=[times, deps], masked=None, names=[xname, yname])
    plot = toplot.scatter(xname, yname, s=s, c=c, marker=marker,
                          figsize=figsize, ylabel=yname, title=title)
    for i in range(len(flags)):
        plot.add_segments_bar(flags[i], label=flagnames[i])
    plot.savefig(fname, dpi=300, format='png', transparent=True)
    plot.close()
Code example #4
    def clean(self):
        cleaned_data = super(LIGOSearchForm, self).clean()
        zooid = cleaned_data.get('zooid')
        imageid = cleaned_data.get('imageid')
        gpstime = cleaned_data.get('gpstime')

        if (zooid and imageid) or (zooid and gpstime) or (gpstime and imageid):
            raise forms.ValidationError("Please fill out only one of the "
                                        "zooid, gravityspy id, or gpstime "
                                        "fields")

        elif (not zooid) and (not imageid) and (not gpstime):
            raise forms.ValidationError("Please fill out one of the zooid, "
                                        "gravityspy id, or gpstime fields")

        if zooid and not imageid and not gpstime:
            if EventTable.fetch('gravityspy', 'similarityindex WHERE links_subjects = {0}'.format(zooid), columns=['links_subjects']).to_pandas().empty:
                raise forms.ValidationError("zooid does not exist"
                                        )

        if imageid and not zooid and not gpstime:
            if EventTable.fetch('gravityspy', 'similarityindex WHERE \"uniqueID\" = \'{0}\''.format(imageid), columns=['uniqueID']).to_pandas().empty:
                raise forms.ValidationError("uniqueid does not exist"
                                        )
Code example #5
    def __init__(self, paramsfile, eventNumber):
        """Initialize an XEvent on-source off-source or injection with pfile

        Parameters
        ----------
        paramsfile (str):
            an xpipeline params file
        eventNumber (int):
            an integer referring to which event from the
            input/event_off/on/inj.txt file to grab for processing

        Returns:

            `XEvent`
        """
        print("You are generating an xevent by supplying a "
              "a xpipeline params file, this will overwite the defaults")
        with open(paramsfile, 'r') as f:
            for line in f.readlines():
                parsed_text = line.split('\n')[0].split(':')
                # check if the param is also comma separated
                try:
                    parsed_text[1].split(',')[1]
                    setattr(self, parsed_text[0], parsed_text[1].split(','))
                except IndexError:
                    setattr(self, parsed_text[0], parsed_text[1])

        self.phi = list(
            EventTable.read(self.skyPositionList, format='ascii')['col2'])
        self.theta = list(
            EventTable.read(self.skyPositionList, format='ascii')['col1'])
        self.event_time = list(
            EventTable.read(self.eventFileName,
                            format='ascii')['col1'])[eventNumber]

        for key, item in vars(self).items():
            try:
                setattr(self, key, float(item))
            except (TypeError, ValueError):
                pass

        analysistimes = [float(i) for i in self.analysistimes]
        self.analysistimes = analysistimes
        channel_names = []
        frame_types = []
        detectors = {}
        with open(self.channelFileName, 'r') as f:
            for det in f.readlines():
                detector_name = det.split(' ')[0].split(':')[0]
                channel_names.append(det.split(' ')[0])
                frame_types.append(det.split(' ')[1])
                detectors[detector_name] = Detector(detector_name)

        self.channel_names = channel_names
        self.frame_types = frame_types
        self.detectors = detectors
Code example #6
File: utils.py Project: gwdetchar/hveto
def primary_vetoed(starttime=None, hveto_path=None, snr=6.0, significance=5.0):
    """Catalogue all vetoed primary triggers from a given analysis

    This utility queries the output of an hveto analysis for the triggers
    vetoed from its primary channel over all rounds (up to thresholds on
    signal-to-noise ratio and round significance).

    Parameters
    ----------
    starttime : `str` or `float`
        start GPS time for this analysis

    hveto_path : `str`
        path of the hveto files directory,
        not required if ``starttime`` is given

    snr : `float`, optional
        signal-to-noise ratio threshold on triggers, default: 6.0

    significance : `float`, optional
        hveto significance threshold on auxiliary channels, default: 5.0

    Returns
    -------
    catalogue : `~gwpy.table.EventTable`
        a tabular catalogue of primary triggers vetoed in the hveto run
    """
    path = const.get_hvetopath(starttime) if starttime else hveto_path
    t_vetoed = EventTable(names=[
        'time', 'snr', 'peak_frequency', 'channel', 'winner', 'significance'
    ])
    try:
        files = glob.glob(os.path.join(path, 'triggers', '*VETOED*.txt'))
        t_summary = EventTable.read(os.path.join(path, 'summary-stats.txt'),
                                    format='ascii')
        n = len(t_summary)
        files = files[:n]
        t_vetoed = EventTable.read(files, format='ascii')
        lenoffiles = t_summary['nveto']
        winsig = [
            round(t_summary['significance'][i], 4) for i in range(n)
            for j in range(lenoffiles[i])
        ]
        winchans = [
            t_summary['winner'][i] for i in range(n)
            for j in range(lenoffiles[i])
        ]
        rounds = [i + 1 for i in range(n) for j in range(lenoffiles[i])]
        colsig = Column(data=winsig, name='significance')
        colwin = Column(data=winchans, name='winner')
        colround = Column(data=rounds, name='round')
        t_vetoed.add_column(colwin)
        t_vetoed.add_column(colsig)
        t_vetoed.add_column(colround)
        t_vetoed = t_vetoed.filter('snr>{0}'.format(snr),
                                   'significance>{0}'.format(significance))
    except (FileNotFoundError, ValueError):
        warnings.warn("Could not find Hveto analysis for this day")
    return t_vetoed
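
A minimal usage sketch of the function above; the GPS start time and thresholds are placeholders, and the call assumes hveto output for that day exists under the default path known to `const.get_hvetopath`:

# hypothetical call: catalogue vetoed primary triggers for one analysis day
vetoed = primary_vetoed(starttime=1187000000, snr=8.0, significance=10.0)
print("number of vetoed primary triggers:", len(vetoed))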
Code example #7
def retrain_model(request):
    # if this is a POST request we need to process the form data
    if request.method == 'POST':

        # create a form instance and populate it with data from the request:
        form = NewClassForm(request.POST)
        # check whether it's valid:
        if form.is_valid():
            collection_owner = str(form.cleaned_data['collection_owner'])
            collection_name = str(form.cleaned_data['collection_name'])
            new_class_name = str(form.cleaned_data['new_class_name'])

            # First, determine the subjects to be added to the training set
            subjects_in_collection, tmp = retrieve_subjects_from_collection(collection_owner, collection_name)
            subjects_in_collection = [str(isubject) for isubject in subjects_in_collection]

            new_subjects = list(EventTable.fetch('gravityspy',
                                                 'glitches WHERE CAST(links_subjects AS FLOAT) IN ({0})'.format(str(",".join(subjects_in_collection))),
                                                  columns=["gravityspy_id"], host='gravityspyplus.ciera.northwestern.edu')['gravityspy_id'])

            requested_model, created = NewClass.objects.get_or_create(collection_owner=collection_owner,
                                                                      collection_name=collection_name,
                                                                      new_class_name=new_class_name,
                                                                      new_subjects=new_subjects,
                                                                      user=request.user)
            requested_model.save()

            return render(request, 'temp.html') 
        else:
            return render(request, 'retrain-model-form.html', {'form': form})
Code example #8
def test_table_from_segments_empty():
    segs = DataQualityDict()
    segs['test'] = DataQualityFlag(active=[])
    assert_table_equal(
        utils.table_from_segments(segs),
        EventTable(names=("time", "frequency", "start_time", "end_time", "snr",
                          "channel")))
Code example #9
def test_table_from_times():
    times = numpy.array(range(10), dtype=float)
    assert_table_equal(
        utils.table_from_times(times),
        EventTable([times, [100.] * 10, [10.] * 10],
                   names=("time", "frequency", "snr")),
    )
Code example #10
def dategraph(request):
    # if this is a POST request we need to process the form data
    if request.method == 'GET':

        # create a form instance and populate it with data from the request:
        form = SearchForm(request.GET)
        # check whether it's valid:
        if form.is_valid():
            username = str(form.cleaned_data['username'])
            collection_display_name = str(
                form.cleaned_data['collection_display_name'])

            subjects_in_collection, tmp = retrieve_subjects_from_collection(
                username, collection_display_name)
            subjects_in_collection = [
                str(isubject) for isubject in subjects_in_collection
            ]
            SI_glitches = EventTable.fetch(
                'gravityspy',
                'glitches WHERE CAST(links_subjects AS FLOAT) IN ({0})'.format(
                    str(",".join(subjects_in_collection))),
                host='gravityspyplus.ciera.northwestern.edu').to_pandas()
            fig = obtain_figure(SI_glitches)
            canvas = FigureCanvas(fig)
            buf = io.BytesIO()
            canvas.print_png(buf)
            response = HttpResponse(buf.getvalue(), content_type='image/png')
            fig.clear()
            return response
Code example #11
def collectioninfo(request):
    # if this is a POST request we need to process the form data
    if request.method == 'GET':

        # create a form instance and populate it with data from the request:
        form = SearchForm(request.GET)
        # check whether it's valid:
        if form.is_valid():
            username = str(form.cleaned_data['username'])
            collection_display_name = str(
                form.cleaned_data['collection_display_name'])

            subjects_in_collection, tmp = retrieve_subjects_from_collection(
                username, collection_display_name)
            subjects_in_collection = [
                str(isubject) for isubject in subjects_in_collection
            ]
            SI_glitches = EventTable.fetch(
                'gravityspy',
                'glitches WHERE CAST(links_subjects AS FLOAT) IN ({0})'.format(
                    str(",".join(subjects_in_collection))),
                host='gravityspyplus.ciera.northwestern.edu').to_pandas()
            dategraph_url = request.get_full_path()[::-1].replace(
                'collection-info'[::-1], 'dategraph'[::-1], 1)[::-1]

            return render(
                request, 'collection_results.html', {
                    'results': SI_glitches.to_dict(orient='records'),
                    'dategraph_url': dategraph_url
                })
        else:
            return render(request, 'collectioninfo.html', {'form': form})
Code example #12
    def check_level_by_classification(self):
        # Obtain workflow order
        order = self.project_info['configuration_workflow_order']
        workflows = [int(str(iWorkflow)) for iWorkflow in order]
        levelWorkflowDict = dict(enumerate(workflows))
        workflowLevelDict = dict(
            (v, k + 1) for k, v in levelWorkflowDict.items())

        query = 'classificationsdev GROUP BY links_user, links_workflow'
        userlevels = EventTable.fetch('gravityspy',
                                      query,
                                      columns=['links_user', 'links_workflow'])

        userlevels = userlevels.to_pandas()
        userlevels['Level'] = userlevels.links_workflow.apply(
            lambda x: workflowLevelDict[x])

        init_user_levels = userlevels.groupby('links_user').Level.max()

        init_user_levels_dict = {
            'userID': init_user_levels.index.tolist(),
            'workflowInit': init_user_levels.tolist()
        }

        userStatusInit = pd.DataFrame(init_user_levels_dict)
        self.userStatusInit = userStatusInit
        return userStatusInit
Code example #13
def read_cache(cache, segments, etg, nproc=1, timecolumn=None, **kwargs):
    """Read a table of events from a cache

    This function is mainly meant for use from the `get_triggers` method

    Parameters
    ----------
    cache : :class:`glue.lal.Cache`
        the formatted list of files to read
    segments : `~gwpy.segments.SegmentList`
        the list of segments to read
    etg : `str`
        the name of the trigger generator that created the files
    nproc : `int`, optional
        the number of parallel processes to use when reading
    **kwargs
        other keyword arguments are passed to the `EventTable.read` or
        `{tableclass}.read` methods

    Returns
    -------
    table : `~gwpy.table.EventTable`, `None`
        a table of events, or `None` if the cache has no overlap with
        the segments
    """
    if isinstance(cache, Cache):
        cache = cache.sieve(segmentlist=segments)
        cache = cache.checkfilesexist()[0]
        cache.sort(key=lambda x: x.segment[0])
        cache = cache.pfnlist()  # some readers only like filenames
    else:
        cache = [urlparse(url).path for url in cache]
    if etg == 'pycbc_live':  # remove empty HDF5 files
        cache = filter_pycbc_live_files(cache, ifo=kwargs['ifo'])

    if len(cache) == 0:
        return

    # read triggers
    table = EventTable.read(cache, **kwargs)

    # store read keywords in the meta table
    if timecolumn:
        table.meta['timecolumn'] = timecolumn

    # get back from cache entry
    if isinstance(cache, CacheEntry):
        cache = Cache([cache])

    # append new events to existing table
    try:
        csegs = cache_segments(cache) & segments
    except (AttributeError, TypeError, ValueError):
        csegs = SegmentList()
    table.meta['segments'] = csegs

    if timecolumn:  # already filtered on-the-fly
        return table
    # filter now
    return keep_in_segments(table, segments, etg)
Code example #14
File: views.py Project: olipatane/gravityspytools
def collectioninfo(request):
    # if this is a POST request we need to process the form data
    if request.method == 'GET':

        # create a form instance and populate it with data from the request:
        form = SearchForm(request.GET)
        # check whether it's valid:
        if form.is_valid():
            username = str(form.cleaned_data['username'])
            collection_display_name = str(form.cleaned_data['collection_display_name'])

            subjects_in_collection = retrieve_subjects_from_collection(username, collection_display_name)
            subjects_in_collection = [str(isubject) for isubject in subjects_in_collection]
            SI_glitches = EventTable.fetch('gravityspy', 'glitches WHERE CAST(links_subjects AS FLOAT) IN ({0})'.format(str(",".join(subjects_in_collection)))).to_pandas()

            GlitchValues = SI_glitches.values
            GlitchGPS = (GlitchValues[:, 4]).tolist()
            newestGPS = max(GlitchGPS)
            oldestGPS = min(GlitchGPS)
            print(newestGPS)
            print(oldestGPS)

            # converting newest and oldest to dates
            newest = timeconvert.gps2ppl(newestGPS)
            oldest = timeconvert.gps2ppl(oldestGPS)
            print(newest)
            print(oldest)

            # number of weeks
            def nofweeks(d1, d2):
                d1 = datetime.datetime.strptime(d1, "%Y-%m-%d %H:%M")
                d2 = datetime.datetime.strptime(d2, "%Y-%m-%d %H:%M")
                # integer number of whole weeks, with a minimum of one bin
                weeks = abs((d2 - d1).days) // 7
                return max(weeks, 1)
            print(nofweeks(newest, oldest))

            # checking and changing xlocs, xlabels
            plt.hist(GlitchGPS, bins=nofweeks(newest, oldest))
            xlocs, xlabels = plt.xticks()
            print(xlocs)

            GPSlabels = list()
            for x in range(len(xlocs)):
                GPSlabels.append(timeconvert.gps2ppl(xlocs[x]))
            plt.xticks(xlocs, GPSlabels, rotation=45)
            plt.title("Distribution of Glitches \n Each Bin Represents One Week")
            plt.xlabel("Time")
            plt.ylabel("Number of Glitches per Week")
            plt.tight_layout()
            plt.hist(GlitchGPS, bins=nofweeks(newest, oldest))
            xlocs, xlabels = plt.xticks()
            print(xlocs)

            plt.savefig("final.png")

            return render(request, 'searchresults.html', {'results': SI_glitches.to_dict(orient='records')})
Code example #15
def read_cache(cache, segments, etg, nproc=1, timecolumn=None, **kwargs):
    """Read a table of events from a cache

    This function is mainly meant for use from the `get_triggers` method

    Parameters
    ----------
    cache : :class:`glue.lal.Cache`
        the formatted list of files to read
    segments : `~gwpy.segments.SegmentList`
        the list of segments to read
    etg : `str`
        the name of the trigger generator that created the files
    nproc : `int`, optional
        the number of parallel processes to use when reading
    **kwargs
        other keyword arguments are passed to the `EventTable.read` or
        `{tableclass}.read` methods

    Returns
    -------
    table : `~gwpy.table.EventTable`, `None`
        a table of events, or `None` if the cache has no overlap with
        the segments
    """
    if isinstance(cache, Cache):
        cache = cache.sieve(segmentlist=segments)
        cache = cache.checkfilesexist()[0]
        cache.sort(key=lambda x: x.segment[0])
        if etg == 'pycbc_live':  # remove empty HDF5 files
            cache = type(cache)(
                filter_pycbc_live_files(cache, ifo=kwargs['ifo']))
    # if no files, skip
    if len(cache) == 0:
        return
    # use multiprocessing except for ascii reading
    # (since astropy doesn't allow it)
    if kwargs.get('format', 'none').startswith('ascii.'):
        cache = cache.pfnlist()
    else:
        kwargs['nproc'] = nproc
    if len(cache) == 1:
        cache = cache[0]

    # read triggers
    table = EventTable.read(cache, **kwargs)
    if timecolumn:
        table.meta['timecolumn'] = timecolumn

    # get back from cache entry
    if isinstance(cache, CacheEntry):
        cache = Cache([cache])
    # append new events to existing table
    try:
        csegs = cache_segments(cache)
    except (AttributeError, TypeError):
        csegs = SegmentList()
    table.meta['segments'] = csegs
    return keep_in_segments(table, segments, etg)
Code example #16
File: test_archive.py Project: eagoetz/gwsumm
def test_archive_load_table():
    t = EventTable(random.random((100, 5)), names=['a', 'b', 'c', 'd', 'e'])
    empty = EventTable(names=['a', 'b'])
    try:
        fname = tempfile.mktemp(suffix='.h5', prefix='gwsumm-tests-')
        h5file = h5py.File(fname, mode='a')
        # check table gets archived and read transparently
        archive.archive_table(t, 'test-table', h5file)
        t2 = archive.load_table(h5file['test-table'])
        nptest.assert_array_equal(t.as_array(), t2.as_array())
        assert t.dtype == t2.dtype
        # check empty table does not get archived, with warning
        with pytest.warns(UserWarning):
            n = archive.archive_table(empty, 'test-empty', h5file)
        assert n is None
        assert 'test-empty' not in h5file
    finally:
        if os.path.exists(fname):
            os.remove(fname)
Code example #17
File: test_archive.py Project: gwpy/gwsumm
def test_archive_load_table():
    t = EventTable(random.random((100, 5)),
                   names=['a', 'b', 'c', 'd', 'e'])
    empty = EventTable(names=['a', 'b'])
    try:
        fname = tempfile.mktemp(suffix='.h5', prefix='gwsumm-tests-')
        h5file = h5py.File(fname)
        # check table gets archived and read transparently
        archive.archive_table(t, 'test-table', h5file)
        t2 = archive.load_table(h5file['test-table'])
        nptest.assert_array_equal(t.as_array(), t2.as_array())
        assert t.dtype == t2.dtype
        # check empty table does not get archived, with warning
        with pytest.warns(UserWarning):
            n = archive.archive_table(empty, 'test-empty', h5file)
        assert n is None
        assert 'test-empty' not in h5file
    finally:
        if os.path.exists(fname):
            os.remove(fname)
Code example #18
File: test_archive.py Project: gwpy/gwsumm
def test_write_archive(delete=True):
    empty_globalv()
    data.add_timeseries(TEST_DATA)
    data.add_timeseries(create([1, 2, 3, 4, 5],
                               dt=60., channel='X1:TEST-TREND.mean'))
    data.add_timeseries(create([1, 2, 3, 2, 1],
                               series_class=StateVector,
                               channel='X1:TEST-STATE_VECTOR'))
    data.add_spectrogram(create([[1, 2, 3], [3, 2, 1], [1, 2, 3]],
                                series_class=Spectrogram,
                                channel='X1:TEST-SPECTROGRAM'))
    t = EventTable(random.random((100, 5)), names=['time', 'a', 'b', 'c', 'd'])
    t.meta['segments'] = SegmentList([Segment(0, 100)])
    triggers.add_triggers(t, 'X1:TEST-TABLE,testing')
    fname = tempfile.mktemp(suffix='.h5', prefix='gwsumm-tests-')
    try:
        archive.write_data_archive(fname)
        archive.write_data_archive(fname)  # test again to validate backups
    finally:
        if delete and os.path.isfile(fname):
            os.remove(fname)
    return fname
Code example #19
def getOmicronTriggers(start, end, channel, max_snr, segs=None):
    try:
        cache = find_trigger_files(channel, 'OMICRON', start, end)
        t = EventTable.read(cache,
                            format='ligolw',
                            tablename='sngl_burst',
                            selection=['snr<=%f' % max_snr])
        if (segs is not None):
            t = t.filter(('peak_time', in_segmentlist, segs))
        print("SUCCESS fetch for " + str(channel))
        return t
    except Exception:
        print("failed fetch for " + str(channel))
Code example #20
File: test_archive.py Project: tjma12/gwsumm
def test_write_archive(delete=True):
    empty_globalv()
    data.add_timeseries(TEST_DATA)
    data.add_timeseries(create([1, 2, 3, 4, 5],
                               dt=60., channel='X1:TEST-TREND.mean'))
    data.add_timeseries(create([1, 2, 3, 2, 1],
                               series_class=StateVector,
                               channel='X1:TEST-STATE_VECTOR'))
    data.add_spectrogram(create([[1, 2, 3], [3, 2, 1], [1, 2, 3]],
                                series_class=Spectrogram,
                                channel='X1:TEST-SPECTROGRAM'))
    t = EventTable(random.random((100, 5)), names=['time', 'a', 'b', 'c', 'd'])
    t.meta['segments'] = SegmentList([Segment(0, 100)])
    triggers.add_triggers(t, 'X1:TEST-TABLE,testing')
    fname = tempfile.mktemp(suffix='.h5', prefix='gwsumm-tests-')
    try:
        archive.write_data_archive(fname)
        archive.write_data_archive(fname)  # test again to validate backups
    finally:
        if delete and os.path.isfile(fname):
            os.remove(fname)
    return fname
Code example #21
File: utils.py Project: andrew-lundgren/gwdetchar
def table_from_segments(flagdict, sngl_burst=False, snr=10., frequency=100.):
    """Build an `EventTable` from a `DataQualityDict`
    """
    rows = []
    if sngl_burst:
        names = ("peak", "peak_frequency", "snr", "channel")
        def row(seg, channel):
            a, b = map(float, seg)
            return a, frequency, snr, channel
    else:
        names = ("time", "frequency", "start_time", "end_time",
                 "snr", "channel")
        def row(seg, channel):
            a, b = map(float, seg)
            return a, frequency, a, b, snr, channel

    for name, flag in flagdict.items():
        rows.extend(map(partial(row, channel=name), flag.active))
    table = EventTable(rows=rows, names=names)
    if sngl_burst:  # add tablename for GWpy's ligolw writer
        table.meta["tablename"] = "sngl_burst"
    return table
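
As a quick illustration of the helper above, a sketch that builds both table flavours from a single flag (the flag name and segments are invented):

from gwpy.segments import DataQualityDict, DataQualityFlag

segs = DataQualityDict()
segs["X1:TEST-FLAG:1"] = DataQualityFlag(active=[(0, 4), (8, 10)])
standard = table_from_segments(segs)                  # time/frequency/start_time/end_time/snr/channel
bursts = table_from_segments(segs, sngl_burst=True)   # peak/peak_frequency/snr/channel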
Code example #22
def searchDB(form):

    # process the data in form.cleaned_data as required
    glitchclass = str(form.cleaned_data['glitchclass'])

    SI_glitches = EventTable.fetch('gravityspy', 'trainingset WHERE \"Label\" = \'{0}\''.format(glitchclass), columns = ['gravityspy_id', 'Filename1', 'Filename2', 'Filename3', 'Filename4', 'ifo', 'snr', 'peak_frequency', 'Label']).to_pandas()

    SI_glitches['url1'] = SI_glitches[['ifo', 'Filename1']].apply(makelink, axis=1)
    SI_glitches['url2'] = SI_glitches[['ifo', 'Filename2']].apply(makelink, axis=1)
    SI_glitches['url3'] = SI_glitches[['ifo', 'Filename3']].apply(makelink, axis=1)
    SI_glitches['url4'] = SI_glitches[['ifo', 'Filename4']].apply(makelink, axis=1)

    return SI_glitches
Code example #23
def main():
    # get the command line args
    args = parser()
    np.random.seed(args.seed)

    # set path to file
    cur_path = os.path.dirname(__file__)
    new_path = os.path.relpath(args.dataset, cur_path)

    # load dataset
    data = load_data(new_path)

    # redefine things for conciseness
    Tobs = args.Tobs  # observation time
    fs = args.fsample  # sampling frequency
    dets = args.detectors  # detectors
    ndet = len(dets)  # number of detectors
    N = Tobs * fs  # the total number of time samples
    n = N // 2 + 1  # the number of frequency bins
    tmp_bank = args.temp_bank  # template bank file
    f_low = args.cutoff_freq  # cutoff frequency used in template generation

    psds = [gen_psd(fs, Tobs, op='AdvDesign', det=d) for d in args.detectors]
    wpsds = (2.0 / fs) * np.ones(
        (ndet, n))  # define effective PSD for whitened data

    # load template bank
    tmp_bank = np.array(
        EventTable.read(tmp_bank,
                        format='ligolw.sngl_inspiral',
                        columns=['mass1', 'mass2', 'eta', 'mchirp']))

    # loop over stuff
    output, chi_test = looper(data, tmp_bank, Tobs, fs, dets, psds, wpsds,
                              args.basename, args.w_basename, args.cutoff_freq,
                              args.wave_bank)
    chi_test = [chi_test, data[1]]
    output = [output, data[1]]

    # save list of rho for test signals and test noise
    pickle_out = open("%srho_values.pickle" % args.basename, "wb")
    pickle.dump(output, pickle_out)
    pickle_out.close()

    # save list of chi rho for test purposes only
    pickle_out = open("%schirho_values.pickle" % args.basename, "wb")
    pickle.dump(chi_test, pickle_out)
    pickle_out.close()
Code example #24
def training_set_raw_data(filename,
                          format,
                          duration=8,
                          sample_frequency=4096,
                          verbose=False,
                          **kwargs):
    """Obtain the raw timeseries for the whole training set

    Parameters:

        filename (str):

        format (str):

        duration (int, optional):

        sample_frequency (int, optional):

        verbose (bool, optional):

    Returns:

        A file containing the raw timeseries data of the training set
    """
    logger = log.Logger('Gravity Spy: Obtaining TimeSeries'
                        ' Data For Trainingset')
    trainingset_table = EventTable.fetch(
        'gravityspy',
        'trainingsetv1d1',
        columns=['event_time', 'ifo', 'true_label'])
    for ifo, gps, label in zip(trainingset_table['ifo'],
                               trainingset_table['event_time'],
                               trainingset_table['true_label']):
        logger.info('Obtaining sample {0} with gps {1} from '
                    '{2}'.format(label, gps, ifo))
        data = fetch_data(ifo,
                          gps,
                          duration=duration,
                          sample_frequency=sample_frequency,
                          verbose=verbose,
                          **kwargs)
        logger.info('Writing Sample To File..')
        data.write(filename,
                   format=format,
                   append=True,
                   path='/data/{0}/{1}/'.format(label, gps))
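
A hedged usage sketch of the function above; the output filename and format are illustrative only:

# hypothetical call: append 8 s of 4096 Hz data for every training-set event to one HDF5 file
training_set_raw_data('trainingset_raw.h5', 'hdf5', duration=8, sample_frequency=4096, verbose=True)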
Code example #25
def Get_Rates_3(chunks, segs, verbose = False):
    """Returns the glitch rates for a given set of time chunks
    defined by a list of start times, with an end time at the last entry.
    
    Arguments:
    chunks -- Sorted list of times representing the beginnings of the 
              time periods for which rate is to be calculated, with 'end' 
              tacked on.
    segs -- Ordered and non-overlapping SegmentList such that every 
            element in 'chunks' (except the last one) is in an entry in 
            'segs'.
    verbose -- Set to 'True' if you want to see the ends of each chunk in
               'chunks' printed as it is processed.
    
    Returns:
    normcounts -- A list of glitch rates (Hz) associated with each time
                  period represented in 'chunks'."""
    traced = False
    normcounts = []
    j = 0
    for i in range(len(chunks)-1):
        while not chunks[i] in segs[j]:
            j = j+1
        segend = segs[j][1]
        if chunks[i+1]>segend:
            chunkend = segend
        else:
            chunkend = chunks[i+1]
        if verbose:
            print(from_gps(chunks[i]), from_gps(chunkend))
        files = find_trigger_files('L1:GDS-CALIB_STRAIN', 'Omicron', chunks[i], chunkend)
        if len(files)>0:
            events = EventTable.read(files, format='ligolw', tablename='sngl_burst', 
                                     columns=['peak','peak_time_ns', 'peak_frequency', 'snr'])
            events = events[(events['peak']>=chunks[i]) & (events['peak']<chunkend)]  
            counts = len(events['peak'])
            length = chunkend - chunks[i]
            normcount = counts/(length)
            normcounts.append(normcount)
        else:
            normcounts.append(0)
        
    return normcounts
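
A sketch of calling the function above; the GPS times are placeholders chosen so that every chunk start (except the final entry) falls inside a segment of 'segs', as the docstring requires, and the call assumes the Omicron trigger archive is reachable through find_trigger_files:

from gwpy.segments import Segment, SegmentList

chunks = [1187000000, 1187000600, 1187001200, 1187001800]  # hypothetical chunk starts + end
segs = SegmentList([Segment(1187000000, 1187001800)])
rates = Get_Rates_3(chunks, segs, verbose=True)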
Code example #26
File: histogram-1.py Project: gwpy/gwpy.github.io
from gwpy.table import EventTable
events = EventTable.fetch_open_data(
    "GWTC-1-confident",
    columns=("mass1", "mass2"),
)
events.add_column(events["mass1"] + events["mass2"], name="mtotal")
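
The excerpt stops after building the 'mtotal' column; a sketch of the histogram step that presumably follows in the full example (the bin count here is arbitrary):

plot = events.hist("mtotal", bins=20)
plot.show()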
Code example #27
File: rate_binned.py Project: rpfisher/gwpy
The data from which these events were generated are a simulation of Gaussian noise
with the Advanced LIGO design spectrum, and so don't actually contain any real
gravitational waves, but will help tune the algorithm to improve detection of
future, real signals.
"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = 'gwpy.table'

# First, we import the `EventTable` object and read in a set of events from
# a LIGO_LW-format XML file containing a
# :class:`sngl_burst <glue.ligolw.lsctables.SnglBurstTable>` table
from gwpy.table import EventTable
events = EventTable.read(
    '../../gwpy/tests/data/H1-LDAS_STRAIN-968654552-10.xml.gz',
    format='ligolw.sngl_burst', columns=['time', 'snr'])

# .. note::
#
#    Here we manually specify the `columns` to read in order to optimise
#    the `read()` operation to parse only the data we actually need.

# Now we can use the :meth:`~EventTable.binned_event_rates` method to
# calculate the event rate in a number of bins of SNR.
rates = events.binned_event_rates(1, 'snr', [2, 3, 5, 8], operator='>=',
                                  start=968654552, end=968654562)
# .. note::
#
#    The list `[2, 3, 5, 8]` and operator `>=` specify SNR thresholds of
#    2, 3, 5, and 8.
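
The object returned by binned_event_rates can be plotted directly, as in the full gwpy example; a sketch of that step, with the label keyword and axis label as assumptions:

plot = rates.plot(label='key')
ax = plot.gca()
ax.set_ylabel('Event rate [Hz]')
plot.show()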
Code example #28
File: rate_binned-1.py Project: gwpy/gwpy.github.io
from gwpy.table import EventTable
events = EventTable.read('H1-LDAS_STRAIN-968654552-10.xml.gz',
                         tablename='sngl_burst', columns=['time', 'snr'])
Code example #29
File: scatter-1.py Project: gwpy/gwpy.github.io
from gwpy.table import EventTable
events = EventTable.read(
    'H1-LDAS_STRAIN-968654552-10.xml.gz', format='ligolw.sngl_burst',
    columns=['time', 'central_freq', 'snr'])
Code example #30
File: scatter-1.py Project: gwpy/gwpy.github.io
from gwpy.table import EventTable
events = EventTable.read(
    'H1-LDAS_STRAIN-968654552-10.xml.gz', tablename='sngl_burst',
    columns=['peak', 'central_freq', 'snr'])
Code example #31
File: scatter.py Project: gwpy/gwpy
"""Plotting an `EventTable` in a scatter

We can use GWpy's `EventTable` to download the catalogue of gravitational-wave
detections, and create a scatter plot to investigate the mass distribution
of events.
"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = 'gwpy.table'

# First, we can download the ``'GWTC-1-confident'`` catalogue using
# :meth:`EventTable.fetch_open_data`:

from gwpy.table import EventTable
events = EventTable.fetch_open_data(
    "GWTC-1-confident",
    columns=("mass1", "mass2", "E_rad", "distance"),
)

# We can now make a scatter plot by specifying the x- and y-axis columns,
# and (optionally) the colour:

plot = events.scatter("mass1", "mass2", color="E_rad")
plot.colorbar(label="E_rad [{}]".format(r"M$_{\odot}$ c$^{2}$"))
plot.show()

# We can similarly plot how the total event mass is distributed with
# distance.  First we have to build the total mass (``'mtotal'``) column
# from the component masses:

events.add_column(events["mass1"] + events["mass2"], name="mtotal")
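
The excerpt ends here; in the full gwpy example the next step is, roughly, another scatter call using the new column:

plot = events.scatter("distance", "mtotal")
plot.show()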
Code example #32
File: triggers.py Project: duncanmmacleod/hveto
def get_triggers(channel, etg, segments, cache=None, snr=None, frange=None,
                 raw=False, trigfind_kwargs={}, **read_kwargs):
    """Get triggers for the given channel
    """
    etg = _sanitize_name(etg)
    # format arguments
    try:
        readfmt = read_kwargs.pop("format", DEFAULT_FORMAT[etg])
    except KeyError:
        raise ValueError("unsupported ETG {!r}".format(etg))
    trigfind_kwargs, read_kwargs = _format_params(
        channel,
        etg,
        readfmt,
        trigfind_kwargs,
        read_kwargs
    )

    # find triggers
    if cache is None:
        cache = find_trigger_files(channel, etg, segments, **trigfind_kwargs)

    # read files
    tables = []
    for segment in segments:
        segaslist = SegmentList([segment])
        segcache = io_cache.sieve(cache, segment=segment)
        # try and work out if cache overextends segment (so we need to crop)
        cachesegs = io_cache.cache_segments(segcache)
        outofbounds = abs(cachesegs - segaslist)
        if segcache:
            if len(segcache) == 1:  # just pass the single filename
                segcache = segcache[0]
            new = EventTable.read(segcache, **read_kwargs)
            new.meta = {k: new.meta[k] for k in TABLE_META if new.meta.get(k)}
            if outofbounds:
                new = new[new[new.dtype.names[0]].in_segmentlist(segaslist)]
            tables.append(new)
    if len(tables):
        table = vstack_tables(tables)
    else:
        table = EventTable(names=read_kwargs.get(
            'columns', ['time', 'frequency', 'snr']))

    # parse time, frequency-like and snr-like column names
    columns = table.dtype.names
    tcolumn = columns[0]
    fcolumn = columns[1]
    scolumn = columns[2]

    # filter
    keep = numpy.ones(len(table), dtype=bool)
    if snr is not None:
        keep &= table[scolumn] >= snr
    if frange is not None:
        keep &= table[fcolumn] >= frange[0]
        keep &= table[fcolumn] < frange[1]
    table = table[keep]

    # return basic table if 'raw'
    if raw:
        return table

    # rename time column so that all tables match in at least that
    if tcolumn != "time":
        table.rename_column(tcolumn, 'time')

    # add channel column to identify all triggers
    table.add_column(table.Column(data=numpy.repeat(channel, len(table)),
                                  name='channel'))

    table.sort('time')
    return table
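
A hedged sketch of calling the function above over a single segment; the channel name, ETG, and GPS span are placeholders, and the call assumes an Omicron trigger archive is reachable through find_trigger_files:

from gwpy.segments import Segment, SegmentList

segs = SegmentList([Segment(1187000000, 1187003600)])
trigs = get_triggers('L1:GDS-CALIB_STRAIN', 'omicron', segs, snr=6.0)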
Code example #33
File: tiles-1.py Project: gwpy/gwpy.github.io
from gwpy.table import EventTable
events = EventTable.read(
    'H1-LDAS_STRAIN-968654552-10.xml.gz', tablename='sngl_burst',
    columns=['time', 'central_freq', 'bandwidth', 'duration', 'snr'])