Example #1
# imports assumed from the enclosing module; ligolw_cafe historically
# lived in pylal (later lalburst.cafe), and remove_too_short_segments()
# is a helper defined elsewhere in the same module
import math

from glue import segments
from pylal import ligolw_cafe


def compute_segment_lists(seglists, offset_vectors, min_segment_length, pad):
    # don't modify the original
    seglists = seglists.copy()

    # ignore offset vectors referencing instruments we don't have
    offset_vectors = [
        offset_vector for offset_vector in offset_vectors
        if set(offset_vector.keys()).issubset(set(seglists.keys()))
    ]

    # cull too-short single-instrument segments from the input
    # segmentlist dictionary;  this can significantly increase the
    # speed of the get_coincident_segmentlistdict() function when the
    # input segmentlists have had many data quality holes poked out
    # of them
    remove_too_short_segments(seglists, min_segment_length, pad)

    # extract the segments that are coincident under the time slides
    new = ligolw_cafe.get_coincident_segmentlistdict(seglists, offset_vectors)

    # round to integer boundaries because lalapps_StringSearch can't
    # accept non-integer start/stop times
    # FIXME:  fix that in lalapps_StringSearch
    for seglist in new.values():
        for i in range(len(seglist)):
            seglist[i] = segments.segment(int(math.floor(seglist[i][0])),
                                          int(math.ceil(seglist[i][1])))

    # intersect with the original segments to ensure the rounding
    # hasn't expanded anything beyond its original bounds
    new &= seglists

    # remove any segments the rounding has made too short
    remove_too_short_segments(new, min_segment_length, pad)

    # done
    return new
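
A minimal usage sketch (hypothetical instrument names, times, and cut
parameters; assumes glue.segments for the segment types and that the
function above and its remove_too_short_segments() helper are in scope):

from glue import segments

seglists = segments.segmentlistdict({
    "H1": segments.segmentlist([segments.segment(100.0, 300.5)]),
    "L1": segments.segmentlist([segments.segment(150.25, 400.0)]),
})
# offset vectors map instrument name to time-slide offset; plain dicts
# are enough for the .keys() subset test above
offset_vectors = [{"H1": 0.0, "L1": 0.0}, {"H1": 0.0, "L1": 50.0}]

new = compute_segment_lists(seglists, offset_vectors,
                            min_segment_length=16.0, pad=4.0)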
Example #2
# imports assumed from the enclosing pipeline script:  power is the
# lalapps power-pipeline helper module, and ligolw_cafe historically
# lived in pylal (later lalburst.cafe)
import math
import sys

from glue import segments
from lalapps import power
from pylal import ligolw_cafe


def compute_segment_lists(seglistdict, time_slides, minimum_gap, timing_params, full_segments = True, verbose = False):
	if verbose:
		print("constructing segment list ...", file=sys.stderr)

	seglistdict = seglistdict.copy()

	if not full_segments:
		# cull too-short single-instrument segments from the input
		# segmentlist dictionary;  this can significantly increase
		# the speed of the get_coincident_segmentlistdict()
		# function when the input segmentlists have had many data
		# quality holes poked out of them
		power.remove_too_short_segments(seglistdict, timing_params)

		# extract the segments that are coincident under the time
		# slides
		new = ligolw_cafe.get_coincident_segmentlistdict(seglistdict, time_slides)

		# adjust surviving segment lengths up to the next integer
		# number of PSDs
		for seglist in new.values():
			# Try Adjusting Upper Bounds:

			# count the number of PSDs in each segment
			psds = [power.psds_from_job_length(timing_params, float(abs(seg))) for seg in seglist]

			# round up to the nearest integer, requiring at
			# least one PSD per segment
			psds = [int(math.ceil(max(n, 1.0))) for n in psds]

			# compute the duration of each job
			durations = [power.job_length_from_psds(timing_params, n) for n in psds]

			# update segment list
			for i, seg in enumerate(seglist):
				seglist[i] = segments.segment(seg[0], seg[0] + durations[i])

			# and take intersection with original segments to
			# not exceed original bounds
			new &= seglistdict

			# Try Adjusting Lower Bounds:

			# count the number of PSDs in each segment
			psds = [power.psds_from_job_length(timing_params, float(abs(seg))) for seg in seglist]

			# round up to the nearest integer, requiring at
			# least one PSD per segment
			psds = [int(math.ceil(max(n, 1.0))) for n in psds]

			# compute the duration of each job
			durations = [power.job_length_from_psds(timing_params, n) for n in psds]

			# update segment list
			for i, seg in enumerate(seglist):
				seglist[i] = segments.segment(seg[1] - durations[i], seg[1])

			# and take intersection with original segments to
			# not exceed original bounds
			new &= seglistdict


		# try to fill gaps between jobs
		new.protract(minimum_gap / 2).contract(minimum_gap / 2)

		# and take intersection with original segments to not
		# exceed original bounds
		seglistdict &= new

	# remove segments that are too short
	power.remove_too_short_segments(seglistdict, timing_params)

	# done
	return seglistdict
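
The protract()/contract() pair above bridges inter-job gaps shorter
than minimum_gap; a self-contained illustration of that trick using
only glue.segments (hypothetical numbers):

from glue import segments

sl = segments.segmentlist([segments.segment(0, 100),
                           segments.segment(104, 200),   # 4 s gap
                           segments.segment(260, 300)])  # 60 s gap
minimum_gap = 10.0
# widen every segment by minimum_gap / 2 at both ends (coalescing
# merges any pair whose gap was shorter than minimum_gap), then
# shrink everything back by the same amount
sl.protract(minimum_gap / 2).contract(minimum_gap / 2)
# sl now covers [0, 200] and [260, 300]:  the 4 s gap has been
# filled, the 60 s gap survives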