Example #1
    def _consolidate(self):
        new_ranges = SortedCollection(key=itemgetter(0))
        prev_begin, prev_end = self._ranges[0]
        for begin, end in self._ranges[1:]:
            if prev_end >= begin - 1:
                # Consolidate the previous and current ranges:
                prev_end = max(prev_end, end)
            else:
                # Add the previous range, and continue with the current range
                # as the seed for the next iteration:
                new_ranges.insert((prev_begin, prev_end))
                prev_begin = begin
                prev_end = end

        new_ranges.insert((prev_begin, prev_end))

        self._ranges = new_ranges
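
A minimal, self-contained sketch of the same merge idea, assuming integer-valued, inclusive (begin, end) ranges kept sorted by begin (a plain list stands in for SortedCollection here; like _consolidate above, it treats touching ranges such as (1, 2) and (3, 5) as mergeable, and _consolidate additionally assumes self._ranges is non-empty):

def consolidate(ranges):
    # ranges: (begin, end) tuples sorted by begin, bounds inclusive
    if not ranges:
        return []
    merged = []
    prev_begin, prev_end = ranges[0]
    for begin, end in ranges[1:]:
        if prev_end >= begin - 1:  # overlapping or adjacent
            prev_end = max(prev_end, end)
        else:
            merged.append((prev_begin, prev_end))
            prev_begin, prev_end = begin, end
    merged.append((prev_begin, prev_end))
    return merged

assert consolidate([(1, 2), (3, 5), (8, 9)]) == [(1, 5), (8, 9)]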
Example #2
class RangeCache(object):
    """
    RangeCache is a data structure that tracks a finite set of
      ranges (a range is a 2-tuple consisting of a numeric start
      and numeric length). New ranges can be added via the `push`
      method, and if such a call causes the capacity to be exceeded,
      then the "oldest" range is removed. The `get` method implements
      an efficient lookup for a single value that may be found within
      one of the ranges.
    """
    def __init__(self, capacity,
                 start_key=lambda o: o[0],
                 length_key=lambda o: o[1]):
        """
        @param key: A function that fetches the range start from an item.
        """
        super(RangeCache, self).__init__()
        self._ranges = SortedCollection(key=start_key)
        self._lru = BoundedLRUQueue(capacity, key=start_key)
        self._start_key = start_key
        self._length_key = length_key

    def push(self, o):
        """
        Add a range to the cache.

        If `key` is not provided to the constructor, then
          `o` should be a 3-tuple:
            - range start (numeric)
            - range length (numeric)
            - range item (object)
        """
        self._ranges.insert(o)
        popped = self._lru.push(o)
        if popped is not None:
            self._ranges.remove(popped)

    def touch(self, o):
        self._lru.touch(o)

    def get(self, value):
        """
        Search for the numeric `value` within the ranges
          tracked by this cache.
        @raise ValueError: if the value is not found in the range cache.
        """
        hit = self._ranges.find_le(value)
        if value < self._start_key(hit) + self._length_key(hit):
            return hit
        raise ValueError("%s not found in range cache" % value)

    @staticmethod
    def test():
        q = RangeCache(2)

        x = None
        try:
            x = q.get(0)
        except ValueError:
            pass
        assert x is None

        x = None
        try:
            x = q.get(1)
        except ValueError:
            pass
        assert x is None

        q.push((1, 1, [0]))

        x = None
        try:
            x = q.get(0)
        except ValueError:
            pass
        assert x is None

        assert q.get(1) == (1, 1, [0])
        assert q.get(1.99) == (1, 1, [0])
        x = None
        try:
            x = q.get(2.01)
        except ValueError:
            pass
        assert x is None

        q.push((3, 1, [1]))
        assert q.get(1) == (1, 1, [0])
        assert q.get(3) == (3, 1, [1])

        q.push((5, 1, [2]))
        x = None
        try:
            x = q.get(1)
        except ValueError:
            pass
        assert x is None

        assert q.get(3) == (3, 1, [1])
        assert q.get(5) == (5, 1, [2])

        q.touch((3, 1, [1]))
        q.push((7, 1, [3]))

        assert q.get(3) == (3, 1, [1])
        assert q.get(7) == (7, 1, [3])
        x = None
        try:
            x = q.get(5)
        except ValueError:
            pass
        assert x is None

        return True
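
RangeCache.get depends on SortedCollection.find_le returning the item with the largest key less than or equal to the probe value, which makes each lookup O(log n). A standalone sketch of that lookup using only the standard library's bisect module (the parallel starts list plays the role of SortedCollection's internal key list):

import bisect

def find_range(starts, ranges, value):
    # starts: sorted list of range starts; ranges: parallel list of
    # (start, length, item) tuples
    i = bisect.bisect_right(starts, value)
    if i == 0:
        raise ValueError("%s not found" % value)
    hit = ranges[i - 1]            # candidate with the largest start <= value
    if value < hit[0] + hit[1]:    # inside [start, start + length)?
        return hit
    raise ValueError("%s not found" % value)

ranges = [(1, 1, 'a'), (3, 1, 'b')]
starts = [r[0] for r in ranges]
assert find_range(starts, ranges, 1.5)[2] == 'a'

Note that, as in RangeCache, only the single candidate with the greatest start is checked, so lookups implicitly assume the tracked ranges do not overlap.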
Example #3
def convertTrainingData():
    articleCols = [
        'yUp', 'yDown', 'yGetCom', 'yCreateCom', 'yGetSub', 'yCreateSub',
        'id', 'rating', 'viewCount', 'upVoteCount', 'downVoteCount',
        'getCommentsCount', 'createComment', 'notCommentRating',
        'PgetComment', 'PcreateComment', 'Pup', 'Pdown',
        'getSubarticlesCount', 'createSubarticle', 'notSubarticleRating',
        'PgetSub', 'PcreateSub'
    ]
    # Subarticle and comment files share the first 18 columns (through
    # 'Pdown'); only the article files carry the subarticle columns.
    otherCols = articleCols[:18]

    # Each output file starts with its header row.
    data = {}
    for prefix, cols in (('article', articleCols),
                         ('subarticle', otherCols),
                         ('comment', otherCols)):
        for suffix in ('Train', 'Test', 'CV'):
            data[prefix + suffix + '.csv'] = [list(cols)]

    arts = articles[:]
    random.shuffle(arts)

    cvLength = int(0.05 * len(arts))
    testLength = int(0.10 * len(arts))

    cvArts = SortedCollection(arts[0:cvLength], key=itemgetter('id'))
    testArts = SortedCollection(arts[cvLength:(cvLength + testLength)],
                                key=itemgetter('id'))
    trainArts = SortedCollection(arts[(cvLength + testLength):],
                                 key=itemgetter('id'))

    cvSubs = SortedCollection([], key=getId)
    testSubs = SortedCollection([], key=getId)
    trainSubs = SortedCollection([], key=getId)

    cvComs = SortedCollection([], key=getId)
    testComs = SortedCollection([], key=getId)
    trainComs = SortedCollection([], key=getId)

    def contains(lst, Id):
        try:
            lst.find(str(Id))
            return True
        except ValueError:
            return False

    for sub in Subarticles.find():
        if contains(testArts, sub['parentId']):
            testSubs.insert(sub)
        elif contains(cvArts, sub['parentId']):
            cvSubs.insert(sub)
        else:
            trainSubs.insert(sub)

    replies = []
    for comment in Comments.find():
        if comment['commentableType'] == 'article':
            if contains(cvArts, comment['commentableId']):
                cvComs.insert(comment)
            elif contains(testArts, comment['commentableId']):
                testComs.insert(comment)
            else:
                trainComs.insert(comment)
        elif comment['commentableType'] == 'subarticle':
            if contains(cvSubs, comment['commentableId']):
                cvComs.insert(comment)
            elif contains(testSubs, comment['commentableId']):
                testComs.insert(comment)
            else:
                trainComs.insert(comment)
        else:
            replies.append(comment)

    for comment in replies:
        if comment['commentableType'] == 'article':
            if contains(cvArts, comment['commentableId']):
                cvComs.insert(comment)
            elif contains(testArts, comment['commentableId']):
                testComs.insert(comment)
            else:
                trainComs.insert(comment)
        elif comment['commentableType'] == 'subarticle':
            if contains(cvSubs, comment['commentableId']):
                cvComs.insert(comment)
            elif contains(testSubs, comment['commentableId']):
                testComs.insert(comment)
            else:
                trainComs.insert(comment)
        elif comment['commentableType'] == 'comment':
            if contains(cvComs, comment['commentableId']):
                cvComs.insert(comment)
            elif contains(testComs, comment['commentableId']):
                testComs.insert(comment)
            else:
                trainComs.insert(comment)
        else:
            print "Comment on a comment!"

    print("cvArts: {}\tcvSubs: {}\tcvComs: {}").format(len(cvArts),
                                                       len(cvSubs),
                                                       len(cvComs))
    print("testArts: {}\ttestSubs: {}\ttestComs: {}").format(
        len(testArts), len(testSubs), len(testComs))
    print("trainArts: {}\ttrainSubs: {}\ttrainComs: {}").format(
        len(trainArts), len(trainSubs), len(trainComs))
    sys.stdout.flush()

    views = Views.find().sort("_id", 1)
    viewLength = views.count()
    pbar = progressbar.ProgressBar(
        widgets=[
            progressbar.Timer(),
            progressbar.ETA(),
            progressbar.Bar(),
            progressbar.Percentage()
        ],
        maxval=viewLength).start()

    processed = 0
    for view in views:
        pbar.update(processed)
        processed += 1
        x, y = prepTrainingSet(view)
        if len(x):
            dat = np.append(y, x).tolist()
            if view['viewableType'] == 'article':
                if contains(cvArts, view['viewableId']):
                    data['articleCV.csv'].append(dat)
                elif contains(testArts, view['viewableId']):
                    data['articleTest.csv'].append(dat)
                else:
                    data['articleTrain.csv'].append(dat)
            elif view['viewableType'] == 'subarticle':
                if contains(cvSubs, view['viewableId']):
                    data['subarticleCV.csv'].append(dat)
                elif contains(testSubs, view['viewableId']):
                    data['subarticleTest.csv'].append(dat)
                else:
                    data['subarticleTrain.csv'].append(dat)
            elif view['viewableType'] == 'comment':
                if contains(cvComs, view['viewableId']):
                    data['commentCV.csv'].append(dat)
                elif contains(testComs, view['viewableId']):
                    data['commentTest.csv'].append(dat)
                else:
                    data['commentTrain.csv'].append(dat)
            else:
                print "Unknown viewableType: {}"

    pbar.finish()
    print "Writing results"
    for filename, lst in data.items():
        with open(filename, 'wb') as csvfile:
            writer = csv.writer(csvfile)
            for line in lst:
                writer.writerow(line)
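
The function above shuffles the articles once, carves off 5% for cross-validation and 10% for test, and then routes each subarticle, comment, and view to whichever split owns its parent, so dependent records never straddle splits. A minimal sketch of the proportional split itself, assuming only the standard library:

import random

def split(items, cv_frac=0.05, test_frac=0.10, seed=None):
    items = list(items)
    random.Random(seed).shuffle(items)
    n_cv = int(cv_frac * len(items))
    n_test = int(test_frac * len(items))
    return (items[:n_cv],                 # cross-validation
            items[n_cv:n_cv + n_test],    # test
            items[n_cv + n_test:])        # train

cv, test, train = split(range(100), seed=42)
assert (len(cv), len(test), len(train)) == (5, 10, 85)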
Example #4
        "upVoteCount": 0,
        "downVoteCount": 0,
        "getSubarticlesCount": 0,
        "getCommentsCount": 0,
        "createComment": 0,
        "createSubarticle": 0,
        "notSubarticleRating": 1,
        "notCommentRating": 1,
        "PgetComment": 0,
        "PgetSub": 0,
        "PcreateComment": 0,
        "PcreateSub": 0,
        "Pup": 0,
        "Pdown": 0,
    }
    articles.insert(art)


def findArticle(articleId):
    articleId = str(articleId)
    try:
        return articles.find(articleId)
    except ValueError:
        return None


print "Processing subarticle"
subarticles = SortedCollection([], key=itemgetter('id'))
for subarticle in ArticlesDB.subarticle.find():
    sub = {
Example #5
class RequestProcessor:

    def __init__(self, cores_number=1, request_list=None, HEAP=4000,
                 memory_size=100, cores_computation_error_sigma=0,
                 GC_time_coefficient=1):
        self.__timeline = SortedCollection([], TimelineEvent.getEventTime)
        self.__cores_number = cores_number
        self.__free_cores = cores_number
        self.__cores = [Core(i, cores_computation_error_sigma) for i in range(cores_number)]

        self.__queue = []

        self.__HEAP = HEAP
        self.__free_HEAP = HEAP
        self.__garbage_collector = GarbageCollector(time_coefficient = GC_time_coefficient)
        self.__garbage_collector_status = False
        self.__garbage_collector_session = (None, None)

        self.__processed_request_list = []

        self.__memory_list = [0] * memory_size
        if request_list is not None:
            for request in request_list:
                event = RequestEvent(request.getRequestStartTime(), EventType.new_request, request.getRequestNumber())
                self.__timeline.insert(event)
        self.__computation_times = []

    def __getHEAPLoad__(self):
        # float() keeps the load fractional even under Python 2's
        # truncating integer division
        return float(self.__HEAP - self.__free_HEAP) / self.__HEAP

    def __getFirstFreeCore__(self):
        'returns first free core, or -1 if all cores are busy'
        for core_number, core in enumerate(self.__cores):
            if core.getCoreStatus() == CoreStatus.free:
                return core_number
        return -1

    def __printSelfStatus__(self):
        print(self.__garbage_collector_status, self.__free_cores, self.__queue)

    def __memoryAvailable__(self, left_cell, right_cell):
        for i in range(left_cell, right_cell):
            if self.__memory_list[i] == 1:
                return False
        return True

    def __lockMemory__(self, left_cell, right_cell):
        for i in range(left_cell, right_cell):
            self.__memory_list[i] = 1

    def __releaseMemory__(self, left_cell, right_cell):
        for i in range(left_cell, right_cell):
            self.__memory_list[i] = 0


    def startGC(self, current_time):
        self.__garbage_collector_status = True

        start_garbage_collector = GCEvent(current_time, EventType.start_garbage_collector)
        time_coefficient = self.__garbage_collector.getTimeCoefficient()
        # collection time grows with how far the HEAP load is above the GC's
        # stop threshold
        collecting_time = ((self.__getHEAPLoad__() - self.__garbage_collector.getStopPercentage())
                           * self.__HEAP * time_coefficient)
        stop_garbage_collector = GCEvent(current_time + collecting_time, EventType.stop_garbage_collector)

        self.__garbage_collector_session = (start_garbage_collector, stop_garbage_collector)
        self.__timeline.insert_right(start_garbage_collector)
        self.__timeline.insert_right(stop_garbage_collector)

    def processNewRequest(self, request, new_request_event):
        free_core = self.__getFirstFreeCore__()

        if self.__garbage_collector_status:
            self.__queue.append(request)
        elif not self.__memoryAvailable__(request.getLeftCell(), request.getRightCell()):
            self.__queue.append(request)
            #print("lock")
        elif self.__free_cores >= 1:
            self.__free_cores -= 1
            self.__free_HEAP -= request.getHEAPUsage()
            core_occupation_event, request_finish_event, core_release_event = \
                self.__cores[free_core].processRequest(request, new_request_event.getEventTime())
            self.__timeline.insert_right(core_occupation_event)
            self.__timeline.insert(request_finish_event)
            self.__timeline.insert_right(core_release_event)
            self.__lockMemory__(request.getLeftCell(), request.getRightCell())
        else:
            self.__queue.append(request)

    def processCoreRelease(self, core_release_event):
        if not self.__garbage_collector_status:
            self.__free_cores += 1
            processed_request = self.__cores[core_release_event.getCoreId()].releaseCore()
            self.__releaseMemory__(processed_request.getLeftCell(), processed_request.getRightCell())
            self.__processed_request_list.append(processed_request)
        else:
            # GC is running: replace this event with an empty placeholder and
            # re-schedule the release after the GC session ends, preserving
            # the delay between GC start and the original release time
            self.__timeline.remove(core_release_event)
            empty_event = TimelineEvent(core_release_event.getEventTime(), EventType.empty_event)
            self.__timeline.insert(empty_event)
            delay = core_release_event.getEventTime() - self.__garbage_collector_session[0].getEventTime()
            core_release_event.resetEventTime(self.__garbage_collector_session[1].getEventTime() + delay)
            self.__timeline.insert_right(core_release_event)

    def processGSStop(self, gc_stop_event):
        self.__garbage_collector_status = False
        self.__garbage_collector_session = (None, None)
        self.__free_HEAP = self.__HEAP * (1 - self.__garbage_collector.getStopPercentage())
        # queued requests are drained by the main loop in processRequests


    def processRequests(self, request_list=None):

        if request_list:
            index = 0
            current_event = TimelineEvent()
            while index < len(self.__timeline) or self.__queue:
                # work on a snapshot of the queue: processNewRequest may
                # re-append a request that still cannot be scheduled
                queue_replicant = list(self.__queue)
                if index >= len(self.__timeline):
                    event = current_event
                else:
                    event = self.__timeline[index]

                if (self.__getHEAPLoad__() > self.__garbage_collector.getStartPercentage()
                        and not self.__garbage_collector_status):
                    self.startGC(event.getEventTime())

                while (queue_replicant and self.__getFirstFreeCore__() != -1
                       and not self.__garbage_collector_status):
                    request = queue_replicant.pop(0)
                    self.__queue.pop(0)
                    self.processNewRequest(request, event)

                if event != current_event:
                    if event.getEventType() == EventType.new_request:
                        self.processNewRequest(request_list[event.getRequestNumber()], event)
                    if event.getEventType() == EventType.core_release:
                        self.processCoreRelease(event)
                    if event.getEventType() == EventType.stop_garbage_collector:
                        self.processGSStop(event)

                current_event = event
                index += 1


    def getTimeline(self):
        return self.__timeline

    def printTimeline(self):
        for event in self.__timeline:
            event.printEvent()

    def getComputationTimes(self):
        return self.__computation_times

    def getProcessedRequestList(self):
        return self.__processed_request_list
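
The timeline relies on SortedCollection offering two insertion flavors; assuming the widely used ActiveState SortedCollection recipe (the usual source of this class), insert places an item to the left of any equal keys and insert_right to the right, which is how same-timestamp events are kept in a deliberate order above. A small sketch of the difference (the module path is hypothetical; the recipe is usually vendored directly):

from sortedcollection import SortedCollection  # hypothetical module path

timeline = SortedCollection(key=lambda e: e[0])  # events keyed by time
timeline.insert((5, 'finish'))
timeline.insert_right((5, 'release'))  # equal key: lands after 'finish'
timeline.insert((5, 'occupy'))         # equal key: lands before both
assert [name for _, name in timeline] == ['occupy', 'finish', 'release']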
Example #6
class PatternFrameSchedule(FrameScheduleBase):
    '''
    Schedules of this type assume that time and frequency are divided into a
    rectangular grid of time/frequency tiles. Time slots need not be adjacent.
    Channels in frequency must be adjacent. Time slots cannot overlap, and
    extend across all channels. Each pattern of time/frequency/owner
    allocations is mapped to an index.

    The action space stored in _action_space must include at least one
    time/frequency allocation pattern or this schedule will not work.
    '''
    stateTup = namedtuple('stateTup', 'time_ref frame_num_ref first_frame_num action_ind epoch_num')
    LinkTuple = namedtuple('LinkTuple', 'owner linktype')
    varTup = namedtuple('varTup', ('frame_offset tx_time valid tx_gain gains slot_bw schedule_seq max_scheds rf_freq'))
    
    PatternTuple = namedtuple("PatternTuple", 'owner len offset type bb_freq')
    
    
    gains = None
    slot_bw = None
    schedule_seq = None
    max_scheds = None
    
    # Note: _action_space is modified as a class-level variable. It is not
    # sent across during pickle/unpickle operations; remote machines must
    # have their class configured to match.
    _action_space = None
    
    sync_space = None
    num_actions = None
    
    def __init__(self, tx_time=None, frame_offset=None, time_ref=None, 
                 first_frame_num=None, frame_num_ref=None, valid=None,
                 tx_gain=None, max_schedules=2, action_ind=None,
                 rf_freq=None, slot_bw=0.0, epoch_num=None):
        
        if tx_time is not None:
            self.tx_time = time_spec_t(tx_time)
        else:
            self.tx_time = None
            
        self.frame_offset = frame_offset
        
        self.valid = valid
        
        
        # this is the list of schedule states this schedule object knows about.
        # The schedules are ordered by first_frame_num
        self.schedule_seq = SortedCollection(key=itemgetter(2))  
        self.max_scheds = max_schedules
        
        # use a default dict so slots with no initialized gain will use the default tx
        # gain 
        
        self.tx_gain = tx_gain
        self.gains = defaultdict(self.constant_factory(self.tx_gain))
        
        # set default values for all controllable parameters. These are what will be used
        # if the action space doesn't specify a value
        self.rf_freq = rf_freq
        
        self.slot_bw = slot_bw
        
        first_state = (time_ref, frame_num_ref, first_frame_num, action_ind, epoch_num)
        # only add the initial state if all the necessary params are defined
        if all( v is not None for v in first_state):
            self.add_schedule(*first_state)
            
    @staticmethod
    def constant_factory(value):
        return itertools.repeat(value).next
            
    def add_schedule(self, time_ref, frame_num_ref, first_frame_num, action_ind, epoch_num=None):
        '''
        Add a schedule to the end of the schedule queue, and if the queue is over 
        capacity, pop off the oldest element
        '''
        self.schedule_seq.insert((time_spec_t(time_ref).to_tuple(), frame_num_ref,
                                  first_frame_num, action_ind, epoch_num))
        
        if len(self.schedule_seq) > self.max_scheds:
            # find the first element in the list when sorted by frame number
            self.schedule_seq.remove(self.schedule_seq[0])
            
                    
    def compute_frame(self, frame_num=None):
        '''
        Given a frame number, produce an individual frame configuration
        '''
        if frame_num is None:
            sched = self.stateTup(*self.schedule_seq[0])
        else:
            try:
                sched_tup = self.schedule_seq.find_le(frame_num)
            except ValueError:
                sched_tup = self.schedule_seq[0]
            sched = self.stateTup(*sched_tup)
        
        #print "Frame num is %i, action ind is %i"%(frame_num, sched.action_ind)
        #print "schedule sequence is %s"%self.schedule_seq
            
        action = self._action_space[sched.action_ind]     
#        if "pattern" not in action:
#            # TODO: Make a better exception for when there aren't any patterns
#            raise KeyError("Expected at least one pattern object in the action space")  
#        else:
        frame_len = action["frame_len"]
        frame_delta = frame_num - sched.frame_num_ref
        
        t0 = time_spec_t(sched.time_ref) + frame_len*frame_delta
        
        frame_config = {"frame_len":frame_len,
                        "t0":t0,
                        "t0_frame_num":frame_num,
                        "first_frame_num":sched.first_frame_num,
                        "valid":self.valid,
                        "epoch_num":sched.epoch_num,
                        }
        
        # get all the parameters needed for computing each slot in frame_config
        
        if "rf_freq" in action:
            rf_freq = action["rf_freq"]
        else:
            rf_freq = self.rf_freq
        
        # get the list of gains per slot
        act_slots = action["slots"]
        gains = [self.gains[(s.owner, s.type)] for s in act_slots]

        slots = [SlotParamTuple(owner=s.owner, len=s.len, offset=s.offset, type=s.type,
                                rf_freq=rf_freq, bb_freq=s.bb_freq, bw=self.slot_bw,
                                tx_gain=gain) for gain, s in zip(gains, act_slots)]
        
        frame_config["slots"] = slots

        for s in slots:
            if s.type == "beacon":
                pass
                #print ("frame at time %s beacon slot at offset %f fr freq %f and "
                #       +"channel %f")%(frame_config["t0"], s.offset, s.rf_freq, s.bb_freq)
       
        return frame_config

     
    def store_current_config(self):
        '''
        No longer needed
        '''
        pass

    def store_tx_gain(self, owner, linktype, gain):
        '''
        Update the gain setting for the current and next schedules by owner and link type
        '''    
        self.gains[(owner, linktype)] = gain   
                
    
    def get_unique_links(self, frame_num):
        '''
        Return a list of unique owner-link type tuples 
        ''' 
        # get the schedule in effect for frame_num
        
        try:
            sched = self.stateTup(*self.schedule_seq.find_le(frame_num))
        except ValueError:
            # didn't find any frames less than or equal frame number, so return an empty
            # list
            return list()
            
        action = self._action_space[sched.action_ind]
        
        unique_links = set()
        # add links from the 'next' frame config
        for s in action["pattern"]["slots"]:
            unique_links.add(self.LinkTuple(s.owner, s.type))  
        
        return list(unique_links)
    
    def get_uplink_gain(self, owner):
        '''
        Get the uplink gain associated with an owner
        '''

        uplink_gain = self.gains[(owner, "uplink")]    
        
        return uplink_gain

    @property
    def time_ref(self):
        return time_spec_t(self.schedule_seq[-1][0])
    
    @time_ref.setter
    def time_ref(self, value):
        # ignore values here until redesign makes this unnecessary
        pass
     
    def __getstate__(self):
        '''
        load all the instance variables into a namedtuple and then return that as
        a plain tuple to cut down on the size of the pickled object
        '''
        try:
            inst_vars = self.__dict__.copy()
            inst_vars["schedule_seq"] = list(inst_vars["schedule_seq"])
            inst_vars["gains"] = dict(inst_vars["gains"])
            temp_tup = self.varTup(**inst_vars)
        except TypeError:
            found_fields = inst_vars.keys()
            expected_fields = self.varTup._fields
            raise TypeError(("The beacon class does not support adding or removing " +
                             "variables when pickling. " +
                             "Found %s, expected %s" % (found_fields, expected_fields)))

        return tuple(temp_tup)
            
    def __setstate__(self,b):
        '''
        load b, which will be a plain tuple, into a namedtuple and then convert that to
        this instance's __dict__ attribute
        '''
        try:
            temp_tup = self.varTup(*b)
            self.__dict__.update(temp_tup._asdict())
            
            self.schedule_seq = SortedCollection(temp_tup.schedule_seq,
                                                 key=itemgetter(2))
            self.gains = defaultdict(self.constant_factory(self.tx_gain))
            self.gains.update(temp_tup.gains)
            
        except TypeError:
            raise TypeError(("The beacon class does not support adding or removing " +
                             "variables when pickling"))

    def __cmp__(self, other):
        # legacy Python 2 ordering hook: must return an int, so delegate to
        # __eq__ instead of returning a boolean
        return 0 if self.__eq__(other) else -1

    def __eq__(self, other): 
        simp_vals_equal = all([ self.__dict__[key] == val for key,val 
                               in other.__dict__.iteritems() 
                               if (key != "gains") and (key != "schedule_seq")])
        
        gains_equal = dict(self.__dict__["gains"]) == dict(other.__dict__["gains"])
        seq_equal = list(self.__dict__["schedule_seq"]) == list(other.__dict__["schedule_seq"])
         
        return all([simp_vals_equal, gains_equal, seq_equal])
    
    def __repr__(self):
        
        s = ["PatternFrameSchedule(",
             "frame_offset=%r"%self.frame_offset,
             ", tx_time=%r"%self.tx_time,
             ", valid=%r"%self.valid,
             ", tx_gain=%r"%self.tx_gain,
             ", gains=%r"%dict(self.gains),
             ", slot_bw=%r"%self.slot_bw,
             ", schedule_seq=%r"%list(self.schedule_seq),
             ", max_scheds=%r"%self.max_scheds,
             ", rf_freq=%r"%self.rf_freq,
             ")"]
        
        repr_str = ''.join(s)    
        
        return repr_str
    
    @staticmethod
    def check_types(slot, fields):
        # verify that each field of the slot has the expected type
        types_valid = True
        failed_fields = []
        for field_name in slot._fields:
            if not isinstance(getattr(slot, field_name), fields[field_name]):
                wrong_type = type(getattr(slot, field_name))
                failed_fields.append((field_name, fields[field_name], wrong_type))
                types_valid = False
                
        return types_valid, failed_fields
       
    
    @staticmethod
    def load_pattern_set_from_file(pattern_file, set_name, fs):
        dev_log = logging.getLogger('developer')
        
        # sanitize path name and pull apart path from base file name
        abs_pattern_file = os.path.expandvars(os.path.expanduser(pattern_file))
        abs_pattern_file = os.path.abspath(abs_pattern_file)
        abs_pattern_dir = os.path.dirname(abs_pattern_file)
        pattern_basename = os.path.basename(abs_pattern_file)
        
        if os.path.isdir(abs_pattern_dir):
            sys.path.append(abs_pattern_dir)
        else:
            dev_log.error("pattern directory does not exist: %s",abs_pattern_dir)
            return False
        
        try:
            sanitized_pattern_file = os.path.splitext(pattern_basename)[0]
            group_module = __import__(sanitized_pattern_file)
            dev_log.info("using pattern sets from %s", group_module.__file__)
            
            pattern_set = getattr(group_module, set_name)
        except ImportError:
            dev_log.error("Could not import %s from directory %s",
                          pattern_basename, abs_pattern_dir)
            raise
        except AttributeError:
            dev_log.error("Pattern set %s not found in file %s",
                          set_name, group_module.__file__)
            raise
        
        slot_fields = dict([("owner",int),
                            ("len",float),
                            ("offset",float),
                            ("type",str),
                            ("rf_freq",float),
                            ("bb_freq",int),
                            ("bw",float),
                            ("tx_gain",float),])
        
        all_rf_freqs_found = True

        for m, frame in enumerate(pattern_set):
            # check that the pattern set includes an rf frequency for each action
            if "rf_freq_ind" not in frame:
                dev_log.warning("RF frequency index not specified in action number %i in Pattern set %s in file %s",
                                m, set_name, group_module.__file__)
                all_rf_freqs_found = False

            for n, slot in enumerate(frame["slots"]):
                types_valid, failed_fields = PatternFrameSchedule.check_types(slot, slot_fields)

                if not types_valid:
                    for failure in failed_fields:
                        dev_log.warning("Field %s in Slot %i in frame index %i failed field type validation. Type was %s but should be %s",
                                        failure[0], n, m, failure[2], failure[1])

        # log an error and raise an exception if any action was missing an rf
        # frequency field (raised after the loop so every action is checked)
        if not all_rf_freqs_found:
            dev_log.error("At least one action in the pattern file was missing an rf_freq_ind field")
            raise AttributeError

        # sort slots by order of offset and enforce that slot/frame
        # boundaries occur at integer samples
        for m, frame in enumerate(pattern_set):
            frame["slots"].sort(key=lambda slot: slot.offset)
            frame_len_rounded = round(frame["frame_len"]*fs)/fs

            # check that frame len is at an integer sample
            if frame["frame_len"] != frame_len_rounded:
                dev_log.warn("rounding frame len from %.15f to %.15f",
                             frame["frame_len"], frame_len_rounded)
                pattern_set[m]["frame_len"] = frame_len_rounded
            
        try:
            # do a limited amount of error checking
            for ind, frame in enumerate(pattern_set):
                for num, slot in enumerate(frame["slots"]):
                
                    offset_rounded = round(slot.offset*fs)/fs
                    len_rounded = round(slot.len*fs)/fs
                    
                    if slot.offset != offset_rounded:
                        dev_log.warn("rounding frame %d slot %d offset from %.15f to %.15f",
                                     ind, num, slot.offset,offset_rounded)
                        
                    if slot.len != len_rounded:
                        dev_log.warn("rounding frame %d slot %d len from %.15f to %.15f", 
                                     ind, num, slot.len, len_rounded)
                    
                    # more precision fun
                    end_of_slot = round( (offset_rounded + len_rounded)*fs)/fs        
                    
                    if end_of_slot > frame["frame_len"]:
                        raise InvalidFrameError(("slot %d with offset %f and len %f extends past " + 
                                                 "the end of the frame, len %f") % (num, slot.offset,
                                                  slot.len, frame["frame_len"]))
                        
                    pattern_set[ind]["slots"][num] = slot._replace(offset=offset_rounded, 
                                                                    len=len_rounded)
        
        except InvalidFrameError as err:
            dev_log.error("Invalid Frame: %s", err)
            raise
        

#        self.__class__.pattern_set = deepcopy(pattern_set)
        return pattern_set
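
__getstate__ and __setstate__ above shrink the pickled object by flattening the instance dict into a plain tuple with a fixed field order, at the cost of refusing to pickle when instance variables have been added or removed. A minimal sketch of the same pattern, with hypothetical class and field names and only the standard library assumed:

import pickle
from collections import namedtuple

class Compact(object):
    # fixed field order shared by __getstate__ and __setstate__
    varTup = namedtuple('varTup', 'alpha beta')

    def __init__(self, alpha=1, beta=2):
        self.alpha = alpha
        self.beta = beta

    def __getstate__(self):
        # raises TypeError if instance variables were added or removed
        return tuple(self.varTup(**self.__dict__))

    def __setstate__(self, state):
        self.__dict__.update(self.varTup(*state)._asdict())

obj = pickle.loads(pickle.dumps(Compact(alpha=10)))
assert (obj.alpha, obj.beta) == (10, 2)

Round-tripping through the namedtuple turns any field mismatch into a loud TypeError instead of silently unpickling stale attributes.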