def basic_setup():
    # setup
    host1_uuid = util.generate_uuid()
    host2_uuid = util.generate_uuid()

    a_b_conn = test_util.DummyConnectionObj()
    c_r2_conn = test_util.DummyConnectionObj()

    garbage_conn = test_util.DummyConnectionObj()

    # host one endpoints
    r1 = EndpointR1(garbage_conn,host1_uuid)
    a = EndpointA(a_b_conn,host1_uuid)
    c = EndpointC(a,c_r2_conn,host1_uuid)

    # host two endpoints
    r2 = EndpointR2(c_r2_conn,host2_uuid)
    b = EndpointB(a_b_conn,host2_uuid)

    # start each event
    act_r1 = r1.evt_r1(a)
    act_r2 = r2.evt_r2(b)

    try1 = TryCommit(act_r1)
    try2 = TryCommit(act_r2)
    try2.start()
    try1.start()


    return True
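
# A minimal usage sketch, assuming TryCommit subclasses
# threading.Thread (the .start() calls above suggest this, but the
# class is not shown in this snippet).  wait_for_commits is a
# hypothetical helper, not part of the original test:
def wait_for_commits(*try_commits):
    # block until every TryCommit worker finishes its commit attempt
    for tc in try_commits:
        tc.join()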
# Example #2
    def __init__(self,local_endpoint,uuid,priority):
        '''
        @param {uuid or None} uuid --- If None, then generate a random
        uuid and use that.  Otherwise, use the uuid specified.  Note:
        not all root events use a random uuid because some must be
        boosted.
        '''
        if uuid is None:
            uuid = util.generate_uuid()
        
        self.local_endpoint = local_endpoint
        # indices are event uuids.  Values are bools.  When all values
        # are true in this dict, then we can transition into second
        # phase commit.
        self.endpoints_waiting_on_commit = {}
        # we can add and remove entries in endpoints_waiting_on_commit
        # from multiple threads.  Without a lock, this can
        # desynchronize operations on endpoints_waiting_on_commit.
        # For instance, if one thread records that we are waiting on a
        # particular endpoint while another thread records that the
        # other endpoint's first-phase transition has been received,
        # the first write can overwrite the second.
        self._endpoints_waiting_on_commit_lock = threading.Lock()

        
        # when the root tries to commit the event, it blocks while
        # reading the event_complete_queue
        self.event_complete_queue = util.Queue.Queue()
        super(RootEventParent,self).__init__(uuid,priority)
    def issue_partner_sequence_block_call(
        self,ctx,func_name,threadsafe_unblock_queue, first_msg):
        '''
        @param {String or None} func_name --- When func_name is None,
        we are sending the other side a message indicating that we
        finished performing the requested block.  In this case, we do
        not need to add threadsafe_unblock_queue to the map of waiting
        queues.

        @param {bool} first_msg --- True if this is the first message
        in a sequence that we're sending.  Necessary so that we can
        tell whether or not to force sending sequence local data.

        @param {Queue or None} threadsafe_unblock_queue --- None if
        this was the last message sent in a sequence and we're not
        waiting on a reply.
        
        The local endpoint is requesting its partner to call some
        sequence block.
        '''
        partner_call_requested = False
        self._lock()

        if self.state == self.STATE_RUNNING:
            partner_call_requested = True
            self._others_contacted_lock()
            self.partner_contacted = True
            self._others_contacted_unlock()

            # code is listening on threadsafe result_queue.  when we
            # receive a response, put it inside of the result queue.
            # put result queue in map so that can demultiplex messages
            # from partner to determine which result queue is finished
            reply_with_uuid = util.generate_uuid()
            
            if threadsafe_unblock_queue is not None:
                # may get None for result queue for the last message
                # sequence block requested.  It does not need to await
                # a response.
                self.message_listening_queues_map[
                    reply_with_uuid] = threadsafe_unblock_queue
                
                
            # here, the local endpoint uses the connection object to
            # actually send the message.
            self.event_parent.local_endpoint._send_partner_message_sequence_block_request(
                func_name, self.uuid, self.get_priority(),reply_with_uuid,
                ctx.to_reply_with_uuid, self,
                # sending sequence_local_store so that can determine
                # deltas in sequence local state made from this call.
                # do not need to add global store, because
                # self.local_endpoint already has a copy of it.
                ctx.sequence_local_store,
                first_msg)

            
        self._unlock()
        return partner_call_requested
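
# A minimal sketch of the bookkeeping pattern that the comments in
# RootEventParent.__init__ describe: every read-modify-write of
# endpoints_waiting_on_commit happens under a single lock, so one
# thread registering a new endpoint cannot clobber another thread
# recording a first-phase acknowledgment.  The names below are
# illustrative only, not the actual Waldo implementation.
import threading

class WaitingOnCommitSketch(object):
    def __init__(self):
        # indices are endpoint uuids; values become True once that
        # endpoint's first-phase transition has been received
        self.endpoints_waiting_on_commit = {}
        self._lock = threading.Lock()

    def add_waiting_endpoint(self, endpoint_uuid):
        with self._lock:
            self.endpoints_waiting_on_commit[endpoint_uuid] = False

    def receive_first_phase_ack(self, endpoint_uuid):
        with self._lock:
            self.endpoints_waiting_on_commit[endpoint_uuid] = True
            # may transition into second-phase commit only when all
            # values are True
            return all(self.endpoints_waiting_on_commit.values())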
# Example #4
    def create_root_event(self):
        evt_uuid = util.generate_uuid()
        if len(self.event_list) == 0:
            evt_priority = generate_boosted_priority(self.last_boosted_complete)
        else:
            evt_priority = generate_timed_priority(self.clock.get_timestamp())

        rep = RootEventParent(self.act_event_map.local_endpoint,evt_uuid,evt_priority)
        root_event = LockedActiveEvent(rep,self.act_event_map)
        self.event_list.append(root_event)
        return root_event
    def __init__(self,data_wrapper_constructor,host_uuid,peered,init_val):
        '''
        @param {DataWrapper constructor} data_wrapper_constructor ---
        Used to store dirty values.  For value types, can just use
        ValueTypeDataWrapper.  For reference types, should use
        ReferenceTypeDataWrapper.
        '''
        self.data_wrapper_constructor = data_wrapper_constructor
        self.uuid = util.generate_uuid()

        self.host_uuid = host_uuid
        self.peered = peered

        # still using data wrappers because data wrappers keep track of
        # whether this variable was written since the last message.
        self.val = self.data_wrapper_constructor(init_val,self.peered)
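
# A minimal sketch of the dirty-tracking role the docstring above
# assigns to data wrappers: hold a value plus a flag recording whether
# it was written since the last message.  This is an illustration under
# assumed semantics, not the actual ValueTypeDataWrapper:
class ValueDataWrapperSketch(object):
    def __init__(self, init_val, peered):
        self.val = init_val
        self.peered = peered
        # set on every write; a peered variable's deltas get sent to
        # the partner endpoint, after which the flag is cleared
        self.written_since_last_message = False

    def write(self, new_val):
        self.val = new_val
        self.written_since_last_message = True

    def clear_written(self):
        self.written_since_last_message = False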
    def __init__(self,data_wrapper_constructor,host_uuid,peered,init_val):
        '''
        @param {DataWrapper constructor} data_wrapper_constructor ---
        Used to store dirty values.  For value types, can just use
        ValueTypeDataWrapper.  For reference types, should use
        ReferenceTypeDataWrapper.
        '''
        # m = Monitor(self)
        # m.start()
        
        self.data_wrapper_constructor = data_wrapper_constructor
        self.uuid = util.generate_uuid()

        
        self.host_uuid = host_uuid
        self.peered = peered

        self.val = self.data_wrapper_constructor(init_val,self.peered)
        self.dirty_val = None
        
        # If write_lock_holder is not None, then the only element in
        # read_lock_holders is the write lock holder.
        # read_lock_holders maps from uuids to EventCachedPriorityObj.
        self.read_lock_holders = {}
        # write_lock_holder is EventCachedPriorityObj
        self.write_lock_holder = None

        
        # A dict of event uuids to WaitingEventTypes
        self.waiting_events = {}

        # try_next can cause other events to back out.  If it does,
        # backout in turn calls try_next.  This (in some cases) can
        # invalidate state that the parent try_next is already working
        # with.  Use this flag to track whether we are already in
        # try_next; if we are, return immediately from nested try_next
        # calls.
        self.in_try_next = False
        
        # FIXME: do not have to use reentrant lock here.  The reason
        # that I am is that when we request an event to release locks
        # on an object, we do so from within a lock.  Following that,
        # the event calls into a lock-protected method to remove
        # itself from the dict.  Could re-write code to track which obj
        # requested backout and skip backing that obj out instead.
        self._mutex = threading.RLock()
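
# A minimal sketch of why the FIXME above reaches for a reentrant
# lock: the backout path re-enters a mutex-protected method on the
# same thread that already holds the mutex.  With a plain Lock the
# nested acquire would deadlock; an RLock lets the same thread
# re-acquire.  The class and method names here are illustrative only.
import threading

class ReentrantBackoutSketch(object):
    def __init__(self):
        self._mutex = threading.RLock()
        self.waiting_events = {}

    def request_backout(self, event_uuid):
        with self._mutex:
            # while still holding _mutex, synchronously ask the event
            # to remove itself, which calls back into this object
            self.remove_waiting_event(event_uuid)

    def remove_waiting_event(self, event_uuid):
        # re-acquired by the same thread: fine for an RLock, deadlock
        # for a plain Lock
        with self._mutex:
            self.waiting_events.pop(event_uuid, None)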
# Example #7
    def __init__(self,conn_obj=None,host_uuid=None):
        
        if conn_obj is None:
            conn_obj = _WaldoSingleSideConnectionObject()

        if host_uuid is None:
            host_uuid = generate_uuid()
            
        glob_var_store = _VariableStore(host_uuid)

        self.end_global_number_var_name = 'numero'
        glob_var_store.add_var(
            self.end_global_number_var_name,
            LockedNumberVariable(host_uuid,False,100))

        _Endpoint.__init__(
            self,Waldo._waldo_classes,
            host_uuid,conn_obj,glob_var_store)
# Example #8
    def __init__(self,conn_obj,host_uuid=None):
        
        # all dummy endpoints will have the same _VariableStore
        # Peered Number numero = 100;
        # Peered Text some_str = 'test';
        # Peered List (elements: Text) text_list;
        if host_uuid is None:
            host_uuid = util.generate_uuid()
            
        glob_var_store = waldoVariableStore._VariableStore(host_uuid)
        
        self.peered_number_var_name = 'numero'
        glob_var_store.add_var(
            self.peered_number_var_name,
            wVariables.WaldoNumVariable(
                self.peered_number_var_name,host_uuid,
                True,100))

        self.peered_str_var_name = 'some_str'
        glob_var_store.add_var(
            self.peered_str_var_name,
            wVariables.WaldoTextVariable(
                self.peered_str_var_name,host_uuid,
                True,'test'))
        
        self.peered_list_var_name = 'text_list'
        glob_var_store.add_var(
            self.peered_list_var_name,
            wVariables.WaldoListVariable(
                self.peered_list_var_name,host_uuid,True))

        self.peered_map_var_name = 'some map'
        glob_var_store.add_var(
            self.peered_map_var_name,
            wVariables.WaldoMapVariable(
                self.peered_map_var_name,host_uuid,True))

        
        waldoEndpoint._Endpoint.__init__(
            self,Waldo._waldo_classes,
            host_uuid,conn_obj,glob_var_store)
# Example #9
from waldo.lib import wVariables, util


'''
Puts lists inside of lists.  Want to ensure that if we write to list
elements that are themselves lists, and we then modify one of those
elements separately, we'll:
  a) see that change reflected in the original list
  b) not get read/write collisions between element lists and
     master lists
  c) still be able to perform operations in parallel between the
     master list and element lists so long as those do not affect
     each other.
'''

host_uuid = util.generate_uuid()


def create_two_events(dummy_endpoint):
    evt1 = dummy_endpoint._act_event_map.create_root_event()
    evt2 = dummy_endpoint._act_event_map.create_root_event()    
    return evt1,evt2

def create_list(dummy_endpoint,to_populate_with):
    '''
    @param {list} to_populate_with --- Each element gets appended to
    the list that we return.  This can be a list of value types, or a
    list of Waldo's internal lists.
    '''
    new_list = wVariables.WaldoListVariable('some name',host_uuid)
    evt1,evt2 = create_two_events(dummy_endpoint)
# Example #10
    def complete_root_event(self,completed_event_uuid,retry):
        '''
        @param {UUID} completed_event_uuid

        @param {bool} retry --- If the event that we are removing is
        being removed because backout was called, we want to generate
        a new event with a successor UUID (the same high-level bits
        that control priority/precedence, but different low-level
        version bits)
        
        Whenever any root event completes, this method gets called.
        If this event had been a boosted event, then we boost the next
        waiting event.  Otherwise, we remove it from the list of
        waiting uuids.

        @returns {None/Event} --- If retry is True, we return a new
        event with successor uuid.  Otherwise, return None.
        '''
        counter = 0
        remove_counter = None
        for event in self.event_list:
            if event.uuid == completed_event_uuid:
                remove_counter = counter
                completed_event = event
                break
            counter += 1

        #### DEBUG
        if remove_counter is None:
            util.logger_assert(
                'Completing a root event that does not exist')
        #### END DEBUG

        replacement_event = None
        if retry:
            # in certain cases, we do not actually promote each
            # event's priority to boosted.  For instance, if the event
            # is already in process of committing.  However, if that
            # commit goes awry and we backout, we want the replacement
            # event generated to use a boosted event priority, rather
            # than its original priority.
            if counter == 0:
                # new event should be boosted.  
                if not is_boosted_priority(completed_event.get_priority()):
                    # if it wasn't already boosted, that means that we
                    # tried to promote it while it was in the midst of
                    # its commit and we ignored the promotion.
                    # Therefore, we want to apply the promotion on
                    # retry.
                    replacement_priority = generate_boosted_priority(self.last_boosted_complete)
                else:
                    # it was already boosted, just reuse it
                    replacement_priority = completed_event.get_priority()
            else:
                # it was not the boosted head event; just reuse its
                # original priority
                replacement_priority = completed_event.get_priority()

            rep = RootEventParent(
                self.act_event_map.local_endpoint,util.generate_uuid(),
                replacement_priority)
            
            replacement_event = LockedActiveEvent(rep,self.act_event_map)
            self.event_list[counter] = replacement_event
        else:
            # we are not retrying this event: remove the event from
            # the list and if there are any other outstanding events,
            # check if they should be promoted to boosted status.
            self.event_list.pop(counter)
            if counter == 0:
                self.last_boosted_complete = self.clock.get_timestamp()
                self.promote_first_to_boosted()

        return replacement_event
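
# A hedged usage sketch of the retry contract documented above: when a
# commit backs out, complete the event with retry=True and re-run the
# work against the returned successor event.  run_event_body and
# try_commit are hypothetical stand-ins for the caller's machinery;
# only complete_root_event's signature comes from the snippet above.
def commit_with_retry(event_parent_list, root_event, run_event_body,
                      try_commit):
    while True:
        run_event_body(root_event)
        if try_commit(root_event):
            # success: remove the event without generating a successor
            event_parent_list.complete_root_event(root_event.uuid, False)
            return root_event
        # backout: swap in the successor event and try again
        root_event = event_parent_list.complete_root_event(
            root_event.uuid, True)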
# Example #11
    def __init__(self):
        self._mutex = threading.Lock()
        self._uuid = util.generate_uuid()
        self._stopped = False
# Example #12
    def __init__(self,waldo_classes,host_uuid,conn_obj,global_var_store,*args):
        '''
        @param {dict} waldo_classes --- Contains common utilities
        needed by emitted code, such as WaldoNumVariable
        
        @param {uuid} host_uuid --- The uuid of the host this endpoint
        lives on.
        
        @param {ConnectionObject} conn_obj --- Used to write messages
        to partner endpoint.

        @param {_VariableStore} global_var_store --- Contains the
        peered and endpoint global data for this endpoint.  Will not
        add or remove any peered or endpoint global variables.  Will
        only make calls on them.
        '''
        self._uuid = util.generate_uuid()

        self._endpoint_uuid_str = str(self._uuid)
        
        self._waldo_classes = waldo_classes

        self._clock = waldo_classes['Clock']
        self._act_event_map = waldoActiveEventMap._ActiveEventMap(
            self,self._clock)

        self._conn_obj = conn_obj
        
        # whenever we create a new _ExecutingEvent, we point it at
        # this variable store so that it knows where it can retrieve
        # variables.
        self._global_var_store = global_var_store

        # put service actions into thread pool to be executed
        self._thread_pool = waldo_classes['ThreadPool']

        self._all_endpoints = waldo_classes['AllEndpoints']
        self._all_endpoints.add_endpoint(self)
        
        
        self._host_uuid = host_uuid

        self._signal_queue = Queue.Queue()
        
        # When go through first phase of commit, may need to forward
        # partner's endpoint uuid back to the root, so the endpoint
        # needs to keep track of its partner's uuid.  FIXME: right
        # now, manually setting partner uuids in connection object.
        # And not checking to ensure that the partner endpoint is set
        # before doing additional work. should create a proper
        # handshake instead.
        self._partner_uuid = None

        
        # both sides should run their onCreate methods to entirety
        # before we can execute any additional calls.
        self._ready_lock_ = threading.Lock()
        self._this_side_ready_bool = False
        self._other_side_ready_bool = False

        self._ready_waiting_list_mutex = threading.Lock()
        self._ready_waiting_list = []

        self._conn_obj.register_endpoint(self)

        self._stop_mutex = threading.Lock()
        # has stop been called locally, on the partner, and have we
        # performed cleanup, respectively
        self._stop_called = False
        self._partner_stop_called = False
        self._stop_complete = False
        
        self._stop_blocking_queues = []

        # holds callbacks to call when stop is complete
        self._stop_listener_id_assigner = 0
        self._stop_listeners = {}

        self._conn_failed = False
        self._conn_mutex = threading.Lock()

        # start heartbeat thread
        self._heartbeat = Heartbeat(socket=self._conn_obj, 
            timeout_cb=self.partner_connection_failure,*args)
        self._heartbeat.start()
        self._send_clock_update()
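
# A minimal sketch of the heartbeat pattern wired up above: a daemon
# thread periodically sends a heartbeat and fires timeout_cb if the
# partner stays silent too long.  The real Heartbeat class's interface
# is not shown in this snippet; everything below is illustrative.
import threading
import time

class HeartbeatSketch(threading.Thread):
    def __init__(self, send_heartbeat, timeout_cb, period=1.0,
                 timeout=5.0):
        threading.Thread.__init__(self)
        self.daemon = True
        self.send_heartbeat = send_heartbeat  # e.g., writes on conn_obj
        self.timeout_cb = timeout_cb  # e.g., partner_connection_failure
        self.period = period
        self.timeout = timeout
        self.last_heard = time.time()

    def note_partner_message(self):
        # endpoint calls this whenever a partner message arrives
        self.last_heard = time.time()

    def run(self):
        while True:
            self.send_heartbeat()
            if time.time() - self.last_heard > self.timeout:
                self.timeout_cb()
                return
            time.sleep(self.period)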