def grow_domain(domain_states, transitions, depth, validity_test=None):
    """
    Returns domain_states grown by depth along transitions.

    Resulting states are filtered by the validity_test. By default,
    only states without a negative coordinate are valid.
    """
    if numpy.size(domain_states) == 0:
        raise ValueError('there must be at least one state to expand')
    if validity_test is None:
        validity_test = non_neg_states
    expanded_states = domain_states
    for _ in xrange(depth):
        # expand support states by one along each state transition
        for transition in transitions:
            shifted_states = lexarrayset.shift(domain_states, transition)
            # filter invalid states (i.e. states with a negative coordinate)
            valid = validity_test(shifted_states)
            shifted_states = shifted_states[:, valid]
            expanded_states = lexarrayset.union(expanded_states, shifted_states)
        domain_states = expanded_states
    # the states are returned twice, with a zero placeholder, so that the
    # return signature matches the (states, nabla, u) triple produced by
    # the other expanders
    return expanded_states, expanded_states, 0
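# --- Illustrative sketch, not part of the module above ---
# The helper below re-implements a single growth step with plain NumPy set
# operations instead of the lexarrayset helpers used by grow_domain, purely
# to clarify the shift / filter / union idea. The name ``toy_grow_once`` is
# hypothetical; states are assumed to be stored as a (dimensions, n_states)
# integer array, as in grow_domain.
import numpy  # may duplicate a module-level import


def toy_grow_once(states, transitions):
    """Grow states one step along each transition, dropping negatives."""
    columns = set(tuple(col) for col in states.T)
    for transition in transitions:
        # shift every state column by the transition vector
        shifted = states + numpy.asarray(transition)[:, numpy.newaxis]
        # keep only the shifted states with non-negative coordinates
        valid = shifted[:, numpy.all(shifted >= 0, axis=0)]
        columns.update(tuple(col) for col in valid.T)
    return numpy.array(sorted(columns)).T

# Example: growing the origin of a two-species toy model by one step,
# toy_grow_once(numpy.array([[0], [0]]), [(1, 0), (0, 1), (-1, 0)]),
# gives array([[0, 0, 1], [0, 1, 0]]); the state (-1, 0) is filtered out.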
def step(self, t, epsilon):
    """
    Advance solution to time ``t`` at the cost of at most ``epsilon`` error.
    """
    step_epsilon = self.solver.restore_point_error + epsilon
    number_of_states = numpy.size(self.domain_states, 1)
    step_fail_counter = 0
    while True:
        self.solver.step(t)
        p, p_sink = self.solver.y
        if step_fail_counter != 0 and self.domain_expander.name == 'GORDE':
            self.solver.set_restore_point()
            break
        if p_sink > step_epsilon:
            step_fail_counter += 1
            if step_fail_counter == 1:
                self.domain_expander.Gtau = self.domain_expander.Gtau_default
                expanded_states, nabla, u = self.domain_expander.expand(
                    domain_states = self.solver.restore_args['domain_states'],
                    p = self.solver.restore_args['p_0'],
                    p_sink = p_sink,
                    t = t,
                )
            else:
                # subsequent failures: tighten Gtau and expand again from the
                # states and distribution returned by the previous attempt
                self.domain_expander.Rtau = self.domain_expander.Gtau
                self.domain_expander.Gtau = self.domain_expander.Gtau**(10**(-2))
                some_expanded_states, nabla, u = self.domain_expander.expand(
                    domain_states = nabla,
                    p = u,
                    t = t,
                )
                # add the newly found states to the expanded states
                expanded_states = lexarrayset.union(
                    some_expanded_states,
                    expanded_states,
                )
            # we need to translate the dense p, defined with respect to an
            # enumeration of the previous domain states, to a dense p with
            # respect to an enumeration of the expanded domain states

            # get the domain states that p_0 is defined over
            p_0 = self.solver.restore_args['p_0']
            p_0_domain = self.solver.restore_args['domain_enum'].states(
                numpy.arange(len(p_0)),
            )
            # define the new state enum for the expanded domain
            expanded_enum = state_enum.StateEnum(expanded_states)
            # figure out the indices to store each domain state in,
            # according to the new state enum
            domain_indices = expanded_enum.indices(p_0_domain)
            # make a new dense distribution
            expanded_p_0 = numpy.zeros((expanded_enum.size, ), dtype = float)
            # copy the probabilities from p_0 across into the
            # correct indices of the expanded version of p_0
            expanded_p_0[domain_indices] = p_0
            # update fsp solver bookkeeping
            self.domain_states = expanded_states
            self.domain_enum = expanded_enum
            # check that expansion did in fact add some extra states
            if numpy.size(self.domain_states, 1) <= number_of_states:
                lament = 'expansion did not increase size of domain'
                raise ExpansionFailureError(lament)
            # restore solver to previous state, but use expanded domain
            self.solver.restore(
                domain_states = self.domain_states,
                domain_enum = self.domain_enum,
                p_0 = expanded_p_0,
            )
        else:
            self.solver.set_restore_point()
            break
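# --- Illustrative sketch, not part of the method above ---
# The core of the domain-expansion bookkeeping in step() is re-embedding the
# dense restart distribution p_0 into a larger vector indexed by the expanded
# state enumeration (the ``expanded_p_0[domain_indices] = p_0`` assignment).
# The helper below shows the same idea with a plain dictionary lookup in place
# of state_enum.StateEnum; the name ``reindex_distribution`` is hypothetical
# and the expanded states are simply enumerated in column order, not by
# whatever ordering StateEnum uses internally.
import numpy  # may duplicate a module-level import


def reindex_distribution(old_states, old_p, expanded_states):
    """Return old_p embedded in a dense vector over expanded_states."""
    # map each expanded state (as a tuple) to its position in the enumeration
    position = dict(
        (tuple(col), i) for i, col in enumerate(expanded_states.T)
    )
    expanded_p = numpy.zeros(expanded_states.shape[1])
    # copy each old probability into the slot of the matching expanded state
    for col, mass in zip(old_states.T, old_p):
        expanded_p[position[tuple(col)]] = mass
    return expanded_p

# Example: two old states embedded in a three-state expanded domain.
# old = numpy.array([[0, 1], [0, 0]]); p = numpy.array([0.7, 0.3])
# new = numpy.array([[0, 0, 1], [0, 1, 0]])
# reindex_distribution(old, p, new) == array([0.7, 0.0, 0.3])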