def __init__(self):
    """Initialize per-trace state for the 'randomdb' engine.

    The probability accumulators (``p``, ``uneval_p``, ``eval_p``) are
    always stored as LOG probabilities.
    """
    self.engine_type = 'randomdb'
    # Database of random choices; RandomChoiceDict presumably supports
    # sampling a uniformly random entry — confirm against its definition.
    self.db = RandomChoiceDict()
    self.db_noise = {}
    self.log = []
    # ALWAYS WORKING WITH LOG PROBABILITIES
    self.uneval_p = 0  # log-prob accumulator (unevaluation side)
    self.eval_p = 0    # log-prob accumulator (evaluation side)
    self.p = 0         # total log probability of the current trace
    self.env = Environment()
    # Directive bookkeeping (keys presumably directive ids — verify
    # against callers).
    self.assumes = {}
    self.observes = {}
    self.predicts = {}
    self.vars = {}
def __init__(self):
    """Initialize per-trace state for the 'reduced traces' engine."""
    self.engine_type = 'reduced traces'
    # Directive bookkeeping.
    self.assumes = {}   # id -> evalnode
    self.observes = {}  # id -> evalnode
    self.predicts = {}  # id -> evalnode
    self.directives = []
    # Databases of random choices (uniform and weighted sampling).
    self.db = RandomChoiceDict()
    self.weighted_db = WeightedRandomChoiceDict()
    self.choices = {}  # hash -> evalnode
    self.xrps = {}     # hash -> (xrp, set of application nodes)
    self.env = EnvironmentNode()
    # Probability accumulators and proposal-kernel ratios
    # (presumably log-space, matching the randomdb engine — confirm).
    self.p = 0
    self.uneval_p = 0
    self.eval_p = 0
    self.new_to_old_q = 0
    self.old_to_new_q = 0
    self.debug = False
    # Necessary because of the new XRP interface requiring some state
    # kept while doing inference.
    self.application_reflip = False
    self.reflip_node = ReducedEvalNode(self, self.env, VarExpression(''))
    self.nodes = []
    self.old_vals = [Value()]
    self.new_vals = [Value()]
    self.old_val = Value()
    self.new_val = Value()
    self.reflip_xrp = XRP()
    # Metropolis-Hastings proposal statistics.
    self.mhstats_details = False
    self.mhstats = {}
    self.made_proposals = 0
    self.accepted_proposals = 0
    self.hashval = rrandom.random.randbelow()
    # NOTE: the original trailing bare `return` was removed — __init__
    # implicitly returns None, so it was redundant.