def __init__(self, name, type_t, is_scalar, enums, width, is_signed, is_rand, is_rand_sz): super().__init__(name, is_rand) # width and is_signed only needed for scalar fields self.type_t = type_t self.is_enum = (enums is not None) self.enums = enums self.is_scalar = is_scalar self.width = width self.is_signed = is_signed self.is_rand_sz = is_rand_sz # Holds a cached version of the sum constraint self.sum_expr_btor = None self.sum_expr = None # Holds a cached version of the sum constraint self.product_expr_btor = None self.product_expr = None self.size = FieldScalarModel( "size", 32, False, is_rand_sz) self._set_size(0)
class ConstraintForeachModel(ConstraintScopeModel):
    """Scope constraint that is unrolled once per element of an array.

    During build(), the 'index' helper field is rebound to each element
    position so the body constraints can reference the current element.
    """

    def __init__(self, lhs: 'FieldArrayModel'):
        super().__init__()
        self.lhs = lhs
        # Helper fields bound per-iteration while unrolling
        self.index = FieldScalarModel("index", 32, False, False)
        self.iterator = FieldScalarModel("iterator", 32, False, False)

    def build(self, btor) -> 'BoolectorNode':
        # Unroll the foreach: rebind the index and rebuild the body
        # constraints for every element position. Some of the resulting
        # terms may be redundant.
        terms = []
        elem_count = int(self.lhs.size.get_val())
        for pos in range(elem_count):
            self.index.set_val(pos)
            terms.extend(c.build(btor) for c in self.constraint_l)
        return btor.And(*terms)

    def accept(self, v):
        v.visit_constraint_foreach(self)

    def clone(self, deep=False) -> 'ConstraintModel':
        # Shallow clone shares the body constraints; deep clone copies them
        ret = ConstraintForeachModel(self.lhs)
        if deep:
            ret.constraint_l.extend(c.clone(deep) for c in self.constraint_l)
        return ret
def __init__(self, name, is_scalar, width, is_signed, is_rand, is_rand_sz): super().__init__(name, is_rand) # width and is_signed only needed for scalar fields self.is_scalar = is_scalar self.width = width self.is_signed = is_signed self.is_rand_sz = is_rand_sz # Holds a cached version of the sum constraint self.sum_expr_btor = None self.sum_expr = None self.size = FieldScalarModel("size", 32, False, is_rand_sz)
def test_cross(self):
    """Cross of two 16-bin coverpoints must reach 100% within the budget."""
    stim = FieldCompositeModel("stim", True)
    fld_a = FieldScalarModel("a", 16, False, True)
    stim.add_field(fld_a)
    fld_b = FieldScalarModel("b", 16, False, True)
    stim.add_field(fld_b)

    cg = CovergroupModel("cg")

    cp1 = CoverpointModel(ExprFieldRefModel(fld_a), "cp1", CoverageOptionsModel())
    cg.add_coverpoint(cp1)
    cp1.add_bin_model(CoverpointBinArrayModel("cp", 0, 1, 16))

    cp2 = CoverpointModel(ExprFieldRefModel(fld_b), "cp2", CoverageOptionsModel())
    cg.add_coverpoint(cp2)
    cp2.add_bin_model(CoverpointBinArrayModel("cp", 0, 1, 16))

    cross = CoverpointCrossModel("aXb", CoverageOptionsModel())
    cross.add_coverpoint(cp1)
    cross.add_coverpoint(cp2)
    cg.add_coverpoint(cross)

    gen = GeneratorModel("top")
    gen.add_field(stim)
    gen.add_covergroup(cg)
    gen.finalize()

    # Need a special randomizer to deal with generators
    r = Randomizer()
    count = 0
    for i in range(1000):
        r.do_randomize([gen])
        cg.sample()
        count += 1
        cov = cg.get_coverage()
        print("Coverage: (" + str(i) + ") " + str(cov))
        if cov == 100:
            break
    self.assertEqual(cg.get_coverage(), 100)
    # Ensure that we converge relatively quickly
    self.assertLessEqual(count, (256+16+16))
def test_smoke(self):
    """Randomize two fields under an a < b constraint and verify the relation.

    Fix: the second field was created with the duplicate name "a"
    (copy-paste error); it is the field bound to variable `b` and
    referenced by the constraint, so it must be named "b".
    """
    obj = FieldCompositeModel("obj")
    a = obj.add_field(FieldScalarModel("a", 8, False, True))
    b = obj.add_field(FieldScalarModel("b", 8, False, True))
    obj.add_constraint(
        ConstraintBlockModel("c", [
            ConstraintExprModel(
                ExprBinModel(a.expr(), BinExprType.Lt, b.expr()))
        ]))
    rand = Randomizer(RandState(0))
    randstate = RandState(0)
    rand.do_randomize(randstate, SourceInfo("", -1), [obj])
    # The solver must have honored the a < b constraint
    self.assertLess(a.val, b.val)
def visit_scalar_field(self, f: FieldScalarModel):
    """Record whether the field currently has a concrete value.

    A field that is actively being randomized has no stable value yet,
    so it is marked 'x'; otherwise its current value is captured.
    """
    if not f.is_used_rand:
        self.is_x = False
        self.val = f.get_val()
    else:
        self.is_x = True
        self.val = None
def test_coverpoint_bins(self):
    """Mix array bins, a collection bin, and high-range bins; expect 100%."""
    stim = FieldCompositeModel("stim", True)
    fld_a = FieldScalarModel("a", 16, False, True)
    stim.add_field(fld_a)
    fld_b = FieldScalarModel("b", 16, False, True)
    stim.add_field(fld_b)

    cg = CovergroupModel("cg")

    cp1 = CoverpointModel(ExprFieldRefModel(fld_a), "cp1", CoverageOptionsModel())
    cg.add_coverpoint(cp1)
    # Low array bins, a 16-way collection over the middle range,
    # and array bins over the top of the 16-bit range
    cp1.add_bin_model(CoverpointBinArrayModel("bn1", 0, 1, 16))
    cp1.add_bin_model(CoverpointBinCollectionModel.mk_collection(
        "bn2", RangelistModel([[17, 65535-16-1]]), 16))
    cp1.add_bin_model(CoverpointBinArrayModel("bn3", 0, 65535-16, 65535))

    cp2 = CoverpointModel(ExprFieldRefModel(fld_b), "cp2", CoverageOptionsModel())
    cg.add_coverpoint(cp2)
    cp2.add_bin_model(CoverpointBinArrayModel("cp", 0, 1, 16))

    gen = GeneratorModel("top")
    gen.add_field(stim)
    gen.add_covergroup(cg)
    gen.finalize()

    # Need a special randomizer to deal with generators
    r = Randomizer()
    count = 0
    for i in range(1000):
        r.do_randomize([gen])
        cg.sample()
        count += 1
        if cg.get_coverage() == 100:
            break
    self.assertEqual(cg.get_coverage(), 100)
    # Ensure that we converge relatively quickly
    self.assertLessEqual(count, 64)
def test_simple(self):
    """Copying a constraint block yields one distinct, equivalent block.

    Fix: `assertEquals` is a long-deprecated alias removed in
    Python 3.12's unittest; use `assertEqual`.
    """
    a = FieldScalarModel("a", 16, False, True)
    b = FieldScalarModel("b", 16, False, True)
    c = FieldScalarModel("c", 16, False, True)
    l = ExprLiteralModel(10, False, 8)

    # (a < b) -> (c == 10)
    ab_c = ConstraintBlockModel("ab_c", [
        ConstraintImpliesModel(
            ExprBinModel(
                ExprFieldRefModel(a),
                BinExprType.Lt,
                ExprFieldRefModel(b)),
            [
                ConstraintExprModel(
                    ExprBinModel(
                        ExprFieldRefModel(c),
                        BinExprType.Eq,
                        l))
            ])
    ])

    copy = ConstraintCopyBuilder.copy(ab_c)
    self.assertEqual(1, len(copy))
    # The copy must be a new object, not an alias of the original
    self.assertIsNot(ab_c, copy[0])
def test_smoke(self):
    """Two coverpoints with 16-value bins reach 100% within the budget."""
    stim = FieldCompositeModel("stim", True)
    fld_a = FieldScalarModel("a", 16, False, True)
    stim.add_field(fld_a)
    fld_b = FieldScalarModel("b", 16, False, True)
    stim.add_field(fld_b)

    cg = CovergroupModel("cg")

    cp1 = CoverpointModel(ExprFieldRefModel(fld_a), "cp1", CoverageOptionsModel())
    cg.add_coverpoint(cp1)
    cp1.add_bin_model(CoverpointBinArrayModel("cp", 1, 16))

    cp2 = CoverpointModel(ExprFieldRefModel(fld_b), "cp2", CoverageOptionsModel())
    cg.add_coverpoint(cp2)
    cp2.add_bin_model(CoverpointBinArrayModel("cp", 1, 16))

    gen = GeneratorModel("top")
    gen.add_field(stim)
    gen.add_covergroup(cg)
    gen.finalize()

    # Need a special randomizer to deal with generators
    r = Randomizer(RandState(0))
    randstate = RandState(0)
    count = 0
    for i in range(1000):
        r.do_randomize(randstate, SourceInfo("", -1), [gen])
        cg.sample()
        count += 1
        if cg.get_coverage() == 100:
            break
    self.assertEqual(cg.get_coverage(), 100)
    # Ensure that we converge relatively quickly
    self.assertLessEqual(count, 32)
def build_field_model(self, name):
    """Return the backing scalar model, creating it on first use.

    The model is created lazily; on subsequent calls only its name is
    re-synchronized with the enclosing structure.
    """
    self._int_field_info.name = name
    model = self._int_field_info.model
    if model is not None:
        # Ensure the name matches superstructure
        model.name = name
    else:
        model = FieldScalarModel(
            name,
            self.width,
            self.is_signed,
            self._int_field_info.is_rand)
        self._int_field_info.model = model
        # Seed the freshly-created model with the declared initial value
        self.set_val(self._init_val)
    return model
def add_field(self) -> FieldScalarModel:
    """Append one new element (enum or scalar) and update the size field."""
    elem_name = self.name + "[" + str(len(self.field_l)) + "]"
    if self.is_enum:
        elem = EnumFieldModel(elem_name, self.enums, self.is_declared_rand)
    else:
        elem = FieldScalarModel(
            elem_name,
            self.width,
            self.is_signed,
            self.is_declared_rand)
    ret = super().add_field(elem)
    # Update the size
    self._set_size(len(self.field_l))
    return ret
def test_wide_var(self):
    """A 1024-bit variable constrained above a 72-bit literal."""
    obj = FieldCompositeModel("obj")
    a = obj.add_field(FieldScalarModel("a", 1024, False, True))
    threshold = ExprLiteralModel(0x80000000000000000, False, 72)
    obj.add_constraint(
        ConstraintBlockModel("c", [
            ConstraintExprModel(
                ExprBinModel(a.expr(), BinExprType.Gt, threshold))
        ]))
    randstate = RandState(0)
    rand = Randomizer(randstate)
    rand.do_randomize(randstate, SourceInfo("", -1), [obj])
    print("a=" + hex(int(a.val)))
    # Solver result must exceed the 72-bit threshold
    self.assertGreater(a.val, ValueScalar(0x80000000000000000))
def __init__(self, lhs: 'FieldArrayModel'):
    """Create a foreach scope over the array *lhs*.

    'index' and 'iterator' are 32-bit non-random helper fields that the
    body constraints may reference while the foreach is unrolled.
    """
    super().__init__()
    self.lhs = lhs
    self.index = FieldScalarModel("index", 32, False, False)
    self.iterator = FieldScalarModel("iterator", 32, False, False)
class FieldArrayModel(FieldCompositeModel):
    """All arrays are processed as if they were variable size.

    Element geometry (width/signedness or enum set) is captured at
    construction. A 32-bit 'size' helper field tracks the element count,
    and cached sum/product expressions are invalidated whenever the size
    changes (see _set_size).
    """

    def __init__(self, name, is_scalar, enums, width, is_signed, is_rand, is_rand_sz):
        super().__init__(name, is_rand)
        # width and is_signed only needed for scalar fields
        self.is_enum = (enums is not None)
        self.enums = enums
        self.is_scalar = is_scalar
        self.width = width
        self.is_signed = is_signed
        self.is_rand_sz = is_rand_sz
        # Holds a cached version of the sum constraint
        self.sum_expr_btor = None
        self.sum_expr = None
        # Holds a cached version of the product constraint
        self.product_expr_btor = None
        self.product_expr = None
        self.size = FieldScalarModel("size", 32, False, is_rand_sz)
        self._set_size(0)

    def append(self, fm):
        # Add an existing field model as the last element
        super().add_field(fm)
        self._set_size(len(self.field_l))
        # Element rand-ness follows the array's declaration
        fm.is_declared_rand = self.is_declared_rand
        fm.rand_mode = self.is_declared_rand
        self.name_elems()

    def clear(self):
        # Remove all elements and reset the size (invalidates caches)
        self.field_l.clear()
        self._set_size(0)

    def pop(self, idx=0):
        # Remove the element at idx and re-index the remaining names
        self.field_l.pop(idx)
        self._set_size(len(self.field_l))
        self.name_elems()

    def _set_size(self, sz):
        # Update the size field; on any change, drop the cached
        # sum/product expressions since they were built for the old size
        if sz != int(self.size.get_val()):
            self.size.set_val(sz)
            self.sum_expr = None
            self.sum_expr_btor = None
            self.product_expr = None
            self.product_expr_btor = None

    def name_elems(self):
        """Apply an index-based name to all fields"""
        for i, f in enumerate(self.field_l):
            f.name = self.name + "[" + str(i) + "]"

    def pre_randomize(self):
        # Set the size field for arrays that don't
        # have a random size
        if not self.is_rand_sz:
            self._set_size(len(self.field_l))
        FieldCompositeModel.pre_randomize(self)

    def post_randomize(self):
        FieldCompositeModel.post_randomize(self)
        # Drop sum caches; field values may have changed
        self.sum_expr = None
        self.sum_expr_btor = None

    def add_field(self) -> FieldScalarModel:
        # Create and append a new element of the array's declared type
        fid = len(self.field_l)
        if self.is_enum:
            ret = super().add_field(
                EnumFieldModel(
                    self.name + "[" + str(fid) + "]",
                    self.enums,
                    self.is_declared_rand))
        else:
            ret = super().add_field(
                FieldScalarModel(
                    self.name + "[" + str(fid) + "]",
                    self.width,
                    self.is_signed,
                    self.is_declared_rand))
        # Update the size
        self._set_size(len(self.field_l))
        return ret

    def build(self, builder):
        # Called before randomization
        self._set_size(len(self.field_l))
        super().build(builder)

    def get_sum_expr(self):
        # Lazily build (and cache) sum(elements) over the current size
        if self.sum_expr is None:
            # Build
            # Force the result to be 32-bit, in order to
            # match user expectation
            ret = ExprLiteralModel(0, self.is_signed, 32)
            for i in range(int(self.size.get_val())):
                f = self.field_l[i]
                # NOTE(review): ret is never None here (seeded above), so
                # the None branches are dead code kept from an older form
                if ret is None:
                    ret = ExprFieldRefModel(f)
                else:
                    ret = ExprBinModel(ret, BinExprType.Add, ExprFieldRefModel(f))
            if ret is None:
                ret = ExprLiteralModel(0, self.is_signed, 32)
            self.sum_expr = ret
        return self.sum_expr

    def build_sum_expr(self, btor):
        # Cache the solver-side (Boolector) form of the sum expression
        if self.sum_expr_btor is None:
            self.sum_expr_btor = self.get_sum_expr().build(btor)
        return self.sum_expr_btor

    def get_product_expr(self):
        # Lazily build (and cache) product(elements); result is 64-bit.
        # An empty array yields 0, otherwise the fold starts at 1.
        if self.product_expr is None:
            # Build
            # Force the result to be 32-bit, in order to
            # match user expectation
            if int(self.size.get_val()) == 0:
                ret = ExprLiteralModel(0, self.is_signed, 64)
            else:
                ret = ExprLiteralModel(1, self.is_signed, 64)
            for i in range(int(self.size.get_val())):
                f = self.field_l[i]
                ret = ExprBinModel(ret, BinExprType.Mul, ExprFieldRefModel(f))
            self.product_expr = ret
        return self.product_expr

    def build_product_expr(self, btor):
        # Cache the solver-side (Boolector) form of the product expression
        if self.product_expr_btor is None:
            self.product_expr_btor = self.get_product_expr().build(btor)
        return self.product_expr_btor

    def accept(self, v):
        v.visit_field_scalar_array(self)
def visit_scalar_field(self, f: FieldScalarModel):
    """Build the field's solver variable, but only during phase 0."""
    if self.phase != 0:
        return
    f.build(self.btor)
def build_field_model(self, name):
    """(Re)create the backing scalar model for this field and return it."""
    info = self._int_field_info
    info.name = name
    # A fresh model is built on every call; it back-references this object
    info.model = FieldScalarModel(
        name,
        self.width,
        self.is_signed,
        info.is_rand,
        self)
    return info.model
def add_field(self) -> FieldScalarModel:
    """Append one new scalar element named after its index."""
    elem_name = self.name + "[" + str(len(self.field_l)) + "]"
    elem = FieldScalarModel(
        elem_name,
        self.width,
        self.is_signed,
        self.is_declared_rand)
    return super().add_field(elem)
def visit_scalar_field(self, f:FieldScalarModel):
    # Visitor leaf: release resources held by the scalar field
    f.dispose()
class FieldArrayModel(FieldCompositeModel):
    """All arrays are processed as if they were variable size.

    Older variant: scalar elements only, sum-expression caching but no
    product support or centralized cache invalidation.
    """

    def __init__(self, name, is_scalar, width, is_signed, is_rand, is_rand_sz):
        super().__init__(name, is_rand)
        # width and is_signed only needed for scalar fields
        self.is_scalar = is_scalar
        self.width = width
        self.is_signed = is_signed
        self.is_rand_sz = is_rand_sz
        # Holds a cached version of the sum constraint
        self.sum_expr_btor = None
        self.sum_expr = None
        # 32-bit 'size' helper field; random only when the size is rand
        self.size = FieldScalarModel("size", 32, False, is_rand_sz)

    def append(self, fm):
        # Add an existing field model as the last element and re-index names
        super().add_field(fm)
        self.name_elems()

    def clear(self):
        # Remove all elements and zero the size field.
        # NOTE(review): unlike the newer variant, this does not invalidate
        # the cached sum_expr/sum_expr_btor — a stale sum could be reused
        # until post_randomize() clears it; confirm against callers.
        self.field_l.clear()
        self.size.set_val(0)

    def pop(self, idx=0):
        # Remove the element at idx and re-index the remaining names
        self.field_l.pop(idx)
        self.name_elems()

    def name_elems(self):
        """Apply an index-based name to all fields"""
        for i, f in enumerate(self.field_l):
            f.name = self.name + "[" + str(i) + "]"

    def pre_randomize(self):
        # Set the size field for arrays that don't
        # have a random size
        if not self.is_rand_sz:
            self.size.set_val(len(self.field_l))
        FieldCompositeModel.pre_randomize(self)

    def post_randomize(self):
        FieldCompositeModel.post_randomize(self)
        # Drop sum caches; field values may have changed
        self.sum_expr = None
        self.sum_expr_btor = None

    def add_field(self) -> FieldScalarModel:
        # Create and append a new scalar element named after its index
        fid = len(self.field_l)
        return super().add_field(
            FieldScalarModel(
                self.name + "[" + str(fid) + "]",
                self.width,
                self.is_signed,
                self.is_declared_rand))

    def build(self, builder):
        # Called before randomization
        self.size.set_val(int(len(self.field_l)))
        super().build(builder)

    def get_sum_expr(self):
        # Lazily build (and cache) sum(elements); empty arrays yield a
        # 32-bit literal 0
        if self.sum_expr is None:
            # Build
            ret = None
            for f in self.field_l:
                if ret is None:
                    ret = ExprFieldRefModel(f)
                else:
                    ret = ExprBinModel(ret, BinExprType.Add, ExprFieldRefModel(f))
            if ret is None:
                ret = ExprLiteralModel(0, False, 32)
            self.sum_expr = ret
        return self.sum_expr

    def build_sum_expr(self, btor):
        # Cache the solver-side (Boolector) form of the sum expression
        if self.sum_expr_btor is None:
            self.sum_expr_btor = self.get_sum_expr().build(btor)
        return self.sum_expr_btor

    def accept(self, v):
        v.visit_field_scalar_array(self)