def test_timezones_are_checked_in_deserialization():
    """Round-tripping an aware datetime into a timezone-free strategy
    must be rejected when the template is reified."""
    source = datetimes()
    rnd = Random(1)
    serialized = source.to_basic(
        source.draw_template(rnd, source.draw_parameter(rnd)))
    with pytest.raises(UnsatisfiedAssumption):
        naive_only = datetimes(timezones=[])
        naive_only.reify(naive_only.from_basic(serialized))
def test_timezones_are_checked_in_deserialization():
    """A basic payload drawn from the default strategy carries timezone
    data, so a strategy that forbids timezones refuses to reify it."""
    strategy = datetimes()
    rng = Random(1)
    parameter = strategy.draw_parameter(rng)
    payload = strategy.to_basic(strategy.draw_template(rng, parameter))
    with pytest.raises(UnsatisfiedAssumption):
        restricted = datetimes(timezones=[])
        restricted.reify(restricted.from_basic(payload))
def test_year_bounds_are_respected_in_deserialization():
    """A serialized datetime must fail validation against any strategy
    whose year bounds exclude the serialized year."""
    strategy = datetimes()
    rng = Random(1)
    template = strategy.draw_template(rng, strategy.draw_parameter(rng))
    year = strategy.reify(template).year
    payload = strategy.to_basic(template)
    for incompatible in (datetimes(min_year=year + 1),
                         datetimes(max_year=year - 1)):
        with pytest.raises(BadData):
            incompatible.from_basic(payload)
Exemple #4
0
def test_year_bounds_are_respected_in_deserialization():
    """from_basic rejects payloads whose year lies outside the bounds."""
    strategy = datetimes()
    rng = Random(1)
    template = strategy.draw_template(rng, strategy.draw_parameter(rng))
    drawn_year = strategy.reify(template).year
    payload = strategy.to_basic(template)
    too_strict_low = datetimes(min_year=drawn_year + 1)
    too_strict_high = datetimes(max_year=drawn_year - 1)
    with pytest.raises(BadData):
        too_strict_low.from_basic(payload)
    with pytest.raises(BadData):
        too_strict_high.from_basic(payload)
def test_validates_timezone_name_from_db():
    """An unknown timezone name in the serialized form raises BadData."""
    strategy = datetimes(allow_naive=False)
    payload = strategy.to_basic(some_template(strategy))
    payload[-1] = u"Cabbage"  # not a real tz database name
    with pytest.raises(BadData):
        strategy.from_basic(payload)
Exemple #6
0
def test_overflow_in_simplify():
    """This is a test that we don't trigger a pytz bug when we're simplifying
    around MINYEAR where valid dates can produce an overflow error."""
    def is_not_utc(value):
        return value.tzinfo != pytz.UTC

    minimal(datetimes(max_year=MINYEAR), is_not_utc)
Exemple #7
0
def test_bordering_on_a_leap_year():
    """2004 is the only leap year in 2002-2005, so a Feb 29 example
    found within those bounds must fall in 2004."""
    found = find(
        datetimes(min_year=2002, max_year=2005),
        lambda d: d.month == 2 and d.day == 29,
        settings=settings(
            database=None, max_examples=10 ** 7, timeout=unlimited),
    )
    assert found.year == 2004
Exemple #8
0
def test_bordering_on_a_leap_year():
    """Minimizing a Feb 29 datetime within 2002-2005 must land on the
    single leap year in that range."""
    leap_day = minimal(
        datetimes(min_year=2002, max_year=2005),
        lambda d: d.month == 2 and d.day == 29,
        settings=settings(database=None, max_examples=10 ** 7),
    )
    assert leap_day.year == 2004
Exemple #9
0
def field_mappings():
    """Return the default Django-field -> Hypothesis-strategy mapping,
    building it once on first call and caching it in the module global."""
    global __default_field_mappings

    if __default_field_mappings is None:
        __default_field_mappings = {
            dm.SmallIntegerField: st.integers(-32768, 32767),
            dm.IntegerField: st.integers(-2147483648, 2147483647),
            dm.BigIntegerField:
                st.integers(-9223372036854775808, 9223372036854775807),
            dm.PositiveIntegerField: st.integers(0, 2147483647),
            dm.PositiveSmallIntegerField: st.integers(0, 32767),
            dm.BinaryField: st.binary(),
            dm.BooleanField: st.booleans(),
            dm.CharField: st.text(),
            dm.TextField: st.text(),
            dm.DateTimeField: datetimes(allow_naive=False),
            dm.EmailField: ff.fake_factory(u'email'),
            dm.FloatField: st.floats(),
            dm.NullBooleanField: st.one_of(st.none(), st.booleans()),
        }
    return __default_field_mappings
Exemple #10
0
def test_validates_timezone_name_from_db():
    """Corrupting the serialized timezone field must raise BadData."""
    strategy = datetimes(allow_naive=False)
    template = some_template(strategy)
    serialized = strategy.to_basic(template)
    # The last element of the basic form is the tz name; clobber it.
    serialized[-1] = u"Cabbage"
    with pytest.raises(BadData):
        strategy.from_basic(serialized)
Exemple #11
0
class CustomJSONEncoderTestCase(unittest.TestCase):
    """Property-based checks that serialize_utils.json_dumps matches the
    stdlib json.dumps where possible and extends it for UUIDs/datetimes."""

    @given(st.dictionaries(keys=st.text(), values=st.text()))
    def test_encoder_with_text(self, payload):
        """Text-only payloads serialize identically to stdlib json."""
        assert serialize_utils.json_dumps(payload) == json.dumps(payload)

    @given(st.dictionaries(keys=st.integers(), values=st.integers()))
    def test_encode_with_integers(self, payload):
        """Integer payloads serialize identically to stdlib json."""
        assert serialize_utils.json_dumps(payload) == json.dumps(payload)

    @given(st.dictionaries(keys=st.text(), values=st.uuids()))
    def test_encode_with_uuids(self, payload):
        """UUID values break stdlib json but our encoder handles them."""
        assume(payload != {})
        with pytest.raises(TypeError):
            json.dumps(payload)
        serialize_utils.json_dumps(payload)

    @given(st.dictionaries(keys=st.text(), values=datetimes()))
    def test_encode_with_datetimes(self, payload):
        """Datetime values break stdlib json but our encoder handles them."""
        assume(payload != {})
        with pytest.raises(TypeError):
            json.dumps(payload)
        serialize_utils.json_dumps(payload)
Exemple #12
0
def test_overflow_in_simplify():
    """This is a test that we don't trigger a pytz bug when we're simplifying
    around MINYEAR where valid dates can produce an overflow error."""
    minimal(datetimes(max_year=MINYEAR),
            lambda d: d.tzinfo != pytz.UTC)
Exemple #13
0
def test_validates_year_arguments_in_range():
    """Years far outside datetime's supported range must be rejected."""
    for bounds in ({'min_year': -10 ** 6}, {'max_year': -10 ** 6},
                   {'min_year': 10 ** 6}, {'max_year': 10 ** 6}):
        with pytest.raises(InvalidArgument):
            datetimes(**bounds).example()
Exemple #14
0
def test_validates_year_arguments_in_range():
    """min_year/max_year of +/-10**6 are outside datetime's range and
    must raise InvalidArgument."""
    for keyword in ('min_year', 'max_year'):
        for bad_year in (-10 ** 6, 10 ** 6):
            with pytest.raises(InvalidArgument):
                datetimes(**{keyword: bad_year}).example()
Exemple #15
0
def panicing_certs_fixture(draw):
    """Build an AcmeFixture with a drawn naive 'now', a panic interval of
    one minute to one day, and a non-empty mapping of panicing certs."""
    now = draw(datetimes(min_year=1971, max_year=2030, timezones=[]))
    panic_seconds = draw(s.integers(min_value=60, max_value=60 * 60 * 24))
    panic = timedelta(seconds=panic_seconds)
    cert_pairs = draw(s.lists(
        panicing_cert(now, panic), min_size=1, unique_by=lambda i: i[0]))
    return AcmeFixture(now=now, panic_interval=panic, certs=dict(cert_pairs))
Exemple #16
0
def test_can_draw_times_in_the_final_year():
    """Drawing from MAXYEAR must succeed often enough to be usable."""
    last_year = datetimes(min_year=MAXYEAR)
    rng = Random(1)
    successes = 0
    for _ in hrange(1000):
        try:
            last_year.reify(last_year.draw_and_produce(rng))
        except UnsatisfiedAssumption:
            continue
        successes += 1
    assert successes >= 100
def create_dummy_rate_file(rate_file):
    """Append example (timestamp, rate) rows to ``rate_file`` as CSV.

    Rates are small positive floats; timestamps are sorted datetimes
    between 2016 and the current year. Returns the generated rates and
    datetimes so callers can assert against the file contents.
    """
    rates = lists(
        floats(min_value=0.00001, allow_nan=False, allow_infinity=False),
        min_size=0, max_size=100).example()
    current_year = datetime.datetime.now().year
    date_times = lists(
        datetimes(min_year=2016, max_year=current_year),
        min_size=len(rates), max_size=len(rates)).map(sorted).example()
    with open(rate_file, 'a') as handle:
        writer = csv.writer(handle, lineterminator='\n')
        for stamp, rate in zip(date_times, rates):
            writer.writerow([stamp.strftime("%Y-%m-%d %H:%M:%S"), rate])
    return rates, date_times
Exemple #18
0
def panicing_certs_fixture(draw):
    """Strategy body producing an AcmeFixture whose certificates all sit
    inside their panic window relative to the drawn clock value."""
    now = draw(datetimes(min_year=1971, max_year=2030, timezones=[]))
    panic = timedelta(
        seconds=draw(s.integers(min_value=60, max_value=60 * 60 * 24)))
    certs = dict(draw(s.lists(panicing_cert(now, panic),
                              min_size=1,
                              unique_by=lambda pair: pair[0])))
    return AcmeFixture(now=now, panic_interval=panic, certs=certs)
Exemple #19
0
def test_can_draw_times_in_the_final_year():
    """At least 10% of 1000 draws in MAXYEAR should reify successfully."""
    strategy = datetimes(min_year=MAXYEAR)
    rng = Random(1)
    count = 0
    for _ in hrange(1000):
        try:
            strategy.reify(strategy.draw_and_produce(rng))
            count += 1
        except UnsatisfiedAssumption:
            pass
    assert count >= 100
Exemple #20
0
def lease_strategy(draw, dataset_id=st.uuids(), node_id=st.uuids()):
    """
    A hypothesis strategy to generate a ``Lease``.

    :param dataset_id: strategy producing the Lease's dataset_id.
    :param node_id: strategy producing the Lease's node_id.
    """
    drawn_dataset = draw(dataset_id)
    drawn_node = draw(node_id)
    expiration = draw(datetimes())
    return Lease(dataset_id=drawn_dataset, node_id=drawn_node,
                 expiration=expiration)
Exemple #21
0
class APIInputTestCase(unittest.TestCase):
    """Property-based tests for ``api.APIInput`` construction and to_id()."""

    @given(id_type=st.one_of(
        st.text(), st.sampled_from(epo_utils.constants.VALID_IDTYPES)),
           number=utils.doc_numbers(),
           kind=st.one_of(st.characters(), st.none()),
           country=st.one_of(st.text(max_size=2), st.none()),
           raw_date=st.one_of(hyp_datetime.datetimes(min_year=1900),
                              st.none()),
           date_format=st.sampled_from(_date_formats))
    def test_api_input_raises_ValueError_on_bad_input(self, id_type, number,
                                                      kind, country, raw_date,
                                                      date_format):
        """APIInput must raise ValueError iff any field is invalid."""
        # Dates are only acceptable when rendered in %Y%m%d format.
        invalid = date_format != '%Y%m%d' and raw_date is not None
        invalid |= (id_type not in epo_utils.constants.VALID_IDTYPES)
        # Whitespace-only country or kind values are treated as invalid.
        invalid |= country is not None and not country.strip()
        invalid |= kind is not None and not kind.strip()

        date = raw_date.strftime(date_format) if raw_date is not None else None

        if invalid:
            self.assertRaises(ValueError, api.APIInput, id_type, number, kind,
                              country, date)
        else:
            # Assert no error.
            api_input = api.APIInput(id_type, number, kind, country, date)
            self.assertIsInstance(api_input, api.APIInput)

    @given(utils.valid_api_input_args())
    def test_to_id_produces_correct_output(self, args):
        """to_id must join the parts according to the id_type convention."""

        id_type, number, kind, country, date = args
        api_input = api.APIInput(id_type, number, kind, country, date)

        # Numbers containing dotted/comma/slash-separated digit groups are
        # parenthesised, except for classification ids.
        if re.match(r'(\d+[.,/]\d+)+', number) and id_type != 'classification':
            number = '({})'.format(number)

        parts = map(
            str, [i for i in (country, number, kind, date) if i is not None])
        if id_type == 'epodoc':
            expected = ''.join(parts)
        elif id_type == 'classification':
            expected = str(number)
        else:
            expected = '.'.join(parts)

        self.assertEqual(expected, api_input.to_id())

    @given(utils.APIInputs, st.text())
    def test_to_id_raises_ValueError_on_bad_type(self, api_input, new_type):
        """Mutating id_type to an unknown value makes to_id raise."""
        assume(new_type not in epo_utils.constants.VALID_IDTYPES)
        api_input.id_type = new_type
        self.assertRaises(ValueError, api_input.to_id)
Exemple #22
0
def _generate_item(atom=False):
    """Build a feed-item dict of strategies: the required keys are always
    present, the optional ones are wrapped with _optional so they may be
    omitted. For atom feeds the unique_id reuses the link strategy."""
    link = fake_factory('url')
    required = {
        'title': _valid_text(allow_empty=False),
        'link': link,
        'description': _valid_text(),
    }
    maybe_present = {
        'content': _valid_text(),
        'author_email': fake_factory('email'),
        'author_name': fake_factory('name'),
        'author_link': fake_factory('url'),
        'pubdate': datetimes(),
        'updateddate': datetimes(),
        # 'comments': to_unicode(comments),
        'unique_id': link if atom else _valid_text(),
        # # 'enclosure': enclosure,
        # # 'categories': st.lists(_valid_text()),
        'item_copyright': _valid_text(),
        'ttl': st.integers(min_value=0),
    }
    item = dict(required)
    for key, strategy in maybe_present.items():
        item[key] = _optional(strategy)
    return item
Exemple #23
0
def lease_strategy(draw, dataset_id=st.uuids(), node_id=st.uuids()):
    """
    A hypothesis strategy to generate a ``Lease``.

    :param dataset_id: strategy used to create the Lease's dataset_id.
    :param node_id: strategy used to create the Lease's node_id.
    """
    return Lease(dataset_id=draw(dataset_id),
                 node_id=draw(node_id),
                 expiration=draw(datetimes()))
Exemple #24
0
def time_attr(draw, time=datetimes(timezones=[],
                                   max_year=datetime.datetime.utcnow().year,
                                   min_year=1900),
              delta=timedelta()):
    """
    Create an a.Time where it's always positive and doesn't have a massive time
    delta.
    """
    start = draw(time)
    end = start + draw(delta)
    # We can't download data from the future...
    assume(end < datetime.datetime.utcnow())

    return a.Time(start, end)
Exemple #25
0
def _generate_item(atom=False):
    """Return a dict of strategies describing one feed item.

    Required keys (title/link/description) are always generated; every
    key in the optional group is wrapped so it can be left out entirely.
    """
    link = fake_factory('url')
    item = {
        'title': _valid_text(allow_empty=False),
        'link': link,
        'description': _valid_text(),
    }
    optional = {
        'content': _valid_text(),
        'author_email': fake_factory('email'),
        'author_name': fake_factory('name'),
        'author_link': fake_factory('url'),
        'pubdate': datetimes(),
        'updateddate': datetimes(),
        # 'comments': to_unicode(comments),
        'unique_id': link if atom else _valid_text(),
        # # 'enclosure': enclosure,
        # # 'categories': st.lists(_valid_text()),
        'item_copyright': _valid_text(),
        'ttl': st.integers(min_value=0),
    }
    item.update({key: _optional(value) for key, value in optional.items()})
    return item
Exemple #26
0
def commit_statuses(**kwargs):
    """
    Create a strategy for GitHub commit status dicts.

    :param **kwargs: override the strategy for a particular key of the
        status dict, e.g. ``state=just(u'success')`` fixes that key.
    :return strategy: a fixed_dictionaries strategy.
    """
    defaults = {
        'updated_at': datetimes(timezones=['UTC']),
        'state': text(),
        'context': text(average_size=2),
        'target_url': text(average_size=2),
    }
    defaults.update(**kwargs)
    return fixed_dictionaries(defaults)
Exemple #27
0
def goes_time(draw, time=datetimes(timezones=[],
                                   max_year=datetime.datetime.utcnow().year,
                                   min_year=1981),
              delta=timedelta()):
    """
    Create an a.Time where it's always positive and doesn't have a massive time
    delta.
    """
    start = draw(time)
    end = start + draw(delta)
    # We can't download data from the future...
    assume(end < datetime.datetime.utcnow())

    span = TimeRange(start, end)
    # Ranges containing this date are excluded by the strategy.
    assume(parse_time("1983-05-01") not in span)

    return a.Time(span)
Exemple #28
0
def valid_api_input_args():
    """ Args-tuple builder for `epo_utils.ops.api.APIInput` """
    id_type = st.sampled_from(epo_utils.constants.VALID_IDTYPES)
    number = doc_numbers()
    kind = st.one_of(st.text(min_size=1, alphabet=_non_whitespace), st.none())
    country = st.one_of(
        st.text(alphabet=string.ascii_letters, min_size=1, max_size=2),
        st.none())
    raw_date = st.one_of(hyp_datetime.datetimes(min_year=1900), st.none())

    # Render dates as YYYYMMDD, passing None straight through.
    date = st.builds(
        lambda raw: None if raw is None else raw.strftime('%Y%m%d'),
        raw_date)
    return st.tuples(id_type, number, kind, country, date)
Exemple #29
0
def field_mappings():
    """Lazily construct and cache the shared Django-field -> strategy map."""
    global __default_field_mappings

    if __default_field_mappings is None:
        mappings = {}
        mappings[dm.SmallIntegerField] = st.integers(-32768, 32767)
        mappings[dm.IntegerField] = st.integers(-2147483648, 2147483647)
        mappings[dm.BigIntegerField] = st.integers(
            -9223372036854775808, 9223372036854775807)
        mappings[dm.PositiveIntegerField] = st.integers(0, 2147483647)
        mappings[dm.PositiveSmallIntegerField] = st.integers(0, 32767)
        mappings[dm.BinaryField] = st.binary()
        mappings[dm.BooleanField] = st.booleans()
        mappings[dm.DateTimeField] = datetimes(allow_naive=False)
        mappings[dm.FloatField] = st.floats()
        mappings[dm.NullBooleanField] = st.one_of(st.none(), st.booleans())
        __default_field_mappings = mappings
    return __default_field_mappings
Exemple #30
0
def test_can_generate_non_utc():
    """The default strategy can produce aware datetimes outside UTC."""
    minimal(datetimes(),
            lambda d: assume(d.tzinfo) and d.tzinfo.zone != u'UTC')
Exemple #31
0
@pytest.mark.online
@pytest.mark.parametrize(
    'query',
    [(a.Time('2012/10/4', '2012/10/6') & a.Instrument('eve') & a.Level(0))])
def test_fido(query):
    """Fido routes level-0 EVE queries to the EVEClient and fetches one
    file per entry in the query result."""
    results = Fido.search(query)
    fetched_client = results.get_response(0).client
    assert isinstance(results, UnifiedResponse)
    assert isinstance(fetched_client, eve.EVEClient)
    downloaded = Fido.fetch(results)
    assert len(downloaded) == results._numfile


@pytest.mark.online
@given(time_attr(time=datetimes(timezones=[], max_year=datetime.datetime.utcnow().year, min_year=2010)))
@settings(max_examples=2, timeout=240)
def test_levels(time):
    """
    Test the correct handling of level 0 / 1.
    The default should be level 1 from VSO, level 0 comes from EVEClient.
    """
    instrument = a.Instrument('EVE')

    default_result = Fido.search(time, instrument)
    assert isinstance(default_result.get_response(0).client, VSOClient)

    level0_result = Fido.search(time, instrument, a.Level(0))
    assert isinstance(level0_result.get_response(0).client, eve.EVEClient)
Exemple #32
0
class verifyingstatemachine(RuleBasedStateMachine):
    """This defines the set of acceptable operations on a Mercurial repository
    using Hypothesis's RuleBasedStateMachine.

    The general concept is that we manage multiple repositories inside a
    repos/ directory in our temporary test location. Some of these are freshly
    inited, some are clones of the others. Our current working directory is
    always inside one of these repositories while the tests are running.

    Hypothesis then performs a series of operations against these repositories,
    including hg commands, generating contents and editing the .hgrc file.
    If these operations fail in unexpected ways or behave differently in
    different configurations of Mercurial, the test will fail and a minimized
    .t test file will be written to the hypothesis-generated directory to
    exhibit that failure.

    Operations are defined as methods with @rule() decorators. See the
    Hypothesis documentation at
    http://hypothesis.readthedocs.org/en/release/stateful.html for more
    details."""

    # A bundle is a reusable collection of previously generated data which may
    # be provided as arguments to future operations.
    repos = Bundle('repos')
    paths = Bundle('paths')
    contents = Bundle('contents')
    branches = Bundle('branches')
    committimes = Bundle('committimes')

    def __init__(self):
        """Start from a clean repos/ tree containing one freshly inited
        repository (repos/repo1) and make it the current directory."""
        super(verifyingstatemachine, self).__init__()
        self.repodir = os.path.join(testtmp, "repos")
        if os.path.exists(self.repodir):
            shutil.rmtree(self.repodir)
        os.chdir(testtmp)
        # Shell-style transcript of every command run; becomes the .t file.
        self.log = []
        # Set by execute_step when a rule raises unexpectedly.
        self.failed = False
        # Per-repository --config overrides applied to every hg call.
        self.configperrepo = {}
        # Extensions ever enabled, and those whose commands were exercised.
        self.all_extensions = set()
        self.non_skippable_extensions = set()

        self.mkdirp("repos")
        self.cd("repos")
        self.mkdirp("repo1")
        self.cd("repo1")
        self.hg("init")

    def teardown(self):
        """On teardown we clean up after ourselves as usual, but we also
        do some additional testing: We generate a .t file based on our test
        run using run-test.py -i to get the correct output.

        We then test it in a number of other configurations, verifying that
        each passes the same test."""
        super(verifyingstatemachine, self).teardown()
        try:
            shutil.rmtree(self.repodir)
        except OSError:
            pass
        ttest = os.linesep.join("  " + l for l in self.log)
        os.chdir(testtmp)
        path = os.path.join(testtmp, "test-generated.t")
        with open(path, 'w') as o:
            o.write(ttest + os.linesep)
        # Run the generated test interactively once to capture the expected
        # output into the .t file itself.
        with open(os.devnull, "w") as devnull:
            rewriter = subprocess.Popen(
                [runtests, "--local", "-i", path],
                stdin=subprocess.PIPE,
                stdout=devnull,
                stderr=devnull,
            )
            rewriter.communicate("yes")
            with open(path, 'r') as i:
                ttest = i.read()

        e = None
        if not self.failed:
            # Re-run under --pure, then once per skippable extension with
            # that extension's --config flags stripped; every variant must
            # still produce identical output.
            try:
                output = subprocess.check_output(
                    [runtests, path, "--local", "--pure"],
                    stderr=subprocess.STDOUT)
                assert "Ran 1 test" in output, output
                for ext in (self.all_extensions -
                            self.non_skippable_extensions):
                    tf = os.path.join(testtmp,
                                      "test-generated-no-%s.t" % (ext, ))
                    with open(tf, 'w') as o:
                        for l in ttest.splitlines():
                            if l.startswith("  $ hg"):
                                l = l.replace(
                                    "--config %s=" %
                                    (extensionconfigkey(ext), ), "")
                            o.write(l + os.linesep)
                    with open(tf, 'r') as r:
                        t = r.read()
                        assert ext not in t, t
                    output = subprocess.check_output([
                        runtests,
                        tf,
                        "--local",
                    ],
                                                     stderr=subprocess.STDOUT)
                    assert "Ran 1 test" in output, output
            except subprocess.CalledProcessError as e:
                note(e.output)
        # NOTE(review): this code targets Python 2. Under Python 3 the
        # 'except ... as e' binding is deleted when the handler exits, so
        # the check below would raise NameError after a CalledProcessError.
        if self.failed or e is not None:
            with open(savefile, "wb") as o:
                o.write(ttest)
        if e is not None:
            raise e

    def execute_step(self, step):
        """Run one rule, recording failure (so teardown saves the .t file)
        before re-raising anything that isn't Hypothesis's own control flow."""
        try:
            return super(verifyingstatemachine, self).execute_step(step)
        except (HypothesisException, KeyboardInterrupt):
            raise
        except Exception:
            self.failed = True
            raise

    # Section: Basic commands.
    def mkdirp(self, path):
        """mkdir -p equivalent that also records the command in the log."""
        if os.path.exists(path):
            return
        self.log.append("$ mkdir -p -- %s" %
                        (pipes.quote(os.path.relpath(path)), ))
        os.makedirs(path)

    def cd(self, path):
        """Change to a (relative) directory, logging it; no-op for '.'."""
        path = os.path.relpath(path)
        if path == ".":
            return
        os.chdir(path)
        self.log.append("$ cd -- %s" % (pipes.quote(path), ))

    def hg(self, *args):
        """Run an hg command with the current repo's --config overrides."""
        extra_flags = []
        for key, value in self.config.items():
            extra_flags.append("--config")
            extra_flags.append("%s=%s" % (key, value))
        self.command("hg", *(tuple(extra_flags) + args))

    def command(self, *args):
        """Run an arbitrary command, logging it shell-quoted first."""
        self.log.append("$ " + ' '.join(map(pipes.quote, args)))
        subprocess.check_output(args, stderr=subprocess.STDOUT)

    # Section: Set up basic data
    # This section has no side effects but generates data that we will want
    # to use later.
    @rule(target=paths,
          source=st.lists(files, min_size=1).map(lambda l: os.path.join(*l)))
    def genpath(self, source):
        """Generate a relative file path from one or more name components."""
        return source

    @rule(target=committimes,
          when=datetimes(min_year=1970, max_year=2038) | st.none())
    def gentime(self, when):
        """Generate an optional commit timestamp (1970-2038, or None)."""
        return when

    @rule(target=contents,
          content=st.one_of(st.binary(),
                            st.text().map(lambda x: x.encode('utf-8'))))
    def gencontent(self, content):
        """Generate file contents as raw bytes."""
        return content

    @rule(
        target=branches,
        name=safetext,
    )
    def genbranch(self, name):
        """Generate a candidate branch name."""
        return name

    @rule(target=paths, source=paths)
    def lowerpath(self, source):
        """Derive a lowercased variant of an existing path."""
        return source.lower()

    @rule(target=paths, source=paths)
    def upperpath(self, source):
        """Derive an uppercased variant of an existing path."""
        return source.upper()

    # Section: Basic path operations
    @rule(path=paths, content=contents)
    def writecontent(self, path, content):
        """Write bytes to a path (creating parents), logging an equivalent
        shell command; skipped when the path is an existing directory."""
        self.unadded_changes = True
        if os.path.isdir(path):
            return
        parent = os.path.dirname(path)
        if parent:
            try:
                self.mkdirp(parent)
            except OSError:
                # It may be the case that there is a regular file that has
                # previously been created that has the same name as an ancestor
                # of the current path. This will cause mkdirp to fail with this
                # error. We just turn this into a no-op in that case.
                return
        with open(path, 'wb') as o:
            o.write(content)
        self.log.append(("$ python -c 'import binascii; "
                         "print(binascii.unhexlify(\"%s\"))' > %s") % (
                             binascii.hexlify(content),
                             pipes.quote(path),
                         ))

    @rule(path=paths)
    def addpath(self, path):
        """hg add a path if it exists on disk."""
        if os.path.exists(path):
            self.hg("add", "--", path)

    @rule(path=paths)
    def forgetpath(self, path):
        """hg forget a path, tolerating it being already untracked."""
        if os.path.exists(path):
            with acceptableerrors("file is already untracked", ):
                self.hg("forget", "--", path)

    @rule(s=st.none() | st.integers(0, 100))
    def addremove(self, s):
        """hg addremove, optionally with a similarity percentage."""
        args = ["addremove"]
        if s is not None:
            args.extend(["-s", str(s)])
        self.hg(*args)

    @rule(path=paths)
    def removepath(self, path):
        """hg remove a path, tolerating the usual refusal messages."""
        if os.path.exists(path):
            with acceptableerrors(
                    'file is untracked',
                    'file has been marked for add',
                    'file is modified',
            ):
                self.hg("remove", "--", path)

    @rule(
        message=safetext,
        amend=st.booleans(),
        when=committimes,
        addremove=st.booleans(),
        secret=st.booleans(),
        close_branch=st.booleans(),
    )
    def maybecommit(self, message, amend, when, addremove, secret,
                    close_branch):
        """Attempt an hg commit with a drawn combination of flags,
        accumulating the error messages each flag can legitimately cause."""
        command = ["commit"]
        errors = ["nothing changed"]
        if amend:
            errors.append("cannot amend public changesets")
            command.append("--amend")
        command.append("-m" + pipes.quote(message))
        if secret:
            command.append("--secret")
        if close_branch:
            command.append("--close-branch")
            errors.append("can only close branch heads")
        if addremove:
            command.append("--addremove")
        if when is not None:
            # Boundary years hit hg's date validation limits.
            if when.year == 1970:
                errors.append('negative date value')
            if when.year == 2038:
                errors.append('exceeds 32 bits')
            command.append("--date=%s" %
                           (when.strftime('%Y-%m-%d %H:%M:%S %z'), ))

        with acceptableerrors(*errors):
            self.hg(*command)

    # Section: Repository management
    @property
    def currentrepo(self):
        """Name of the repository we are currently inside (cwd basename)."""
        return os.path.basename(os.getcwd())

    @property
    def config(self):
        """Mutable --config override dict for the current repository."""
        return self.configperrepo.setdefault(self.currentrepo, {})

    @rule(
        target=repos,
        source=repos,
        name=reponames,
    )
    def clone(self, source, name):
        """Clone an existing repo under a new name and cd into it."""
        if not os.path.exists(os.path.join("..", name)):
            self.cd("..")
            self.hg("clone", source, name)
            self.cd(name)
        return name

    @rule(
        target=repos,
        name=reponames,
    )
    def fresh(self, name):
        """Create and init a brand new repository and cd into it."""
        if not os.path.exists(os.path.join("..", name)):
            self.cd("..")
            self.mkdirp(name)
            self.cd(name)
            self.hg("init")
        return name

    @rule(name=repos)
    def switch(self, name):
        """cd into a previously created repository."""
        self.cd(os.path.join("..", name))
        assert self.currentrepo == name
        assert os.path.exists(".hg")

    @rule(target=repos)
    def origin(self):
        """The initial repository is always available as a target."""
        return "repo1"

    @rule()
    def pull(self, repo=repos):
        """hg pull from the default path, tolerating its absence.

        NOTE(review): @rule() supplies no arguments, so 'repo' always holds
        the Bundle default and is never used in the body.
        """
        with acceptableerrors(
                "repository default not found",
                "repository is unrelated",
        ):
            self.hg("pull")

    @rule(newbranch=st.booleans())
    def push(self, newbranch):
        """hg push, optionally allowing new branches."""
        with acceptableerrors(
                "default repository not configured",
                "no changes found",
        ):
            if newbranch:
                self.hg("push", "--new-branch")
            else:
                with acceptableerrors("creates new branches"):
                    self.hg("push")

    # Section: Simple side effect free "check" operations
    @rule()
    def log(self):
        self.hg("log")

    @rule()
    def verify(self):
        self.hg("verify")

    @rule()
    def diff(self):
        # --nodates keeps the transcript stable across runs.
        self.hg("diff", "--nodates")

    @rule()
    def status(self):
        self.hg("status")

    @rule()
    def export(self):
        self.hg("export")

    # Section: Branch management
    @rule()
    def checkbranch(self):
        """Query the current branch name."""
        self.hg("branch")

    @rule(branch=branches)
    def switchbranch(self, branch):
        """Create/switch to a branch, tolerating invalid-name errors."""
        with acceptableerrors(
                'cannot use an integer as a name',
                'cannot be used in a name',
                'a branch of the same name already exists',
                'is reserved',
        ):
            self.hg("branch", "--", branch)

    @rule(branch=branches, clean=st.booleans())
    def update(self, branch, clean):
        """hg update to a branch, optionally discarding local changes."""
        with acceptableerrors(
                'unknown revision',
                'parse error',
        ):
            if clean:
                self.hg("update", "-C", "--", branch)
            else:
                self.hg("update", "--", branch)

    # Section: Extension management
    def hasextension(self, extension):
        """True if the extension is enabled for the current repo."""
        return extensionconfigkey(extension) in self.config

    def commandused(self, extension):
        """Mark an extension's command as used, so teardown won't try the
        generated test without that extension enabled."""
        assert extension in self.all_extensions
        self.non_skippable_extensions.add(extension)

    @rule(extension=extensions)
    def addextension(self, extension):
        """Enable an extension for the current repository."""
        self.all_extensions.add(extension)
        self.config[extensionconfigkey(extension)] = ""

    @rule(extension=extensions)
    def removeextension(self, extension):
        """Disable an extension for the current repository, if enabled."""
        self.config.pop(extensionconfigkey(extension), None)

    # Section: Commands from the shelve extension
    @rule()
    @precondition(lambda self: self.hasextension("shelve"))
    def shelve(self):
        self.commandused("shelve")
        with acceptableerrors("nothing changed"):
            self.hg("shelve")

    @rule()
    @precondition(lambda self: self.hasextension("shelve"))
    def unshelve(self):
        self.commandused("shelve")
        with acceptableerrors("no shelved changes to apply"):
            self.hg("unshelve")
Exemple #33
0
def test_can_generate_non_naive_datetime():
    """Shrinking towards an aware datetime settles on UTC."""
    aware = minimal(datetimes(allow_naive=True), lambda d: d.tzinfo)
    assert aware.tzinfo == pytz.UTC
Exemple #34
0
def test_simplifies_towards_midnight():
    """With no constraint, shrinking zeroes every sub-day component."""
    simplest = minimal(datetimes())
    for component in ('hour', 'minute', 'second', 'microsecond'):
        assert getattr(simplest, component) == 0
Exemple #35
0
def test_min_year_is_respected():
    """The shrunk example of a min_year-bounded strategy sits at the bound."""
    smallest = minimal(datetimes(min_year=2003))
    assert smallest.year == 2003
Exemple #36
0
def test_can_find_each_month():
    """Every month 1-12 is reachable from the default strategy."""
    for wanted in hrange(1, 13):
        find_any(datetimes(), lambda dt: dt.month == wanted)
Exemple #37
0
def test_can_find_after_the_year_2000():
    """The smallest datetime strictly after 2000 has year 2001."""
    result = minimal(datetimes(), lambda x: x.year > 2000)
    assert result.year == 2001
Exemple #38
0
def test_can_find_after_the_year_2000():
    """Shrinking a 'year > 2000' example bottoms out at 2001."""
    assert minimal(datetimes(), lambda dt: dt.year > 2000).year == 2001
Exemple #39
0
def test_restricts_to_allowed_set_of_timezones():
    """Generated datetimes only carry one of the whitelisted zones."""
    allowed = list(map(pytz.timezone, list(pytz.all_timezones)[:3]))
    example = minimal(datetimes(timezones=allowed))
    assert any(zone.zone == example.tzinfo.zone for zone in allowed)
Exemple #40
0
from __future__ import division, print_function, absolute_import

from datetime import MINYEAR

import pytz
import pytest

import hypothesis._settings as hs
from hypothesis import given, assume, settings
from hypothesis.errors import InvalidArgument
from hypothesis.strategytests import strategy_test_suite
from hypothesis.extra.datetime import datetimes
from hypothesis.internal.debug import minimal
from hypothesis.internal.compat import hrange

# Run Hypothesis' generic strategy conformance suite against three
# datetimes() configurations: default, tz-aware only, and no timezones.
TestStandardDescriptorFeatures1 = strategy_test_suite(datetimes())
TestStandardDescriptorFeatures2 = strategy_test_suite(
    datetimes(allow_naive=False),
)
TestStandardDescriptorFeatures3 = strategy_test_suite(datetimes(timezones=[]))


def test_can_find_after_the_year_2000():
    """Minimizing under ``year > 2000`` gives the boundary year 2001."""
    example = minimal(datetimes(), lambda x: x.year > 2000)
    assert example.year == 2001


def test_can_find_before_the_year_2000():
    """Minimizing under ``year < 2000`` gives the boundary year 1999."""
    example = minimal(datetimes(), lambda x: x.year < 2000)
    assert example.year == 1999

Exemple #41
0
def test_can_find_non_midnight():
    """The simplest datetime with a nonzero hour has hour == 1."""
    found = minimal(datetimes(), lambda x: x.hour != 0)
    assert found.hour == 1
Exemple #42
0
def test_can_find_before_the_year_2000():
    """1999 is the shrink target for datetimes strictly before 2000."""
    assert 1999 == minimal(datetimes(), lambda x: x.year < 2000).year
Exemple #43
0
def test_max_year_is_respected():
    """With max_year=1998 the minimal example sits exactly on the bound."""
    result = minimal(datetimes(max_year=1998))
    assert result.year == 1998
Exemple #44
0
def test_can_find_before_the_year_2000():
    """Shrinking a pre-2000 datetime lands on the boundary year 1999."""
    earliest = minimal(datetimes(), lambda x: x.year < 2000)
    assert earliest.year == 1999
Exemple #45
0
def test_can_find_each_month():
    """Every month 1..12 is reachable from the datetimes strategy.

    Bug fix: the loop previously used ``hrange(1, 12)``, whose exclusive
    stop value skipped December (month 12) entirely, so the test never
    checked that month — the sibling copy of this test correctly uses
    ``hrange(1, 13)``.
    """
    for month in hrange(1, 13):
        # Bind the loop variable as a default to avoid any late-binding
        # surprise, even though minimal() evaluates immediately.
        minimal(datetimes(), lambda x, m=month: x.month == m)
Exemple #46
0
def test_can_find_midnight():
    """The strategy can produce a datetime at exactly midnight."""
    def at_midnight(x):
        return x.hour == x.minute == x.second == 0
    datetimes().filter(at_midnight).example()
Exemple #47
0
def test_can_find_midnight():
    """minimal() can satisfy the all-zero time-of-day predicate."""
    def is_midnight(x):
        return x.hour == 0 and x.minute == 0 and x.second == 0
    minimal(datetimes(), is_midnight)
Exemple #48
0
def test_can_find_on_the_minute():
    """Datetimes whose seconds field is zero are reachable."""
    find_any(datetimes(), lambda x: not x.second)
Exemple #49
0
def test_can_find_non_midnight():
    """Shrinking a nonzero hour stops at 1."""
    assert 1 == minimal(datetimes(), lambda x: x.hour != 0).hour
Exemple #50
0
def test_can_generate_naive_datetime():
    """Naive (tzinfo-less) datetimes are reachable when allowed."""
    def is_naive(d):
        return d.tzinfo is None
    find_any(datetimes(allow_naive=True), is_naive)
Exemple #51
0
def test_can_find_on_the_minute():
    """A datetime with a nonzero seconds field is reachable.

    NOTE(review): the name suggests ``second == 0`` but the predicate asks
    for ``second != 0`` — possibly misnamed upstream; behavior kept as-is.
    """
    minimal(datetimes(), lambda x: x.second != 0)
Exemple #52
0
def test_can_generate_non_utc():
    """A timezone-aware datetime outside the UTC zone is reachable."""
    def aware_and_non_utc(d):
        return assume(d.tzinfo) and d.tzinfo.zone != u'UTC'
    datetimes().filter(aware_and_non_utc).example()
Exemple #53
0
def test_simplifies_towards_midnight():
    """All time-of-day components of the minimal datetime are zero."""
    simplest = minimal(datetimes())
    observed = (simplest.hour, simplest.minute,
                simplest.second, simplest.microsecond)
    assert observed == (0, 0, 0, 0)
Exemple #54
0
        # with a v1 config.
        result = migrate_configuration(1, 3, self.v1_config, StubMigration)
        # Compare the v1 --> v3 upgrade to the direct result of the
        # v2 --> v3 upgrade on the v2 config, Both should be identical
        # and valid v3 configs.
        self.assertEqual(result, StubMigration.upgrade_from_v2(v2_config))


# Strategy for Dataset objects: random UUID ids, arbitrary integer sizes.
DATASETS = st.builds(
    Dataset,
    dataset_id=st.uuids(),
    maximum_size=st.integers(),
)

# `datetime`s accurate to seconds
DATETIMES_TO_SECONDS = datetimes().map(lambda d: d.replace(microsecond=0))

# Leases pair a dataset id with a node id; expiration is optional (None
# or a seconds-accurate datetime).
LEASES = st.builds(Lease,
                   dataset_id=st.uuids(),
                   node_id=st.uuids(),
                   expiration=st.one_of(st.none(), DATETIMES_TO_SECONDS))

# Constrain primary to be True so that we don't get invariant errors from Node
# due to having two differing manifestations of the same dataset id.
MANIFESTATIONS = st.builds(Manifestation,
                           primary=st.just(True),
                           dataset=DATASETS)
# Docker image coordinates built from arbitrary text fields.
IMAGES = st.builds(DockerImage, tag=st.text(), repository=st.text())
NONE_OR_INT = st.one_of(st.none(), st.integers())
# TCP port numbers restricted to the valid range 1-65535.
ST_PORTS = st.integers(min_value=1, max_value=65535)
PORTS = st.builds(Port, internal_port=ST_PORTS, external_port=ST_PORTS)
Exemple #55
0
def test_can_generate_naive_datetime():
    """minimal() can find a datetime with no timezone attached."""
    def lacks_tzinfo(d):
        return not d.tzinfo
    minimal(datetimes(allow_naive=True), lacks_tzinfo)
Exemple #56
0
def test_needs_permission_for_no_timezones():
    """allow_naive=False with an empty timezone list must be rejected."""
    with pytest.raises(InvalidArgument):
        # Keep construction inside the raises block in case validation
        # happens at strategy-build time rather than at example time.
        contradictory = datetimes(allow_naive=False, timezones=[])
        contradictory.example()
Exemple #57
0
def test_can_generate_non_naive_datetime():
    """Shrinking among tz-aware datetimes lands on UTC."""
    example = minimal(datetimes(allow_naive=True), lambda d: d.tzinfo)
    assert example.tzinfo == pytz.UTC