Example #1
def lease_strategy(draw, dataset_id=st.uuids(), node_id=st.uuids()):
    """
    A hypothesis strategy to generate a ``Lease``

    :param dataset_id: A strategy to use to create the dataset_id for the
        Lease.

    :param node_id: A strategy to use to create the node_id for the Lease.
    """
    return Lease(
        dataset_id=draw(dataset_id),
        node_id=draw(node_id),
        expiration=draw(datetimes())
    )
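A minimal, self-contained sketch of how a composite strategy like the one above is typically defined and consumed, assuming it is decorated with ``@st.composite``; the ``Lease`` dataclass here is a stand-in for the original project's class.

from dataclasses import dataclass
from datetime import datetime
from uuid import UUID

import hypothesis.strategies as st
from hypothesis import given


@dataclass(frozen=True)
class Lease:
    dataset_id: UUID
    node_id: UUID
    expiration: datetime


@st.composite
def lease_strategy(draw, dataset_id=st.uuids(), node_id=st.uuids()):
    # Each ``draw`` call pulls one value from whichever strategy it is given.
    return Lease(
        dataset_id=draw(dataset_id),
        node_id=draw(node_id),
        expiration=draw(st.datetimes()),
    )


@given(lease_strategy())
def test_lease_ids_are_uuids(lease):
    assert isinstance(lease.dataset_id, UUID)
    assert isinstance(lease.node_id, UUID)

Passing strategies as parameters (``dataset_id=st.uuids()``) lets callers override how each field is generated without changing the strategy itself.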
Example #2
def jobs(
        draw,
        ids=uuids(),
        statuses=sampled_from(JobInterface.JobStatus),
        parameters=dictionaries(text(), text()),
        results=dictionaries(text(), text()),
        dates_submitted=datetimes(),
        registration_schemas=dictionaries(text(), text()),
        result_schemas=dictionaries(text(), text())
) -> JobInterface:
    """

    :param draw: A function that can take a strategy and draw a datum from it
    :param ids: A hypothesis strategy (statisticians should read "random
        variable"), that represents the set of all valid job IDs
    :param statuses: A hypothesis strategy that samples from the set of all
        allowed job statuses
    :param parameters: A hypothesis strategy that samples from all job
        parameters
    :param results: A hypothesis strategy that represents the possible results
    :param dates_submitted: A hypothesis strategy that represents the
        possible dates that can be submitted
    :param registration_schemas: The possible job registration schemas
    :param result_schemas: The possible job result schemas
    :return: A randomly-generated implementation of :class:`JobInterface`
    """
    return Job(
        draw(ids), draw(statuses), draw(parameters), draw(results),
        draw(dates_submitted),
        draw(registration_schemas),
        draw(result_schemas)
    )
Example #3
def node_strategy(
        draw,
        min_number_of_applications=0,
        stateful_applications=False,
        uuid=st.uuids(),
        applications=application_strategy()
):
    """
    A hypothesis strategy to generate a ``Node``

    :param uuid: The strategy to use to generate the Node's uuid.

    :param applications: The strategy to use to generate the applications on
        the Node.
    """
    applications = {
        a.name: a for a in
        draw(
            st.lists(
                application_strategy(stateful=stateful_applications),
                min_size=min_number_of_applications,
                average_size=2,
                max_size=5
            )
        )
    }
    return Node(
        uuid=draw(uuid),
        applications=applications,
        manifestations={
            a.volume.manifestation.dataset_id: a.volume.manifestation
            for a in applications.values()
            if a.volume is not None
        }
    )
Example #4
def node_strategy(
        draw,
        uuid=st.uuids(),
        applications=application_strategy()
):
    """
    A hypothesis strategy to generate a ``Node``

    :param uuid: The strategy to use to generate the Node's uuid.

    :param applications: The strategy to use to generate the applications on
        the Node.
    """
    applications = draw(st.lists(
        application_strategy(),
        min_size=0,
        average_size=2,
        max_size=5
    ))
    return Node(
        uuid=draw(uuid),
        applications={
            a.name: a
            for a in applications
        }
    )
Example #5
def services(
        draw,
        service_id=uuids(), name=text(), description=text(),
        is_available=booleans()
) -> IService:
    return Service(
        draw(service_id), draw(name), draw(description), draw(is_available)
    )
Example #6
def interleaved_strategy_factory():
    '''
    Generate interleaved FASTQ records, guaranteeing that the IDs are the
    same for each forward/reverse pair. ``*_kwargs`` are supplied to
    ``seq_rec_strategy_factory`` to customize the forward and reverse reads.
    '''
    strategy = st.uuids().map(str).flatmap(
        lambda id:
            st.tuples(
                seq_rec_strategy_factory(5, 20, idstrat=st.shared(st.just(id), key=id)),
                seq_rec_strategy_factory(5, 20, idstrat=st.shared(st.just(id), key=id))))
    return strategy
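A reduced, self-contained sketch of the same pairing idea: draw one UUID first, then build both records from it so the pair is guaranteed to share an ID. ``read_pair`` and its dict fields are illustrative stand-ins for ``seq_rec_strategy_factory``.

import hypothesis.strategies as st
from hypothesis import given


def read_pair():
    # Draw a single ID, then derive both members of the pair from it.
    return st.uuids().map(str).flatmap(
        lambda read_id: st.tuples(
            st.fixed_dictionaries({"id": st.just(read_id), "dir": st.just("fwd")}),
            st.fixed_dictionaries({"id": st.just(read_id), "dir": st.just("rev")}),
        )
    )


@given(read_pair())
def test_pair_ids_match(pair):
    forward, reverse = pair
    assert forward["id"] == reverse["id"]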
Example #7
def field_mappings():
    global __default_field_mappings

    if __default_field_mappings is None:
        # Sized fields are handled in _get_strategy_for_field()
        # URL fields are not yet handled
        __default_field_mappings = {
            dm.SmallIntegerField:
            st.integers(-32768, 32767),
            dm.IntegerField:
            st.integers(-2147483648, 2147483647),
            dm.BigIntegerField:
            st.integers(-9223372036854775808, 9223372036854775807),
            dm.PositiveIntegerField:
            st.integers(0, 2147483647),
            dm.PositiveSmallIntegerField:
            st.integers(0, 32767),
            dm.BinaryField:
            st.binary(),
            dm.BooleanField:
            st.booleans(),
            dm.DateField:
            st.dates(),
            dm.DateTimeField:
            st.datetimes(timezones=get_tz_strat()),
            dm.DurationField:
            st.timedeltas(),
            dm.EmailField:
            emails(),
            dm.FloatField:
            st.floats(),
            dm.NullBooleanField:
            st.one_of(st.none(), st.booleans()),
            dm.TimeField:
            st.times(timezones=get_tz_strat()),
            dm.UUIDField:
            st.uuids(),
        }

        # SQLite does not support timezone-aware times, or timedeltas that
        # don't fit in six bytes of microseconds, so we override those
        db = getattr(django_settings, 'DATABASES', {}).get('default', {})
        if db.get('ENGINE', '').endswith('.sqlite3'):  # pragma: no branch
            sqlite_delta = timedelta(microseconds=2**47 - 1)
            __default_field_mappings.update({
                dm.TimeField:
                st.times(),
                dm.DurationField:
                st.timedeltas(-sqlite_delta, sqlite_delta),
            })

    return __default_field_mappings
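A hedged sketch of how such a field-type-to-strategy mapping might be consumed in a test; the plain string keys below stand in for the Django field classes (``dm.*``) used above.

from uuid import UUID

import hypothesis.strategies as st
from hypothesis import given

FIELD_STRATEGIES = {
    "small_integer": st.integers(-32768, 32767),
    "boolean": st.booleans(),
    "uuid": st.uuids(),
}


@given(FIELD_STRATEGIES["uuid"])
def test_uuid_field_strategy_yields_uuids(value):
    # The lookup returns a strategy; @given draws concrete values from it.
    assert isinstance(value, UUID)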
Example #8
def get_selection_poorly_formed(
    draw: _DrawType, ids=uuids(), bools=booleans(), txt=text(), vote=integers(0, 1)
) -> Tuple[str, PlaintextBallotSelection]:
    use_none = draw(bools)
    if use_none:
        extra_data = None
    else:
        extra_data = draw(txt)
    object_id = f"selection-{draw(ids)}"
    return (
        object_id,
        PlaintextBallotSelection(object_id, draw(vote), draw(bools), extra_data),
    )
Example #9
def nodes(draw: Drawable) -> Node:
    "Generates Node objects"
    return Node(
        name=draw(text()),
        typeOf=draw(text()),
        properties=draw(
            dictionaries(
                keys=uuids().map(str),
                values=one_of(properties(), boolean_properties(),
                              percentage_properties()),
                max_size=5,
            )),
    )
Example #10
def node_uuid_pool_strategy(draw, min_number_of_nodes=1):
    """
    A strategy to create a pool of node uuids.

    :param min_number_of_nodes: The minimum number of nodes to create.

    :returns: A strategy to create an iterable of node uuids.
    """
    max_number_of_nodes = max(min_number_of_nodes, 10)
    return draw(
        st.lists(uuids(),
                 min_size=min_number_of_nodes,
                 max_size=max_number_of_nodes))
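A self-contained sketch of the same pattern, assuming the function above is decorated with ``@st.composite``; ``uuid_pool`` is a renamed stand-in used only for illustration.

import hypothesis.strategies as st
from hypothesis import given


@st.composite
def uuid_pool(draw, min_number_of_nodes=1):
    # Cap the pool at 10 nodes unless a larger minimum is requested.
    max_number_of_nodes = max(min_number_of_nodes, 10)
    return draw(
        st.lists(
            st.uuids(),
            min_size=min_number_of_nodes,
            max_size=max_number_of_nodes,
        )
    )


@given(uuid_pool(min_number_of_nodes=3))
def test_pool_respects_size_bounds(pool):
    assert 3 <= len(pool) <= 10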
Example #11
def project_creation_request(draw):
    return draw(
        sampled_from([
            {
                "name": draw(text()),
                "description": draw(text())
            },
            {
                "name": draw(text()),
                "description": draw(text()),
                "groups": [str(x) for x in draw(lists(uuids(version=4)))],
            },
        ]))
Example #12
class TestCompileGeneral(TestCase):
    @given(literals
           | st.dates()
           | st.datetimes()
           | st.decimals(allow_nan=False)
           | st.fractions()
           | st.timedeltas()
           | st.times()
           | st.uuids())
    def test_compile_pickle(self, form):
        self.assertEqual(form, eval(compiler.Compiler().pickle(form)))

    @given(literals)
    def test_compile_literal(self, form):
        self.assertEqual(form, eval(compiler.Compiler().atom(form)))

    def test_maybe_macro_error(self):
        with self.assertRaises(compiler.CompileError):
            compiler.readerless(('hissp.basic.._macro_.foobar', ))

    def test_post_compile_warn(self):
        c = compiler.Compiler('oops')
        with self.assertWarns(compiler.PostCompileWarning):
            python = c.compile([
                (
                    'operator..truediv',
                    0,
                    0,
                ),
                (
                    'print',
                    (
                        'quote',
                        'oops',
                    ),
                ),
            ])
        self.assertIn(
            """\
__import__('operator').truediv(
  (0),
  (0))

# Traceback (most recent call last):""", python)
        self.assertIn(
            """\
# ZeroDivisionError: division by zero
# 

print(
  'oops')""", python)
Example #13
def process_props() -> st.SearchStrategy[ProcessProps]:
    return st.builds(
        ProcessProps,
        st.builds(
            dict,
            node_key=st.uuids(),
            process_id=st.integers(min_value=1, max_value=2**32),
            created_timestamp=st.integers(min_value=0, max_value=2**48),
            terminate_time=st.integers(min_value=0, max_value=2**48),
            image_name=st.text(),
            process_name=st.text(),
            arguments=st.text(),
        ),
    )
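A minimal sketch of the nested ``st.builds`` pattern above, building a plain dict instead of the project-specific ``ProcessProps`` wrapper.

import hypothesis.strategies as st
from hypothesis import given


def process_dicts():
    # st.builds(dict, key=strategy, ...) produces dicts with those keys,
    # each value drawn from its own strategy.
    return st.builds(
        dict,
        node_key=st.uuids().map(str),
        process_id=st.integers(min_value=1, max_value=2**32),
        process_name=st.text(),
    )


@given(process_dicts())
def test_process_dict_shape(props):
    assert set(props) == {"node_key", "process_id", "process_name"}
    assert 1 <= props["process_id"] <= 2**32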
Example #14
def get_selection_well_formed(
    draw: _DrawType, uuids=uuids(), bools=booleans(), text=text()
) -> Tuple[str, PlaintextBallotSelection]:
    use_none = draw(bools)
    if use_none:
        extra_data = None
    else:
        extra_data = draw(text)
    object_id = f"selection-{draw(uuids)}"
    return (
        object_id,
        PlaintextBallotSelection(object_id, f"{draw(bools)}", draw(bools),
                                 extra_data),
    )
Example #15
def address(
        draw,
        id=st.uuids(),
        door_number=st.integers(),
        house_name=st.one_of(st.text() | st.none()),
        street_name=st.text(),
        geo_position=geo_positions(),
) -> Address:
    return Address(
        draw(id),
        draw(door_number),
        draw(house_name),
        draw(street_name),
        draw(geo_position),
    )
Example #16
def services(
    draw,
    ids=uuids(),
    names=text(),
    descriptions=text(),
    registration_schemas=dictionaries(text(), text()),
    result_schemas=dictionaries(text(), text()),
    are_available=booleans(),
    service_job_lists=job_lists(),
    timeouts=timedeltas()
) -> ServiceInterface:
    return Service(draw(ids), draw(names), draw(descriptions),
                   draw(registration_schemas), draw(result_schemas),
                   draw(are_available), draw(service_job_lists),
                   draw(timeouts))
Example #17
def openfile(draw,
             mode=st.sampled_from(('rb', 'wb', 'wb+', 'rb+')),
             precontent=st.binary(),
             postcontent=st.binary()):
    mode_ = draw(mode)
    return ('openfile', {
        'path':
        str(draw(st.uuids())),
        'mode':
        mode_,
        'precontent':
        draw(precontent) if 'r' in mode_ else None,
        'postcontent':
        draw(postcontent) if set('+w') & set(mode_) else None
    })
Example #18
class TestServiceDecorator(TestEndpointForService):
    """
    Contains tests for the service decorator
    """
    @given(text(), service_lists())
    def test_input_not_uuid(self, service_id: str,
                            service_list: ServiceList) -> None:
        """

        :param service_id: The randomly-generated ID to test
        :param service_list: A randomly-generated set of services
        :return:
        """
        assume(not self._is_uuid(service_id))
        endpoint = self.ServiceEndpointSubtype(service_list)

        with self.assertRaises(NotUUIDError):
            endpoint.get(service_id)

    @given(uuids(), service_lists())
    def test_input_service_not_found(self, service_id: UUID,
                                     service_list: ServiceList) -> None:
        assume(service_id not in (service.id for service in service_list))
        endpoint = self.ServiceEndpointSubtype(service_list)

        with self.assertRaises(ServiceWithUUIDNotFound):
            endpoint.get(str(service_id))

    @given(service_lists())
    @settings(perform_health_check=False)
    def test_get_service(self, service_list: ServiceList):
        assume(len(service_list) > 0)
        service_id = self._first_service_id(service_list)
        endpoint = self.ServiceEndpointSubtype(service_list)
        response = endpoint.get(str(service_id))
        self.assertEqual(response.status_code, 200)

    @staticmethod
    def _is_uuid(service_id: str) -> bool:
        try:
            UUID(service_id)
            return True
        except ValueError:
            return False

    @staticmethod
    def _first_service_id(service_list: ServiceList):
        return next((service.id for service in service_list))
Example #19
class TestDelItem(TestJobListRequiringQuery):
    """
    Contains unit tests for the ``__delitem__`` method of the job list. This
    method overloads ``del`` in order to allow easy deletion of jobs
    """
    @given(uuids())
    def test_that_delitem_deletes_the_job(self, job_id: UUID) -> None:
        """

        :param job_id: The ID of the job to delete
        """
        del self.job_list[job_id]
        self.assertEqual(
            mock.call(self.root_query.filter_by(id=job_id).first()),
            self.session.delete.call_args
        )
Example #20
def _build_node(applications):
    # All the manifestations in `applications`.
    app_manifestations = set(app.volume.manifestation for app in applications
                             if app.volume)
    # A set that contains all of those, plus an arbitrary set of
    # manifestations.
    dataset_ids = frozenset(app.volume.manifestation.dataset_id
                            for app in applications if app.volume)
    manifestations = (st.sets(
        MANIFESTATIONS.filter(lambda m: m.dataset_id not in dataset_ids)).map(
            pset).map(lambda ms: ms.union(app_manifestations)).map(
                lambda ms: dict((m.dataset.dataset_id, m) for m in ms)))
    return st.builds(Node,
                     uuid=st.uuids(),
                     applications=st.just(applications),
                     manifestations=manifestations)
Example #21
def node_uuid_pool_strategy(draw, min_number_of_nodes=1):
    """
    A strategy to create a pool of node uuids.

    :param min_number_of_nodes: The minimum number of nodes to create.

    :returns: A strategy to create an iterable of node uuids.
    """
    max_number_of_nodes = max(min_number_of_nodes, 10)
    return draw(
        st.lists(
            uuids(),
            min_size=min_number_of_nodes,
            max_size=max_number_of_nodes
        )
    )
Example #22
def plaintext_voted_ballot(draw: _DrawType,
                           metadata: InternalElectionDescription):
    """
    Given an `InternalElectionDescription` object, generates an arbitrary `PlaintextBallot` with the
    choices made randomly.
    :param draw: Hidden argument, used by Hypothesis.
    :param metadata: Any `InternalElectionDescription`
    """

    num_ballot_styles = len(metadata.ballot_styles)
    assert num_ballot_styles > 0, "invalid election with no ballot styles"

    # pick a ballot style at random
    ballot_style = metadata.ballot_styles[draw(
        integers(0, num_ballot_styles - 1))]

    contests = metadata.get_contests_for(ballot_style.object_id)
    assert len(contests) > 0, "invalid ballot style with no contests in it"

    voted_contests: List[PlaintextBallotContest] = []
    for contest in contests:
        assert contest.is_valid(), "every contest needs to be valid"
        n = contest.number_elected  # we need exactly this many 1's, and the rest 0's
        ballot_selections = contest.ballot_selections
        assert len(ballot_selections) >= n

        random = Random(draw(integers()))
        random.shuffle(ballot_selections)
        cut_point = draw(integers(0, n))
        yes_votes = ballot_selections[:cut_point]
        no_votes = ballot_selections[cut_point:]

        voted_selections = [
            selection_from(
                description, is_placeholder=False, is_affirmative=True)
            for description in yes_votes
        ] + [
            selection_from(
                description, is_placeholder=False, is_affirmative=False)
            for description in no_votes
        ]

        voted_contests.append(
            PlaintextBallotContest(contest.object_id, voted_selections))

    return PlaintextBallot(str(draw(uuids())), ballot_style.object_id,
                           voted_contests)
Example #23
def valid_unparsed_empty_restic_config(draw):
    config = draw(
        st.fixed_dictionaries({
            "BackupRepositoryFolder":
            st.text(),
            "ExcludePatternsFile":
            st.just(str(EXCLUDE_FILE)) | st.none(),
            "DevicePassCmd":
            st.text(),
            "FilesAndFolders":
            st.just([]),
            "RepositoryPassCmd":
            st.text(),
            "UUID":
            st.uuids().map(str),
        }))
    return config
Example #24
class TestGetItem(TestJobListRequiringQuery):
    """
    Contains unit tests for the ``__getitem__`` method of the job list
    """
    @given(uuids())
    def test_that_getitem_gets_a_job_by_query(self, job_id: UUID) -> None:
        """
        Tests that the getitem method correctly returns a job from the DB

        :param job_id: The ID of the job to get
        """
        job = self.job_list[job_id]
        self.assertEqual(
            mock.call(id=job_id), self.root_query.filter_by.call_args
        )
        expected_job_id = self.root_query.filter_by(id=job_id).first().id
        self.assertEqual(expected_job_id, job.id)
Example #25
def analysis_status(draw):
    return {
        "apiVersion": draw(text(min_size=1)),
        "harveyVersion": draw(text(min_size=1)),
        "maruVersion": draw(text(min_size=1)),
        "mythrilVersion": draw(text(min_size=1)),
        "queueTime": draw(integers()),
        "status": draw(sampled_from(["Running", "Queued", "Finished"])),
        "submittedAt": draw(datetimes()).isoformat(),
        "submittedBy": draw(text(min_size=1)),
        "uuid": str(draw(uuids(version=4))),
        "runTime": draw(integers()),
        "clientToolName": draw(text()),
        "groupId": draw(text()),
        "groupName": draw(text()),
        "analysisMode": draw(sampled_from(["full", "quick", "standard"])),
    }
Example #26
def services(
        draw,
        ids=uuids(),
        names=text(),
        descriptions=text(),
        registration_schemas=dictionaries(text(), text()),
        result_schemas=dictionaries(text(), text()),
        are_available=booleans(),
        service_job_lists=job_lists(),
        timeouts=timedeltas()
) -> ServiceInterface:
    return Service(
        draw(ids), draw(names), draw(descriptions),
        draw(registration_schemas), draw(result_schemas),
        draw(are_available), draw(service_job_lists),
        draw(timeouts)
    )
Example #27
def generate_session_data() -> Dict[str, Any]:
    """Factory method for generating mocked session data."""
    return st.dictionaries(
        st.text(ascii_letters, min_size=5, max_size=20),
        st.recursive(
            st.floats()
            | st.integers()
            | st.text(printable)
            | st.booleans()
            | st.nothing()
            | st.timedeltas()
            | st.times()
            | st.uuids(),
            st.lists,
        ),
        min_size=5,
        max_size=10,
    ).example()
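``SearchStrategy.example()`` is intended for interactive exploration, and the Hypothesis documentation discourages calling it from inside tests; a hedged sketch of the same session-data strategy driven through ``@given`` instead:

from string import ascii_letters, printable

import hypothesis.strategies as st
from hypothesis import given

session_data = st.dictionaries(
    st.text(ascii_letters, min_size=5, max_size=20),
    st.recursive(
        st.floats()
        | st.integers()
        | st.text(printable)
        | st.booleans()
        | st.timedeltas()
        | st.times()
        | st.uuids(),
        st.lists,
    ),
    min_size=5,
    max_size=10,
)


@given(session_data)
def test_session_keys_are_short_ascii(data):
    # Keys come from the text(ascii_letters, min_size=5, max_size=20) strategy.
    assert all(5 <= len(key) <= 20 for key in data)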
Example #28
def dict_and_subset(draw, keys=st.integers(), values=st.uuids()):
    d = draw(st.dictionaries(keys, values, min_size=1))
    subset = sorted(d.keys())

    i1 = draw(st.integers(min_value=0, max_value=len(subset) - 1))
    subset = subset[i1:]

    i2 = draw(st.integers(min_value=1, max_value=len(subset)))
    subset = subset[:i2]

    upper_bound = subset.pop()

    if len(subset) == 0:
        lower_bound = upper_bound
    else:
        lower_bound = subset[0]

    return (d, subset, lower_bound, upper_bound)
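A hedged sketch of how a tuple-returning composite like the one above is typically exercised; the strategy is restated in a simplified form (``dict_and_bounds``) so the block stands alone.

import hypothesis.strategies as st
from hypothesis import given


@st.composite
def dict_and_bounds(draw, keys=st.integers(), values=st.uuids()):
    d = draw(st.dictionaries(keys, values, min_size=1))
    ordered = sorted(d)
    # Pick a lower bound, then an upper bound that is at least as large.
    lower = draw(st.sampled_from(ordered))
    upper = draw(st.sampled_from([k for k in ordered if k >= lower]))
    return d, lower, upper


@given(dict_and_bounds())
def test_bounds_are_keys_of_the_dict(value):
    d, lower, upper = value
    assert lower in d and upper in d
    assert lower <= upper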
Example #29
def rescue_number(draw):
    # Any version 4 UUID.
    uuids = strategies.uuids(version=4)
    # Either a space or a `#` character.
    leading_integer_literals = strategies.one_of(strategies.just("#"), strategies.just(" "))
    # Any positive number.
    numbers = strategies.integers(min_value=0)

    # Produce either a number or a UUID
    payload_mux = strategies.one_of(numbers, uuids)
    # Draw from the mux.
    raw_payload = draw(payload_mux)
    # If it's a UUID, prefix it with "@".
    if isinstance(raw_payload, UUID):
        return f"@{raw_payload}".lstrip()

    # Otherwise draw from the integer prefix strategy.
    return f"{draw(leading_integer_literals)}{raw_payload}".lstrip()
Example #30
def launcher_args(opts, min_jobs=0):
    args_dict = {
        CmdArgs.db_root: filepaths(),
        CmdArgs.optimizer_root: filepaths(),
        CmdArgs.uuid: uuids(),
        CmdArgs.data_root: filepaths(),
        CmdArgs.db: filenames(),
        CmdArgs.optimizer: lists(sampled_from(opts), min_size=1, max_size=len(opts)),
        CmdArgs.data: lists(datasets(), min_size=1),
        CmdArgs.classifier: lists(sampled_from(MODEL_NAMES), min_size=1, max_size=len(MODEL_NAMES)),
        CmdArgs.metric: lists(sampled_from(METRICS), min_size=1, max_size=len(METRICS)),
        CmdArgs.n_calls: integers(1, 100),
        CmdArgs.n_suggest: integers(1, 100),
        CmdArgs.n_repeat: integers(1, 100),
        CmdArgs.n_jobs: integers(min_jobs, 1000),
        CmdArgs.jobs_file: filepaths(),
        CmdArgs.verbose: booleans(),
    }
    S = fixed_dictionaries(args_dict)
    return S
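A reduced sketch of the ``fixed_dictionaries`` pattern above: each key maps to its own strategy, and every drawn value is a dict with exactly those keys. The string keys are illustrative, not the original ``CmdArgs`` constants.

import hypothesis.strategies as st
from hypothesis import given

launcher_arg_dicts = st.fixed_dictionaries({
    "uuid": st.uuids(),
    "n_jobs": st.integers(0, 1000),
    "verbose": st.booleans(),
})


@given(launcher_arg_dicts)
def test_launcher_args_have_expected_keys(args):
    assert set(args) == {"uuid", "n_jobs", "verbose"}
    assert 0 <= args["n_jobs"] <= 1000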
Example #31
def party_lists(draw: _DrawType, num_parties: int):
    """
    Generates a `List[Party]` of the requested length.
    :param draw: Hidden argument, used by Hypothesis.
    :param num_parties: Number of parties to generate in the list.
    """
    party_names = [f"Party{n}" for n in range(num_parties)]
    party_abbrvs = [f"P{n}" for n in range(num_parties)]

    assert num_parties > 0

    return [
        Party(
            object_id=str(draw(uuids())),
            name=InternationalizedText([Language(party_names[i], "en")]),
            abbreviation=party_abbrvs[i],
            color=None,
            logo_uri=draw(urls()),
        ) for i in range(num_parties)
    ]
Example #32
def candidates(draw: _DrawType, party_list: Optional[List[Party]]):
    """
    Generates a `Candidate` object, assigning it one of the parties from `party_list` at random,
    with a chance that there will be no party assigned at all.
    :param draw: Hidden argument, used by Hypothesis.
    :param party_list: A list of `Party` objects. If None, then the resulting `Candidate`
        will have no party.
    """
    if party_list:
        party = party_list[draw(integers(0, len(party_list) - 1))]
        party_id = party.get_party_id()
    else:
        party_id = None

    return Candidate(
        str(draw(uuids())),
        draw(internationalized_human_names()),
        party_id,
        draw(one_of(just(None), urls())),
    )
Example #33
class TestHandleEdit(HelperMixin):
    @given(st.builds(los_org.OrgUnit), st.uuids(), st.datetimes())
    def test_handle_edit(
        self,
        instance: los_org.OrgUnit,
        dar_uuid: UUID,
        filedate: datetime,
    ):
        importer = los_org.OrgUnitImporter()
        with self._mock_dar_lookup((instance.post_address, dar_uuid)):
            with self._mock_read_csv(instance):
                with self._mock_get_client_session():
                    with self._mock_create_details() as mock_create_details:
                        with self._mock_edit_details() as mock_edit_details:
                            with self._mock_lookup_organisationfunktion():
                                self._run_until_complete(
                                    importer.handle_edit(
                                        "unused_filename.csv", filedate))
                                mock_create_details.assert_called_once()
                                mock_edit_details.assert_called_once()
Example #34
def _build_node(applications):
    # All the manifestations in `applications`.
    app_manifestations = set(
        app.volume.manifestation for app in applications if app.volume
    )
    # A set that contains all of those, plus an arbitrary set of
    # manifestations.
    dataset_ids = frozenset(
        app.volume.manifestation.dataset_id
        for app in applications if app.volume
    )
    manifestations = (
        st.sets(MANIFESTATIONS.filter(
            lambda m: m.dataset_id not in dataset_ids))
        .map(pset)
        .map(lambda ms: ms.union(app_manifestations))
        .map(lambda ms: dict((m.dataset.dataset_id, m) for m in ms)))
    return st.builds(
        Node, uuid=st.uuids(),
        applications=st.just({a.name: a for a in applications}),
        manifestations=manifestations)
Example #35
def ballot_styles(draw: _DrawType, party_ids: List[Party],
                  geo_units: List[GeopoliticalUnit]):
    """
    Generates a `BallotStyle` object, which rolls up a list of parties and
    geopolitical units (passed as arguments), with some additional information
    added on as well.
    :param draw: Hidden argument, used by Hypothesis.
    :param party_ids: a list of `Party` objects to be used in this ballot style
    :param geo_units: a list of `GeopoliticalUnit` objects to be used in this ballot style
    """
    assert len(party_ids) > 0
    assert len(geo_units) > 0

    gp_unit_ids = [x.object_id for x in geo_units]
    if len(gp_unit_ids) == 0:
        gp_unit_ids = None

    party_ids = [x.get_party_id() for x in party_ids]
    if len(party_ids) == 0:
        party_ids = None

    image_uri = draw(urls())
    return BallotStyle(str(draw(uuids())), gp_unit_ids, party_ids, image_uri)
Example #36
class TestContains(TestJobListRequiringQuery):
    """
    Contains unit tests for the ``__contains__`` method. This method overrides
    ``in`` in order to check for membership
    """
    @given(uuids())
    def test_that_contains_can_check_membership_for_job_ids(
            self, job_id: UUID
    ) -> None:
        """

        :param job_id: The UUID for which to check whether the list contains
            the job
        """
        self.assertIn(job_id, self.job_list)

        self.assertEqual(
            mock.call(id=job_id),
            self.root_query.filter_by.call_args
        )
        self.assertTrue(self.root_query.filter_by().count.called)

    def test_that_contains_can_check_membership_for_jobs(
            self
    ) -> None:
        """
        Tests that the ``in`` function can correctly check if a job is in
        the job list.
        """
        job = mock.MagicMock(spec=Job)
        self.assertIn(job, self.job_list)

        self.assertEqual(
            mock.call(id=job.id),
            self.root_query.filter_by.call_args
        )
        self.assertTrue(self.root_query.filter_by().count.called)
Example #37
    def format_strategies(self, schema):
        """

        :param schema:

        :return:
        """
        min_max_size = dict(min_size=schema.min_length or 0,
                            max_size=schema.max_length)
        return {
            **self._format_strategies,
            "uuid": st.uuids().map(str),
            "uri": st_uris(),
            "uriref": st_uris(),
            "hostname": st_hostnames(),
            "date": st.dates().map(str),
            "date-time": st.datetimes().map(datetime.isoformat),
            "binary": st.binary(**min_max_size),
            "byte": st.binary(**min_max_size).map(base64.encodebytes),
            "int32": self.numbers(st_base=st.integers, schema=schema),
            "int64": self.numbers(st_base=st.integers, schema=schema),
            "float": self.numbers(st_base=st.floats, schema=schema),
            "double": self.numbers(st_base=st.floats, schema=schema),
        }
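A hedged sketch of consuming a format-to-strategy mapping like the one above: look up the strategy for a format name and check the drawn values. ``FORMAT_STRATEGIES`` is an illustrative subset, not the original class attribute.

import uuid

import hypothesis.strategies as st
from hypothesis import given

FORMAT_STRATEGIES = {
    "uuid": st.uuids().map(str),
    "date": st.dates().map(str),
}


@given(FORMAT_STRATEGIES["uuid"])
def test_uuid_format_round_trips(value):
    # The mapped strings are canonical UUID text and parse back losslessly.
    assert str(uuid.UUID(value)) == value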
Example #38
class TestCompileGeneral(TestCase):
    @given(literals
           | st.dates()
           | st.datetimes()
           | st.decimals(allow_nan=False)
           | st.fractions()
           | st.timedeltas()
           | st.times()
           | st.uuids())
    def test_compile_pickle(self, form):
        self.assertEqual(form, eval(compiler.Compiler().pickle(form)))

    @given(literals)
    def test_compile_literal(self, form):
        self.assertEqual(form, eval(compiler.Compiler().quoted(form)))

    @given(st.characters(whitelist_categories=["Lu", "Ll", "Lt", "Nl",
                                               "Sm"], ))
    def test_un_x_quote(self, char):
        x = munger.x_quote(char)
        self.assertTrue(("x" + x).isidentifier())
        match = re.fullmatch("x(.*?)_", x)
        if match:
            self.assertEqual(char, munger.un_x_quote(match))
Example #39
def field_mappings():
    global __default_field_mappings

    if __default_field_mappings is None:
        # Sized fields are handled in _get_strategy_for_field()
        # URL fields are not yet handled
        __default_field_mappings = {
            dm.SmallIntegerField: st.integers(-32768, 32767),
            dm.IntegerField: st.integers(-2147483648, 2147483647),
            dm.BigIntegerField:
                st.integers(-9223372036854775808, 9223372036854775807),
            dm.PositiveIntegerField: st.integers(0, 2147483647),
            dm.PositiveSmallIntegerField: st.integers(0, 32767),
            dm.BinaryField: st.binary(),
            dm.BooleanField: st.booleans(),
            dm.DateField: st.dates(),
            dm.DateTimeField: st.datetimes(timezones=get_tz_strat()),
            dm.DurationField: st.timedeltas(),
            dm.EmailField: emails(),
            dm.FloatField: st.floats(),
            dm.NullBooleanField: st.one_of(st.none(), st.booleans()),
            dm.TimeField: st.times(timezones=get_tz_strat()),
            dm.UUIDField: st.uuids(),
        }

        # SQLite does not support timezone-aware times, or timedeltas that
        # don't fit in six bytes of microseconds, so we override those
        db = getattr(django_settings, 'DATABASES', {}).get('default', {})
        if db.get('ENGINE', '').endswith('.sqlite3'):  # pragma: no branch
            sqlite_delta = timedelta(microseconds=2 ** 47 - 1)
            __default_field_mappings.update({
                dm.TimeField: st.times(),
                dm.DurationField: st.timedeltas(-sqlite_delta, sqlite_delta),
            })

    return __default_field_mappings
Example #40
        by v2 to v3.
        """
        # Get a valid v2 config.
        v2_config = migrate_configuration(1, 2, self.v1_config, StubMigration)
        # Perform two sequential migrations to get from v1 to v3, starting
        # with a v1 config.
        result = migrate_configuration(1, 3, self.v1_config, StubMigration)
        # Compare the v1 --> v3 upgrade to the direct result of the
        # v2 --> v3 upgrade on the v2 config. Both should be identical
        # and valid v3 configs.
        self.assertEqual(result, StubMigration.upgrade_from_v2(v2_config))


DATASETS = st.builds(
    Dataset,
    dataset_id=st.uuids(),
    maximum_size=st.integers(min_value=1),
)

# UTC `datetime`s accurate to seconds
DATETIMES_TO_SECONDS = datetimes(
    timezones=['UTC']
).map(
    lambda d: d.replace(microsecond=0)
)

LEASES = st.builds(
    Lease,
    dataset_id=st.uuids(),
    node_id=st.uuids(),
    expiration=st.one_of(
        st.none(),
        DATETIMES_TO_SECONDS
    )
)
Example #41
def uuid_keys(draw):
    return draw(lists(uuids(), min_size=2, max_size=3))
Example #42
        by v2 to v3.
        """
        # Get a valid v2 config.
        v2_config = migrate_configuration(1, 2, self.v1_config, StubMigration)
        # Perform two sequential migrations to get from v1 to v3, starting
        # with a v1 config.
        result = migrate_configuration(1, 3, self.v1_config, StubMigration)
        # Compare the v1 --> v3 upgrade to the direct result of the
        # v2 --> v3 upgrade on the v2 config. Both should be identical
        # and valid v3 configs.
        self.assertEqual(result, StubMigration.upgrade_from_v2(v2_config))


DATASETS = st.builds(
    Dataset,
    dataset_id=st.uuids(),
    maximum_size=st.integers(),
)

# `datetime`s accurate to seconds
DATETIMES_TO_SECONDS = datetimes().map(lambda d: d.replace(microsecond=0))

LEASES = st.builds(
    Lease, dataset_id=st.uuids(), node_id=st.uuids(),
    expiration=st.one_of(
        st.none(),
        DATETIMES_TO_SECONDS
    )
)

# Constrain primary to be True so that we don't get invariant errors from Node
Example #43
# Copyright ClusterHQ Ltd.  See LICENSE file for details.

"""
Hypothesis strategies for testing ``flocker.node.agents``.
"""

from sys import maxint

from hypothesis.strategies import (
    builds,
    integers,
    none,
    text,
    uuids,
)

from ..blockdevice import BlockDeviceVolume

blockdevice_volumes = builds(
    BlockDeviceVolume,
    blockdevice_id=text(),
    # XXX: Probably should be positive integers
    size=integers(max_value=maxint),
    attached_to=text() | none(),
    dataset_id=uuids(),
)
Example #44
_global_type_lookup = {
    # Types with core Hypothesis strategies
    type(None): st.none(),
    bool: st.booleans(),
    int: st.integers(),
    float: st.floats(),
    complex: st.complex_numbers(),
    fractions.Fraction: st.fractions(),
    decimal.Decimal: st.decimals(),
    text_type: st.text(),
    bytes: st.binary(),
    datetime.datetime: st.datetimes(),
    datetime.date: st.dates(),
    datetime.time: st.times(),
    datetime.timedelta: st.timedeltas(),
    uuid.UUID: st.uuids(),
    tuple: st.builds(tuple),
    list: st.builds(list),
    set: st.builds(set),
    frozenset: st.builds(frozenset),
    dict: st.builds(dict),
    # Built-in types
    type: st.sampled_from([type(None), bool, int, str, list, set, dict]),
    type(Ellipsis): st.just(Ellipsis),
    type(NotImplemented): st.just(NotImplemented),
    bytearray: st.binary().map(bytearray),
    memoryview: st.binary().map(memoryview),
    # Pull requests with more types welcome!
}

if PY2:
Example #45
# Mapping of field types, to strategy objects or functions of (type) -> strategy
_global_field_lookup = {
    dm.SmallIntegerField: st.integers(-32768, 32767),
    dm.IntegerField: st.integers(-2147483648, 2147483647),
    dm.BigIntegerField: st.integers(-9223372036854775808, 9223372036854775807),
    dm.PositiveIntegerField: st.integers(0, 2147483647),
    dm.PositiveSmallIntegerField: st.integers(0, 32767),
    dm.BinaryField: st.binary(),
    dm.BooleanField: st.booleans(),
    dm.DateField: st.dates(),
    dm.EmailField: emails(),
    dm.FloatField: st.floats(),
    dm.NullBooleanField: st.one_of(st.none(), st.booleans()),
    dm.URLField: urls(),
    dm.UUIDField: st.uuids(),
    df.DateField: st.dates(),
    df.DurationField: st.timedeltas(),
    df.EmailField: emails(),
    df.FloatField: st.floats(allow_nan=False, allow_infinity=False),
    df.IntegerField: st.integers(-2147483648, 2147483647),
    df.NullBooleanField: st.one_of(st.none(), st.booleans()),
    df.URLField: urls(),
    df.UUIDField: st.uuids(),
}


def register_for(field_type):
    def inner(func):
        _global_field_lookup[field_type] = func
        return func
Example #46
# Five seems a sensible average.
task_level_lists = lists(task_level_indexes, min_size=1, max_size=6)
task_levels = task_level_lists.map(lambda level: TaskLevel(level=level))


# Text generation is slow, and most of the things are short labels. We set
# a restricted alphabet so they're easier to read, and in general a large
# amount of randomness in label generation doesn't enhance our testing in
# any way, since we don't parse type names or user field values.
labels = text(min_size=1, max_size=8, alphabet="CGAT")

timestamps = floats(min_value=0, max_value=1000.0)

message_core_dicts = fixed_dictionaries(
    dict(task_level=task_level_lists.map(pvector),
         task_uuid=uuids().map(unicode),
         timestamp=timestamps)).map(pmap)


# Text generation is slow. We can make it faster by not generating so
# much. These are reasonable values.
message_data_dicts = dictionaries(
    keys=labels, values=labels,
    # People don't normally put much more than ten fields in their
    # messages, surely?
    max_size=10,
).map(pmap)


def written_from_pmap(d):
    """
Example #47
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER

from __future__ import division, print_function, absolute_import

import pytest

import hypothesis.strategies as st
from hypothesis import given
from tests.common.debug import minimal


@given(st.lists(st.uuids()))
def test_are_unique(ls):
    assert len(set(ls)) == len(ls)


def test_retains_uniqueness_in_simplify():
    ts = minimal(st.lists(st.uuids()), lambda x: len(x) >= 5)
    assert len(ts) == len(set(ts)) == 5


@pytest.mark.parametrize('version', (1, 2, 3, 4, 5))
def test_can_generate_specified_version(version):
    @given(st.uuids(version=version))
    def inner(uuid):
        assert version == uuid.version

    inner()
Example #48
def test_retains_uniqueness_in_simplify():
    ts = minimal(st.lists(st.uuids()), lambda x: len(x) >= 5)
    assert len(ts) == len(set(ts)) == 5
Example #49
def test_can_generate_specified_version(version):
    @given(st.uuids(version=version))
    def inner(uuid):
        assert version == uuid.version

    inner()
Example #50
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis-python
#
# Most of this work is copyright (C) 2013-2015 David R. MacIver
# ([email protected]), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER

from __future__ import division, print_function, absolute_import

import hypothesis.strategies as st
from hypothesis import find, given


@given(st.lists(st.uuids()))
def test_are_unique(ls):
    assert len(set(ls)) == len(ls)


@given(st.lists(st.uuids()), st.randoms())
def test_retains_uniqueness_in_simplify(ls, rnd):
    ts = find(st.lists(st.uuids()), lambda x: len(x) >= 5, random=rnd)
    assert len(ts) == len(set(ts)) == 5
Example #51
def unique_name_strategy(draw):
    """
    A hypothesis strategy to generate an always unique name.
    """
    return unicode(draw(st.uuids()))
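A Python 3 sketch of the same idea (``unicode`` is Python 2 only), assuming the ``@st.composite`` decorator; the uniqueness check mirrors the ``st.uuids()`` behaviour exercised in Examples #47 and #50.

import hypothesis.strategies as st
from hypothesis import given


@st.composite
def unique_name_strategy(draw):
    """A strategy that generates an always unique name."""
    return str(draw(st.uuids()))


@given(st.lists(unique_name_strategy(), max_size=10))
def test_names_are_unique(names):
    # st.uuids() never repeats a value within a single test case.
    assert len(set(names)) == len(names)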
Example #52
class TestProcessQuery(unittest.TestCase):

    # @classmethod
    # def setUpClass(cls):
    #     local_client = DgraphClient(DgraphClientStub('localhost:9080'))
    #
    #     # drop_all(local_client)
    #     # time.sleep(3)
    #     provision()
    #     provision()

    # @classmethod
    # def tearDownClass(cls):
    #     local_client = DgraphClient(DgraphClientStub('localhost:9080'))
    #
    #     drop_all(local_client)
    #     provision()

    @hypothesis.settings(deadline=None)
    @given(process_props=process_props())
    def test_single_process_contains_key(self, process_props: ProcessProps) -> None:
        local_client = DgraphClient(DgraphClientStub("localhost:9080"))
        created_proc = get_or_create_process(self, local_client, process_props)

        # Setup complete, do some queries

        queried_proc = ProcessQuery().query_first(
            local_client, contains_node_key=created_proc.node_key
        )

        assert queried_proc
        assert created_proc.get_process_id() == queried_proc.get_process_id()
        assert created_proc.node_key == queried_proc.node_key
        assert "Process" == queried_proc.get_node_type()
        assert created_proc.get_arguments() == queried_proc.get_arguments()
        assert (
            created_proc.get_created_timestamp() == queried_proc.get_created_timestamp()
        )
        assert created_proc.get_terminate_time() == queried_proc.get_terminate_time()
        assert created_proc.get_image_name() == queried_proc.get_image_name()
        assert created_proc.get_process_name() == queried_proc.get_process_name()

        assert not queried_proc.get_asset()

    @hypothesis.settings(deadline=None)
    @given(
        asset_props=asset_props(), process_props=process_props(),
    )
    def test_single_process_connected_to_asset_node(
        self, asset_props: AssetProps, process_props: ProcessProps,
    ):
        local_client = DgraphClient(DgraphClientStub("localhost:9080"))

        created_asset = get_or_create_asset(self, local_client, asset_props)
        created_proc = get_or_create_process(self, local_client, process_props)

        create_edge(
            local_client, created_asset.uid, "asset_processes", created_proc.uid,
        )

        # Setup complete, do some queries

        queried_proc = (
            ProcessQuery()
            .with_asset(AssetQuery().with_hostname(created_asset.get_hostname()))
            .query_first(local_client, contains_node_key=created_proc.node_key)
        )
        assert queried_proc
        fetch_all_properties(queried_proc)
        assert_equal_props(created_proc, queried_proc)
        queried_asset = queried_proc.get_asset()
        assert_equal_identity(created_asset, queried_asset)

    # Given that the code that generates timestamps only uses unsized types we can make some
    # assumptions about the data
    @hypothesis.settings(deadline=None)
    @given(process_props=process_props())
    def test_process_query_view_parity(self, process_props: ProcessProps):
        local_client = DgraphClient(DgraphClientStub("localhost:9080"))

        created_proc = get_or_create_process(self, local_client, process_props,)

        queried_proc = (
            ProcessQuery()
            .with_node_key(eq=created_proc.node_key)
            .query_first(local_client)
        )

        assert queried_proc

        assert process_props["node_key"] == queried_proc.node_key
        assert "Process" == queried_proc.get_node_type()
        assert process_props["process_id"] == queried_proc.get_process_id()
        assert process_props["arguments"] == escape_dgraph_str(
            queried_proc.get_arguments()
        )
        assert (
            process_props["created_timestamp"] == queried_proc.get_created_timestamp()
        )
        assert None == queried_proc.get_asset()
        assert process_props["terminate_time"] == queried_proc.get_terminate_time()
        assert process_props["image_name"] == escape_dgraph_str(
            queried_proc.get_image_name()
        )
        assert process_props["process_name"] == escape_dgraph_str(
            queried_proc.get_process_name()
        )

    @hypothesis.settings(deadline=None)
    @given(
        node_key=st.uuids(),
        process_id=st.integers(min_value=1, max_value=2 ** 32),
        created_timestamp=st.integers(min_value=0, max_value=2 ** 48),
        terminate_time=st.integers(min_value=0, max_value=2 ** 48),
        image_name=st.text(),
        process_name=st.text(),
        arguments=st.text(),
    )
    def test_process_query_view_parity_eq(
        self,
        node_key,
        process_id,
        created_timestamp,
        terminate_time,
        image_name,
        process_name,
        arguments,
    ):
        node_key = "test_process_query_view_parity_eq" + str(node_key)
        local_client = DgraphClient(DgraphClientStub("localhost:9080"))
        get_or_create_process_node_deprecated(
            local_client,
            node_key,
            process_id,
            arguments,
            created_timestamp,
            terminate_time,
            image_name,
            process_name,
        )

        queried_proc = (
            ProcessQuery()
            .with_node_key(eq=node_key)
            .with_process_id(eq=process_id)
            .with_arguments(eq=arguments)
            .with_created_timestamp(eq=created_timestamp)
            .with_terminate_time(eq=terminate_time)
            .with_image_name(eq=image_name)
            .with_process_name(eq=process_name)
            .query_first(local_client)
        )

        # assert process_view.process_id == queried_proc.get_process_id()
        assert node_key == queried_proc.node_key
        assert "Process" == queried_proc.get_node_type()
        assert process_id == queried_proc.get_process_id()

        assert arguments == queried_proc.get_arguments()
        assert created_timestamp == queried_proc.get_created_timestamp()
        assert terminate_time == queried_proc.get_terminate_time()
        assert image_name == queried_proc.get_image_name()
        assert process_name == queried_proc.get_process_name()

    @hypothesis.settings(deadline=None)
    @given(process_props=process_props())
    def test_process_query_view_miss(self, process_props: ProcessProps) -> None:
        local_client = DgraphClient(DgraphClientStub("localhost:9080"))

        created_proc = get_or_create_process(self, local_client, process_props)

        assert (
            created_proc.process_id is not None
            and created_proc.arguments is not None
            and created_proc.created_timestamp is not None
            and created_proc.terminate_time is not None
            and created_proc.image_name is not None
            and created_proc.process_name is not None
        )
        queried_proc = (
            ProcessQuery()
            .with_node_key(eq=created_proc.node_key)
            .with_process_id(eq=Not(created_proc.process_id))
            .with_arguments(eq=Not(created_proc.arguments))
            .with_created_timestamp(eq=Not(created_proc.created_timestamp))
            .with_terminate_time(eq=Not(created_proc.terminate_time))
            .with_image_name(eq=Not(created_proc.image_name))
            .with_process_name(eq=Not(created_proc.process_name))
            .query_first(local_client)
        )

        assert not queried_proc

    # Given that the code that generates timestamps only uses unsized types we can make some
    # assumptions about the data

    @hypothesis.settings(deadline=None)
    @given(
        node_key=st.uuids(),
        process_id=st.integers(min_value=1, max_value=2 ** 32),
        created_timestamp=st.integers(min_value=0, max_value=2 ** 48),
        terminate_time=st.integers(min_value=0, max_value=2 ** 48),
        image_name=text_dgraph_compat(),
        process_name=text_dgraph_compat(),
        arguments=text_dgraph_compat(),
    )
    def test_process_query_view_parity_contains(
        self,
        node_key,
        process_id,
        created_timestamp,
        terminate_time,
        image_name,
        process_name,
        arguments,
    ):
        node_key = "test_process_query_view_parity_contains" + str(node_key)
        local_client = DgraphClient(DgraphClientStub("localhost:9080"))
        get_or_create_process_node_deprecated(
            local_client,
            node_key,
            process_id,
            arguments,
            created_timestamp,
            terminate_time,
            image_name,
            process_name,
        )

        query = ProcessQuery().with_node_key(eq=node_key)

        # Don't f**k with newlines due to a dgraph bug
        # https://github.com/dgraph-io/dgraph/issues/4694
        for prop in [arguments, image_name, process_name]:
            hypothesis.assume(len(prop) > 3)
            hypothesis.assume("\n" not in prop)
            hypothesis.assume("\\" not in prop)

        # These fail because dgraph doesn't like the query
        # 	(regexp(process_name, /00\\//))
        query.with_arguments(contains=arguments[: len(arguments) - 1])
        query.with_image_name(contains=image_name[: len(image_name) - 1])
        query.with_process_name(contains=process_name[: len(process_name) - 1])

        queried_proc = query.query_first(local_client)

        assert queried_proc
        assert "Process" == queried_proc.get_node_type()
        assert process_id == queried_proc.get_process_id()
        assert node_key == queried_proc.node_key
        assert arguments == queried_proc.get_arguments()
        assert created_timestamp == queried_proc.get_created_timestamp()
        assert terminate_time == queried_proc.get_terminate_time()
        assert image_name == queried_proc.get_image_name()
        assert process_name == queried_proc.get_process_name()

    def test_parent_children_edge(self) -> None:
        # Given: a process with a pid 100 & process_name word.exe,
        local_client = DgraphClient(DgraphClientStub("localhost:9080"))

        created_timestamp = int(time.time())

        parent_process = {
            "process_id": 100,
            "process_name": "word.exe",
            "created_timestamp": created_timestamp,
        }  # type: Dict[str, Property]

        parent_process_view = upsert(
            local_client,
            "Process",
            ProcessView,
            "0e84f2ce-f711-46ce-bc9e-1b13c9ba6d6c",
            parent_process,
        )

        child_process = {
            "process_id": 110,
            "process_name": "malware.exe",
            "created_timestamp": created_timestamp + 1000,
        }  # type: Dict[str, Property]

        child_process_view = upsert(
            local_client,
            "Process",
            ProcessView,
            "46d2862f-cb58-4062-b35e-bb310b8d5b0d",
            child_process,
        )

        create_edge(
            local_client, parent_process_view.uid, "children", child_process_view.uid,
        )

        queried_process = (
            ProcessQuery()
            .with_node_key(eq="0e84f2ce-f711-46ce-bc9e-1b13c9ba6d6c")
            .with_process_id(eq=100)
            .with_process_name(contains="word")
            .with_created_timestamp(eq=created_timestamp)
            .with_children(
                ProcessQuery()
                .with_node_key(eq="46d2862f-cb58-4062-b35e-bb310b8d5b0d")
                .with_process_id(eq=110)
                .with_process_name(eq="malware.exe")
                .with_created_timestamp(eq=created_timestamp + 1000)
            )
            .query_first(local_client)
        )
        assert queried_process

        assert queried_process.node_key == "0e84f2ce-f711-46ce-bc9e-1b13c9ba6d6c"
        assert queried_process.process_id == 100
        assert queried_process.process_name == "word.exe"
        assert queried_process.created_timestamp == created_timestamp

        assert len(queried_process.children) == 1
        child = queried_process.children[0]
        assert child.node_key == "46d2862f-cb58-4062-b35e-bb310b8d5b0d"
        assert child.process_id == 110
        assert child.process_name == "malware.exe"
        assert child.created_timestamp == created_timestamp + 1000

    def test_with_bin_file(self) -> None:
        # Given: a process with a pid 100 & process_name word.exe,
        local_client = DgraphClient(DgraphClientStub("localhost:9080"))

        created_timestamp = int(time.time())

        parent_process = {
            "process_id": 100,
            "process_name": "word.exe",
            "created_timestamp": created_timestamp,
        }  # type: Dict[str, Property]

        parent_process_view = upsert(
            local_client,
            "Process",
            ProcessView,
            "635952af-87f3-4a2a-a65d-3f1859db9525",
            parent_process,
        )

        bin_file = {
            "file_path": "/folder/file.txt",
            "created_timestamp": created_timestamp + 1000,
        }  # type: Dict[str, Property]

        bin_file_view = upsert(
            local_client,
            "File",
            FileView,
            "9f16e0c9-33c0-4d18-9878-ef686373570b",
            bin_file,
        )

        create_edge(
            local_client, parent_process_view.uid, "bin_file", bin_file_view.uid,
        )

        queried_process = (
            ProcessQuery()
            .with_node_key(eq="635952af-87f3-4a2a-a65d-3f1859db9525")
            .with_process_id(eq=100)
            .with_process_name(contains="word")
            .with_created_timestamp(eq=created_timestamp)
            .with_bin_file(
                FileQuery()
                .with_node_key(eq="9f16e0c9-33c0-4d18-9878-ef686373570b")
                .with_file_path(eq="/folder/file.txt")
            )
            .query_first(local_client)
        )

        assert queried_process
        assert "635952af-87f3-4a2a-a65d-3f1859db9525"
        assert queried_process.process_id == 100
        assert queried_process.process_name == "word.exe"
        assert queried_process.created_timestamp == created_timestamp

        bin_file = queried_process.bin_file
        assert bin_file.node_key == "9f16e0c9-33c0-4d18-9878-ef686373570b"

        assert bin_file.file_path == "/folder/file.txt"

    def test_process_with_created_files(self) -> None:
        # Given: a process with a pid 100 & process_name word.exe,
        local_client = DgraphClient(DgraphClientStub("localhost:9080"))

        created_timestamp = int(time.time())

        parent_process = {
            "process_id": 100,
            "process_name": "word.exe",
            "created_timestamp": created_timestamp,
        }  # type: Dict[str, Property]

        parent_process_view = upsert(
            local_client,
            "Process",
            ProcessView,
            "763ddbda-8812-4a07-acfe-83402b92379d",
            parent_process,
        )

        created_file = {
            "file_path": "/folder/file.txt",
            "created_timestamp": created_timestamp + 1000,
        }  # type: Dict[str, Property]

        created_file_view = upsert(
            local_client,
            "File",
            FileView,
            "575f103e-1a11-4650-9f1b-5b72e44dfec3",
            created_file,
        )

        create_edge(
            local_client,
            parent_process_view.uid,
            "created_files",
            created_file_view.uid,
        )

        queried_process = (
            ProcessQuery()
            .with_node_key(eq="763ddbda-8812-4a07-acfe-83402b92379d")
            .with_process_id(eq=100)
            .with_process_name(contains="word")
            .with_created_timestamp(eq=created_timestamp)
            .with_created_files(
                FileQuery()
                .with_node_key(eq="575f103e-1a11-4650-9f1b-5b72e44dfec3")
                .with_file_path(eq="/folder/file.txt")
            )
            .query_first(local_client)
        )

        assert queried_process
        assert queried_process.process_id == 100

        assert len(queried_process.created_files) == 1
        created_file = queried_process.created_files[0]
        assert created_file.file_path == "/folder/file.txt"

    def test_with_deleted_files(self) -> None:
        # Given: a process with a pid 100 & process_name word.exe,
        local_client = DgraphClient(DgraphClientStub("localhost:9080"))

        created_timestamp = int(time.time())

        parent_process = {
            "process_id": 100,
            "process_name": "word.exe",
            "created_timestamp": created_timestamp,
        }  # type: Dict[str, Property]

        parent_process_view = upsert(
            local_client,
            "Process",
            ProcessView,
            "test_with_deleted_files-47527d73-22c4-4e0f-bf7d-184bf1f206e2",
            parent_process,
        )

        deleted_file = {
            "file_path": "/folder/file.txt",
            "created_timestamp": created_timestamp + 1000,
        }  # type: Dict[str, Property]

        deleted_file_view = upsert(
            local_client,
            "File",
            FileView,
            "test_with_deleted_files8b8364ea-9b47-476b-8cf0-0f724adff10f",
            deleted_file,
        )

        create_edge(
            local_client,
            parent_process_view.uid,
            "deleted_files",
            deleted_file_view.uid,
        )

        queried_process = (
            ProcessQuery()
            .with_process_id(eq=100)
            .with_process_name(contains="word")
            .with_created_timestamp(eq=created_timestamp)
            .with_deleted_files(FileQuery().with_file_path(eq="/folder/file.txt"))
            .query_first(local_client)
        )

        assert queried_process
        assert queried_process.process_id == 100

    def test_with_read_files(self) -> None:
        # Given: a process with a pid 100 & process_name word.exe,
        local_client = DgraphClient(DgraphClientStub("localhost:9080"))

        created_timestamp = int(time.time())

        parent_process = {
            "process_id": 100,
            "process_name": "word.exe",
            "created_timestamp": created_timestamp,
        }  # type: Dict[str, Property]

        parent_process_view = upsert(
            local_client,
            "Process",
            ProcessView,
            "test_with_read_files-669a3693-d960-401c-8d29-5d669ffcd660",
            parent_process,
        )

        read_file = {
            "file_path": "/folder/file.txt",
            "created_timestamp": created_timestamp + 1000,
        }  # type: Dict[str, Property]

        read_file_view = upsert(
            local_client,
            "File",
            FileView,
            "test_with_read_files-aa9248ec-36ee-4177-ba1a-999de735e682",
            read_file,
        )

        create_edge(
            local_client, parent_process_view.uid, "read_files", read_file_view.uid,
        )

        queried_process = (
            ProcessQuery()
            .with_process_id(eq=100)
            .with_process_name(contains="word")
            .with_created_timestamp(eq=created_timestamp)
            .with_read_files(FileQuery().with_file_path(eq="/folder/file.txt"))
            .query_first(local_client)
        )

        assert queried_process
        assert (
            queried_process.node_key
            == "test_with_read_files-669a3693-d960-401c-8d29-5d669ffcd660"
        )

        assert queried_process.process_id == 100
        assert queried_process.process_name == "word.exe"

        assert len(queried_process.read_files) == 1
        assert (
            queried_process.read_files[0].node_key
            == "test_with_read_files-aa9248ec-36ee-4177-ba1a-999de735e682"
        )
        assert queried_process.read_files[0].file_path == "/folder/file.txt"

    def test_with_wrote_files(self) -> None:
        # Given: a process with a pid 100 & process_name word.exe,
        local_client = DgraphClient(DgraphClientStub("localhost:9080"))

        created_timestamp = int(time.time())

        parent_process = {
            "process_id": 100,
            "process_name": "word.exe",
            "created_timestamp": created_timestamp,
        }  # type: Dict[str, Property]

        parent_process_view = upsert(
            local_client,
            "Process",
            ProcessView,
            "test_with_wrote_files-8f0761fb-2ffe-4d4b-ab38-68e5489f56dc",
            parent_process,
        )

        wrote_file = {
            "file_path": "/folder/file.txt",
            "created_timestamp": created_timestamp + 1000,
        }  # type: Dict[str, Property]

        wrote_file_view = upsert(
            local_client,
            "File",
            FileView,
            "test_with_wrote_files-2325c49a-95b4-423f-96d0-99539fe03833",
            wrote_file,
        )

        create_edge(
            local_client, parent_process_view.uid, "wrote_files", wrote_file_view.uid,
        )

        queried_process = (
            ProcessQuery()
            .with_node_key(
                eq="test_with_wrote_files-8f0761fb-2ffe-4d4b-ab38-68e5489f56dc"
            )
            .with_process_id(eq=100)
            .with_process_name(contains="word")
            .with_created_timestamp(eq=created_timestamp)
            .with_wrote_files(
                FileQuery()
                .with_node_key(
                    eq="test_with_wrote_files-2325c49a-95b4-423f-96d0-99539fe03833"
                )
                .with_file_path(eq="/folder/file.txt")
            )
            .query_first(local_client)
        )

        assert queried_process
        assert (
            queried_process.node_key
            == "test_with_wrote_files-8f0761fb-2ffe-4d4b-ab38-68e5489f56dc"
        )
        assert queried_process.process_id == 100
        assert queried_process.process_name == "word.exe"

        assert len(queried_process.wrote_files) == 1
        assert (
            queried_process.wrote_files[0].node_key
            == "test_with_wrote_files-2325c49a-95b4-423f-96d0-99539fe03833"
        )
        assert queried_process.wrote_files[0].file_path == "/folder/file.txt"
Example #53
def test_retains_uniqueness_in_simplify(ls, rnd):
    ts = find(st.lists(st.uuids()), lambda x: len(x) >= 5, random=rnd)
    assert len(ts) == len(set(ts)) == 5