    def test_execute_with_mock_anitya(self, ecosystem, project, md5sum,
                                      dist_git):
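        """Run AnityaTask.execute() against a fresh database with all of its
        helper methods mocked out, and check that it completes and returns None.
        """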
        rdb()
        s = create_db_scoped_session()
        dummy_homepage = "http://project-homepage.com"

        dummy_response = Response()
        dummy_response.status_code = 200
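        # canned 200 response that the mocked Anitya helpers below will return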
        s.add(Ecosystem(name='npm', backend=EcosystemBackend.npm))
        s.commit()
        DownstreamMapCache()[md5sum] = dist_git  # fill in key-value mapping in cache

        task = AnityaTask.create_test_instance(task_name='anitya')
        args = {'ecosystem': ecosystem, 'name': project}
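        # stub out every helper so no real Anitya service is contacted;
        # .once() makes flexmock verify each helper is called exactly one time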
        flexmock(task).should_receive(
            "_get_project_homepage").once().and_return(dummy_homepage)
        flexmock(task).should_receive("_get_artifact_hash").once().and_return(
            md5sum)
        flexmock(task).should_receive(
            "_create_anitya_project").once().and_return(dummy_response)
        flexmock(task).should_receive(
            "_add_downstream_mapping").once().and_return(dummy_response)

        results = task.execute(arguments=args)
        assert results is None
Example #2
def rdb():
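    """Reset the test database: drop all tables and recreate the schema."""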
    session = create_db_scoped_session()
    # TODO: we may need to run actual migrations here
    # make sure all session objects from scoped_session get closed here
    #  otherwise drop_all() would hang indefinitely
    session.close_all()
    # NOTE: this also drops any data created by fixtures (e.g. builtin ecosystems),
    #   so if you want to use these, create them by hand before running your tests
    # We can't use Base.metadata.drop_all(bind=session.bind), since they may be tables from
    #   e.g. bayesian server, that reference f8a_worker tables and will prevent dropping them
    tables = session.bind.table_names()
    for t in tables:
        session.execute('drop table if exists "{t}" cascade'.format(t=t))
        session.commit()
    Base.metadata.create_all(bind=session.bind)
    return session

    def setup_method(self, method):
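        """Start from an empty database and populate it with one ecosystem,
        package and version, plus two analyses finished ten seconds apart.
        """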
        rdb()
        self.s = create_db_scoped_session()
        self.en = 'foo'
        self.pn = 'bar'
        self.vi = '1.1.1'
        self.e = Ecosystem(name=self.en, backend=EcosystemBackend.maven)
        self.p = Package(ecosystem=self.e, name=self.pn)
        self.v = Version(package=self.p, identifier=self.vi)
        self.a = Analysis(version=self.v, finished_at=datetime.datetime.now())
        self.a2 = Analysis(version=self.v,
                           finished_at=datetime.datetime.now() +
                           datetime.timedelta(seconds=10))
        self.s.add(self.a)
        self.s.add(self.a2)
        self.s.commit()

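        # storage adapter the tests exercise, using the configured Postgres connection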
        self.bp = BayesianPostgres(
            connection_string=configuration.POSTGRES_CONNECTION)