def create_defaults(snippy):
    """Add default snippets and solutions for testing purposes.

    Import four default contents (two snippets and two solutions), each
    from a mocked text file. Every individual import must succeed.

    Args:
        snippy (obj): Snippy class object used to run the imports.
    """
    # Data-driven loop replaces four copy-pasted import stanzas.
    defaults = (
        (Snippet.REMOVE, 'remove.txt'),
        (Snippet.FORCED, 'forced.txt'),
        (Solution.BEATS, 'beats.txt'),
        (Solution.NGINX, 'nginx.txt'),
    )
    for content, filename in defaults:
        file_content = Content.get_file_content(Content.TEXT, {'data': [content]})
        # Patch the built-in open used by the migrate module so the
        # import reads the mocked file content instead of the disk.
        with mock.patch('snippy.content.migrate.open', file_content, create=True):
            cause = snippy.run(['snippy', 'import', '-f', filename] + Content.db_cli_params())
            assert cause == Cause.ALL_OK
def test_cli_performance(self, snippy_perf, capsys, caplog):
    """Test CLI performance.

    Verify performance of the tool on a rough scale. The intention is
    to keep a reference test that is just iterated few times and the
    time consumed is measured. This is more for manual analysis than
    automation as of now.

    Reference PC: 1 loop : 0.0252 / 55 loop : 1.0865 / 100 loop : 1.9484
    Reference PC: 880 loop : 17.6897 / 1000 loop : 19.6802

    The reference is with sqlite database in memory as with all tests.
    There is naturally jitter in results and the values are as of now
    hand picked from few examples. Note that when run on Python2, will
    use sqlite database in disk that is naturally slower than memory
    database.

    No errors should be printed and the runtime should be below 10
    seconds. The runtime is intentionally set 15 times higher value
    than with the reference PC to cope with slow test environments.
    """
    start = time.time()
    for _ in range(55):
        self.create_defaults(snippy_perf)
    Content.assert_storage_size(4)

    # Search all content.
    cause = snippy_perf.run(
        ['snippy', 'search', '--scat', 'all', '--sall', '.'] + Content.db_cli_params())
    assert cause == Cause.ALL_OK

    # Delete all content. A loop over the default content digests
    # replaces four copy-pasted delete stanzas.
    digests = (
        '54e41e9b52a02b63',
        '53908d68425c61dc',
        '4346ba4c79247430',
        '6cfe47a8880a8f81',
    )
    for digest in digests:
        cause = snippy_perf.run(
            ['snippy', 'delete', '-d', digest] + Content.db_cli_params())
        assert cause == Cause.ALL_OK
    Content.assert_storage(None)

    runtime = time.time() - start
    out, err = capsys.readouterr()
    print("====================================")
    print("Runtime %.4f" % runtime)
    print("There are %d rows in stdout" % len(out))
    print("There are %d rows in stderr" % len(err))
    print("====================================")

    assert not err
    assert not caplog.records[:]
    assert runtime < 15