Example #1
    def test_handle_pull_request(self):
        ToolPullRequest.objects.all().delete()
        import time

        # Replace time.sleep with a mock that flips the pull request to 'failed',
        # so the command's wait loop sees the final status without actually sleeping.
        # Keep the real function around and restore it after the test.
        original_sleep = time.sleep
        self.addCleanup(setattr, time, 'sleep', original_sleep)

        def sleep_mock(v):
            t = ToolPullRequest.objects.get(tool='murcss', tagged_version='1.0')
            t.status = 'failed'
            t.save()
        time.sleep = sleep_mock

        stdout.startCapturing()
        stdout.reset()
        self.cmd.run(['murcss', '--pull-request', '--tag=1.0'])
        stdout.stopCapturing()
        cmd_out = stdout.getvalue()
        self.assertEqual(cmd_out, """Please wait while your pull request is processed
The pull request failed.
Please contact the admins.
""")

        # Same trick for the success path: the pull request for tag 2.0 succeeds.
        def sleep_mock_success(v):
            t = ToolPullRequest.objects.get(tool='murcss', tagged_version='2.0')
            t.status = 'success'
            t.save()
        time.sleep = sleep_mock_success
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run(['murcss', '--pull-request', '--tag=2.0'])
        stdout.stopCapturing()
        cmd_out = stdout.getvalue()
        self.assertEqual(cmd_out, """Please wait while your pull request is processed
murcss plugin is now updated in the system.
New version: 2.0
""")
Example #2
    def test_help(self):
        stdout.startCapturing()
        stdout.reset()
        with self.assertRaises(SystemExit):
            self.cmd.run(['--help'])
        stdout.stopCapturing()
        help_str = stdout.getvalue()

        self.assertEqual(help_str, '''Applies some analysis to the given data.
See https://code.zmaw.de/projects/miklip-d-integration/wiki/Analyze for more information.

The "query" part is a key=value list used for configuring the tool. It's tool dependent so check that tool help.

For Example:
    freva --plugin pca eofs=4 bias=False input=myfile.nc outputdir=/tmp/test

Usage: %s %s [options]

Options:
  -d, --debug           turn on debugging info and show stack trace on
                        exceptions.
  -h, --help            show this help message and exit
  --repos-version       show the version number from the repository
  --caption=CAPTION     sets a caption for the results
  --save                saves the configuration locally for this user.
  --save-config=FILE    saves the configuration at the given file path
  --show-config         shows the resulting configuration (implies dry-run).
  --scheduled-id=ID     Runs a scheduled job from database
  --dry-run             dry-run, perform no computation. This is used for
                        viewing and handling the configuration.
  --batchmode=BOOL      creates a SLURM job
  --unique_output=BOOL  If true append the freva run id to every output folder
  --pull-request        issue a new pull request for the tool (developer
                        only!)
  --tag=TAG             The git tag to pull
''' % (os.path.basename(sys.argv[0]), sys.argv[1]))

        stdout.startCapturing()
        stdout.reset()
        with self.assertRaises(SystemExit):
            self.cmd.run(['dummyplugin', '--help'])
        stdout.stopCapturing()
        help_str = stdout.getvalue()
        self.assertEqual(help_str, '''DummyPlugin (v0.0.0): A dummy plugin
Options:
number     (default: <undefined>)
           This is just a number, not really important
the_number (default: <undefined>) [mandatory]
           This is *THE* number. Please provide it
something  (default: test)
           No help available.
other      (default: 1.4)
           No help available.
input      (default: <undefined>)
           No help available.
''')
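Comparing the complete help text couples the test to optparse's exact line wrapping. A looser variant (a sketch, not part of the original suite) could check only that the relevant options are documented:

    def test_help_mentions_key_options(self):
        # Hypothetical, less brittle variant of the test above: assert only
        # that the important flags appear somewhere in the help output.
        stdout.startCapturing()
        stdout.reset()
        with self.assertRaises(SystemExit):
            self.cmd.run(['--help'])
        stdout.stopCapturing()
        help_str = stdout.getvalue()
        for flag in ('--repos-version', '--batchmode', '--dry-run', '--pull-request', '--tag'):
            self.assertIn(flag, help_str)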
Example #3
    def test_list_tools(self):
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run([])
        stdout.stopCapturing()
        plugin_list = stdout.getvalue()
        self.assertIn('DummyPlugin: A dummy plugin\n', plugin_list)
Example #4
    def run_command_with_capture(self, args_list=None):
        # Avoid a mutable default argument; fall back to an empty argument list.
        if args_list is None:
            args_list = []
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run(args_list)
        stdout.stopCapturing()
        return stdout.getvalue()
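This helper collapses the capture boilerplate repeated in the tests above. A usage sketch (hypothetical test name; the listing and assertion mirror Example #3):

    def test_list_tools_with_helper(self):
        # Same check as test_list_tools, expressed through the helper.
        plugin_list = self.run_command_with_capture([])
        self.assertIn('DummyPlugin: A dummy plugin\n', plugin_list)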
Example #5
    def test_dummy_command(self):
        stdout.startCapturing()
        stdout.reset()
        DummyCommand().run(['--input=10', '-d'])
        stdout.stopCapturing()
        command_out = stdout.getvalue()
        self.assertEqual(command_out, 'The answer is 10\n')
Example #6
    def test_bad_option(self):
        stdout.startCapturing()
        stdout.reset()
        with self.assertRaises(SystemExit):
            DummyCommand().run(['--input1'])
        stdout.stopCapturing()
        help_out = stdout.getvalue()
        self.assertIn('''Did you mean this?\n\tinput''', help_out)
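The "Did you mean this?" hint asserted here is the kind of suggestion difflib from the standard library produces; a minimal sketch of that mechanism (suggest_option is a hypothetical helper, not the framework's actual code):

import difflib


def suggest_option(wrong_option, known_options):
    """Return a 'Did you mean this?' hint for an unknown CLI option, or '' if nothing is close."""
    matches = difflib.get_close_matches(wrong_option.lstrip('-'), known_options, n=3, cutoff=0.6)
    if not matches:
        return ''
    return 'Did you mean this?\n\t%s' % '\n\t'.join(matches)


# suggest_option('--input1', ['input', 'debug', 'help']) returns 'Did you mean this?\n\tinput'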
Example #7
    def test_history(self):

        hist_ids = []
        for i in range(10):
            hist_ids += [
                self.user.getUserDB().storeHistory(tool=DummyPlugin(),
                                                   config_dict={
                                                       'the_number': 42,
                                                       'number': 12,
                                                       'something': 'else',
                                                       'other': 'value',
                                                       'input': '/folder'
                                                   },
                                                   status=0,
                                                   uid=self.udata.pw_name)
            ]

        # test history output
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run([])
        stdout.stopCapturing()
        output_str = stdout.getvalue()
        self.assertEqual(output_str.count('dummyplugin'), 10)
        self.assertEqual(output_str.count('\n'), 10)

        # test limit output
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run(['--limit=3'])
        stdout.stopCapturing()
        output_str = stdout.getvalue()
        self.assertEqual(output_str.count('dummyplugin'), 3)
        self.assertEqual(output_str.count('\n'), 3)

        # test return_command option
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run(['--entry_ids=%s' % hist_ids[0], '--return_command'])
        stdout.stopCapturing()
        output_str = stdout.getvalue()
        self.assertIn(
            '--plugin dummyplugin something=\'else\' input=\'/folder\' other=\'value\' number=\'12\' the_number=\'42\'',
            output_str)
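The --return_command assertion above rebuilds a plugin call from the stored config_dict. A rough sketch of that kind of reconstruction (build_plugin_command is a hypothetical helper; the parameter order in the real output follows the stored configuration, not necessarily plain dict order):

def build_plugin_command(tool_name, config_dict):
    """Rebuild a "--plugin <tool> key='value' ..." string from a stored configuration."""
    params = ' '.join("%s='%s'" % (key, value) for key, value in config_dict.items())
    return '--plugin %s %s' % (tool_name, params)


# build_plugin_command('dummyplugin', {'the_number': 42, 'number': 12})
# returns "--plugin dummyplugin the_number='42' number='12'"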
Example #8
    def test_list_commands(self):
        stdout.startCapturing()
        stdout.reset()
        self.freva.auto_doc()
        stdout.stopCapturing()
        freva_commands = stdout.getvalue()
        self.assertIn('--plugin', freva_commands)
        self.assertIn('--history', freva_commands)
        self.assertIn('--databrowser', freva_commands)
        self.assertIn('--crawl_my_data', freva_commands)
        self.assertIn('--esgf', freva_commands)
Example #9
    def test_crawl_my_data(self):
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run([])
        stdout.stopCapturing()
        output = stdout.getvalue()
        self.assertIn('Please wait while the system is crawling your data',
                      output)
        self.assertIn('Finished', output)
        self.assertIn('Crawling took', output)

        # crawling a path the user may not touch should make the command exit
        with self.assertRaises(SystemExit):
            self.cmd.run(['--path=/tmp/forbidden/folder'])
Example #10
    def test_run_plugin(self):

        # test missing parameter
        stdout.startCapturing()
        stdout.reset()
        with self.assertRaises(SystemExit):
            self.cmd.run(['dummyplugin'])
        stdout.stopCapturing()
        help_str = stdout.getvalue()
        self.assertIn('Error found when parsing parameters. Missing mandatory parameters: the_number', help_str)

        # test run tool
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run(['dummyplugin', 'the_number=32', '--caption="Some caption"'])
        stdout.stopCapturing()
        output_str = stdout.getvalue()
        self.assertIn('Dummy tool was run with: {\'input\': None, \'other\': 1.4, \'number\': None, \'the_number\': 32, \'something\': \'test\'}', output_str)

        # test get version
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run(['dummyplugin', '--repos-version'])
        stdout.stopCapturing()
        output_str = stdout.getvalue()

        # test batch mode
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run(['dummyplugin', '--batchmode=True', 'the_number=32'])
        stdout.stopCapturing()
        output_str = stdout.getvalue()

        # test save config
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run(['dummyplugin', 'the_number=32', '--save'])
        stdout.stopCapturing()
        output_str = stdout.getvalue()
        fn = '/home/illing/evaluation_system/config/dummyplugin/dummyplugin.conf'
        self.assertTrue(os.path.isfile(fn))
        os.remove(fn)

        # test show config
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run(['dummyplugin', 'the_number=42', '--show-config'])
        stdout.stopCapturing()
        output_str = stdout.getvalue()
        self.assertEqual(output_str, '''    number: -
the_number: 42
 something: test
     other: 1.4
     input: -
''')
        # remove all history entries
        History.objects.all().delete()
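The --show-config block compared above right-aligns the parameter names and prints '-' for unset values; a small sketch of that formatting (format_config is a hypothetical helper, not the plugin manager's actual code):

def format_config(config):
    """Right-align parameter names; unset values are shown as '-'."""
    width = max(len(key) for key in config)
    return ''.join('%s: %s\n' % (key.rjust(width), '-' if value is None else value)
                   for key, value in config.items())


# format_config({'number': None, 'the_number': 42, 'something': 'test',
#                'other': 1.4, 'input': None}) reproduces the block asserted above.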
Example #11
    def test_command_fail(self):
        # add an entry to pull-requests
        pr = ToolPullRequest.objects.create(tool='murcss',
                                            tagged_version='1.0',
                                            user=User.objects.first(),
                                            status='waiting')
        stdout.startCapturing()
        stdout.reset()
        with self.assertRaises(SystemExit):
            self.cmd.run([])
        stdout.stopCapturing()
        cmd_out = stdout.getvalue()
        new_pr = ToolPullRequest.objects.get(id=pr.id)
        self.assertEqual(new_pr.status, 'failed')
        self.assertIn('ERROR:   Plugin murcss does not exist', cmd_out)
Example #12
    def test_auto_doc(self):
        stdout.startCapturing()
        stdout.reset()
        with self.assertRaises(SystemExit):
            self.cmd.run(['--help'])
        stdout.stopCapturing()
        doc_str = stdout.getvalue()
        self.assertEqual(
            doc_str, '''Use this command to update your projectdata.

Usage: %s %s [options]

Options:
  -d, --debug  turn on debugging info and show stack trace on exceptions.
  -h, --help   show this help message and exit
  --path=PATH  crawl the given directory
''' % (os.path.basename(sys.argv[0]), sys.argv[1]))
Example #13
    def test_auto_doc(self):
        stdout.startCapturing()
        stdout.reset()
        with self.assertRaises(SystemExit):
            DummyCommand().run(['--help'])
        stdout.stopCapturing()
        doc_str = stdout.getvalue()

        self.assertEqual(
            doc_str, '''This is a test dummy

Usage: %s %s [options]

Options:
  -d, --debug   turn on debugging info and show stack trace on exceptions.
  -h, --help    show this help message and exit
  --input=PATH  Some input value
''' % (os.path.basename(sys.argv[0]), sys.argv[1]))
Example #14
    def test_command(self):
        # add a "broken" but running job to db
        broken_obj = History.objects.create(
            status=History.processStatus.running,
            slurm_output='/some/out.txt',
            timestamp=datetime.datetime.now(),
            uid=User.objects.first())
        # a job that finished normally should be left untouched by the check
        History.objects.create(status=History.processStatus.finished,
                               slurm_output='/some/out.txt',
                               timestamp=datetime.datetime.now(),
                               uid=User.objects.first())
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run([])
        stdout.stopCapturing()
        cmd_out = stdout.getvalue()
        self.assertEqual(cmd_out, 'Setting job %s to broken\n' % broken_obj.id)
        self.assertEqual(
            History.objects.get(id=broken_obj.id).status,
            History.processStatus.broken)
Example #15
    def test_command_success(self):

        repo_path = '/tmp/test_plugin.git'
        tool_path = '/tmp/test_tool'
        os.makedirs(repo_path)
        shutil.copy('tests/mocks/result_tags.py', repo_path)
        # prepare git repo
        os.system('cd %s; git init; git add *; git commit -m "first commit" ' %
                  (repo_path))
        # clone it
        os.system('git clone %s %s' % (repo_path, tool_path))
        # create a new tag
        os.system('cd %s; git tag -a v2.0 -m "new tag"' % (repo_path))
        # add plugin to system
        os.environ['EVALUATION_SYSTEM_PLUGINS'] = '/tmp/test_tool,result_tags'
        pm.reloadPlugins()
        repository = Repo(tool_path)
        self.assertEqual(len(repository.tags), 0)

        pr = ToolPullRequest.objects.create(tool='resulttagtest',
                                            tagged_version='v2.0',
                                            user=User.objects.first(),
                                            status='waiting')
        # finally run the command
        stdout.startCapturing()
        stdout.reset()
        self.cmd.run([])
        stdout.stopCapturing()
        cmd_out = stdout.getvalue()

        self.assertIn('Processing pull request for resulttagtest by', cmd_out)
        new_pr = ToolPullRequest.objects.get(id=pr.id)
        self.assertEqual(new_pr.status, 'success')
        self.assertEqual(repository.tags[0].name, 'v2.0')
        self.assertEqual(len(repository.tags), 1)
        shutil.rmtree(repo_path)
        shutil.rmtree(tool_path)
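The repository fixture above is wired together with os.system one-liners, which silently ignore failures. A sketch of the same setup with subprocess.run, which raises on a non-zero exit status (Python 3.5+; prepare_repo is a hypothetical helper, paths as in the test):

import subprocess


def prepare_repo(repo_path, tool_path):
    """Initialise the fixture repository, clone it, and tag the original."""
    def git(*args, cwd=None):
        # check=True raises CalledProcessError instead of silently ignoring failures.
        subprocess.run(['git'] + list(args), cwd=cwd, check=True)

    git('init', cwd=repo_path)
    git('add', '.', cwd=repo_path)
    git('commit', '-m', 'first commit', cwd=repo_path)
    git('clone', repo_path, tool_path)
    git('tag', '-a', 'v2.0', '-m', 'new tag', cwd=repo_path)

Here 'git add .' stands in for the shell glob 'git add *' used in the test, since no shell expansion happens with an argument list.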