def test_execution(name, python_command):
    '''Run the standalone binary for each engine flavor (AQL, UIMA, Ruta),
    then the UIMA/Ruta variants again with their empty PEAR packages.'''
    runs = [
        ['output/aql/bin/standalone', 'loop=100'],
        ['output/uima/bin/standalone', 'loop=100'],
        ['output/ruta/bin/standalone', 'loop=100'],
        ['output/uima/bin/standalone', 'loop=100', 'pear=EmptyUima.pear'],
        ['output/ruta/bin/standalone', 'loop=100', 'pear=EmptyRuta.pear'],
    ]
    # Each command must succeed; tt.run_checked aborts the test on failure.
    for cmd in runs:
        tt.run_checked(cmd)
# Beispiel #2 (Example #2) — score: 0
def test_execution(name, python_command):
    '''Execute the test case.

    Builds the model with the ModelBuilder standalone, diffs the generated
    tf-idf corpus against the expected file, then runs the main standalone
    and diffs its output as well. Every step must succeed (tt.run_checked
    fails the test otherwise).
    '''
    # Re-indented with 4 spaces: this block used tabs, unlike the rest of
    # the file (PEP 8 forbids mixing).
    tt.run_checked(['output/ModelBuilder/bin/standalone'])
    tt.run_checked(['diff', 'data/out_tfidf_corpus.txt',
                    'data/tfidf_corpus.txt.expected'])
    # Parenthesized print works under both Python 2 and Python 3; the bare
    # print statement used before is Python-2-only.
    print('****** corpus trainer finished')
    tt.run_checked(['output/bin/standalone'])
    tt.run_checked(['diff', 'data/out.txt', 'data/expected.txt'])
def test_execution(name, python_command):
    '''Execute the ContentRankingSample scenario.

    Trains the model via the ModelBuilder standalone using python_command,
    installs the provided model files matching that interpreter's major
    version, then runs the scorer standalone and diffs its output against
    the version-specific expected file.
    '''
    # Python-2-only "print ..." statements replaced by the parenthesized
    # form, which behaves identically under Python 2 and also runs on 3.
    print("Execute scenario ContentRankingSample")
    tt.run_checked([
        'output/ModelBuilder/bin/standalone', 'pythonCommand=' + python_command
    ])

    # The test script itself runs in Python 2, but the Streams job might use
    # a different interpreter — pick the provided model files matching the
    # major version of python_command.
    ver = tt.get_major_version(python_command)
    shutil.copy('data/model_KB/d_lemms.json.provided' + str(ver),
                'data/model_KB/d_lemms.json')
    shutil.copy('data/model_KB/kb_lstm_model.pklz.provided' + str(ver),
                'data/model_KB/kb_lstm_model.pklz')

    # Visual separators around the standalone run in the test log.
    print('XXXXXXXXXXXXXXXXXXXXXXXXXXX')
    tt.run_checked(
        ['output/bin/standalone', 'pythonCommand=' + python_command])
    print('XXXXXXXXXXXXXXXXXXXXXXXXXXX')
    tt.run_checked(
        ['diff', 'data/out.txt', 'data/expected' + str(ver) + '.txt'])
# Beispiel #4 (Example #4) — score: 0
def test_execution(name, python_command):
    '''Run the standalone binary and verify its output matches the
    expected file.'''
    steps = (
        ['output/bin/standalone'],
        ['diff', 'data/out.txt', 'data/expected.txt'],
    )
    # A failing step (non-zero exit) fails the test via tt.run_checked.
    for step in steps:
        tt.run_checked(step)
# Beispiel #5 (Example #5) — score: 0
def test_execution(name, python_command):
    '''Drive the shell test script, then check that the two memory dumps
    it produced (mem0, mem1) are identical.'''
    for step in (['./runTest.sh'], ['diff', 'mem0', 'mem1']):
        tt.run_checked(step)
# Beispiel #6 (Example #6) — score: 0
def test_execution(name, python_command):
    '''Execute the LinearClassificationSample scenario.

    Runs the train/score/diff cycle twice: first with the default training
    data, then with training2Classes.csv, diffing the scorer output
    (data/out2.txt) against the matching expected file each time.
    '''
    # Python-2-only "print ..." replaced by the parenthesized form, which
    # behaves identically on Python 2 and also runs on 3.
    print("Execute scenario LinearClassificationSample")
    # First pass: default training file.
    tt.run_checked([
        'output/ModelBuilder/bin/standalone', 'pythonCommand=' + python_command
    ])
    tt.run_checked(
        ['output/bin/standalone', 'pythonCommand=' + python_command])
    tt.run_checked(['diff', 'data/out2.txt', 'data/expected.txt'])
    # Second pass: explicit two-class training file.
    tt.run_checked([
        'output/ModelBuilder/bin/standalone',
        'pythonCommand=' + python_command, 'trainingFile=training2Classes.csv'
    ])
    tt.run_checked(
        ['output/bin/standalone', 'pythonCommand=' + python_command])
    tt.run_checked(['diff', 'data/out2.txt', 'data/expected2Classes.txt'])
# Beispiel #7 (Example #7) — score: 0
def test_execution(name, python_command):
    '''Execute the test case'''
    tt.run_checked(['output/bin/standalone'])