def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # Drill into the parsed verb tree once instead of repeating the chain.
    statement = context.verbs[7].children[1].children[0]
    is_expected_print = (
        statement.identifier == 'print'
        and statement.parameters == {'expression': '"This should always happen three times."'}
    )
    ok_(is_expected_print)
def test_line_labels(self):
    """Lines plotted vs depth keep their data, labels, and a 'time:' legend."""
    fig = plot_vs_depth(self.x, self.z, ['a', 'b', 'c'])
    ax = fig.get_axes()[0]
    expected_xy = [
        np.array([[1., 0.], [3., 2.], [5., 4.], [7., 6.], [9., 8.]]),
        np.array([[2., 0.], [4., 2.], [6., 4.], [8., 6.], [10., 8.]]),
        np.array([[3., 0.], [5., 2.], [7., 4.], [9., 6.], [11., 8.]]),
    ]
    for idx, (xy, label) in enumerate(zip(expected_xy, ['a', 'b', 'c'])):
        line = ax.get_lines()[idx]
        assert_allclose(line.get_xydata(), xy)
        assert_equal(line.get_label(), label)
    ok_(ax.get_legend() is not None)
    assert_equal(ax.get_legend().get_title().get_text(), 'time:')
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # Fetch the renamed definition, then ask the control layer to remove it.
    matches = context.node.find({"name" : "Test_Process_definition_CHANGED"}, context.user)
    context.original_definition = matches[0]
    _result = context.control.remove_process_definition(
        context.original_definition["_id"], context.user)
    ok_(True)
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    print(
        "\n\nA running BPM process has to be killed\n==================================================\n"
    )
    # Route incoming traffic to the blocking-message handler before sending.
    context.broker_socket.on_message = on_blocking_message
    context.message = {
        "userId": context.user["_id"],
        "processDefinitionId": "5564bca7a5cb644b68801b94",
        "destination": "agent_peer",
        "globals": {"global_variable": "global_variable value"},
        "entryPoint": {"moduleName": "block"},
        "processId": context.blocking_process_id,
        "runAs": "someoneelse",
        "sourceProcessId": str(context.process_process_id),
        "messageId": getNextMessageId(),
        "source": "broker_peer",
        "schemaRef": "ref://bpm.message.bpm.process.start",
    }
    context.broker_socket.received_message(json.dumps(context.message))
    # Give the asynchronous handling a moment to run.
    time.sleep(0.1)
    ok_(True)
def test_mark_workflow_instance_complete(self):
    """Marking a workflow complete applies the status and stamps end_date."""
    instance = WorkflowInstance()
    InstanceWorkflowEngine._mark_workflow_instance_complete(
        instance, StatusConstants.FAILED)
    eq_(StatusConstants.FAILED, instance.status_id)
    ok_(instance.end_date is not None)
def test_line_labels(self):
    """Lines plotted vs time keep their data, labels, and a depth legend."""
    fig = plot_vs_time(self.t, self.y, ['a', 'b', 'c'])
    ax = fig.get_axes()[0]
    expected_xy = [
        np.array([[0., 1.], [2., 3.], [4., 5.], [6., 7.], [8., 9.]]),
        np.array([[0., 2.], [2., 4.], [4., 6.], [6., 8.], [8., 10.]]),
        np.array([[0., 3.], [2., 5.], [4., 7.], [6., 9.], [8., 11.]]),
    ]
    for idx, (xy, label) in enumerate(zip(expected_xy, ['a', 'b', 'c'])):
        line = ax.get_lines()[idx]
        assert_allclose(line.get_xydata(), xy)
        assert_equal(line.get_label(), label)
    ok_(ax.get_legend() is not None)
    assert_equal(ax.get_legend().get_title().get_text(), 'Depth interval:')
def test_eval_grammar(self):
    """A boolean expression evaluates with a custom grammar token mapping."""
    expression = 'age < const:majority & "o" in name & birthdate > "1983-02-02"'
    values = {"name": "sokka", "age": 15,
              "birthdate": datetime.date(1984, 1, 1)}
    constants = {'majority': 18}
    ok_(eval_boolean(expression, values, constants,
                     grammar_tokens={'belongs_to': 'in'}))
def test_row_major_order_reverse_map():
    """test for row_major_order_reverse_map"""
    # Each case: keyword arguments -> expected flat index mapping.
    cases = [
        (dict(shape=(3, 3), index_steps=None, transpose=False),
         [0, 1, 2, 3, 4, 5, 6, 7, 8]),
        (dict(shape=(3, 3), index_steps=(-1, 1), transpose=False),
         [6, 7, 8, 3, 4, 5, 0, 1, 2]),
        (dict(shape=(3, 3), index_steps=(1, -1), transpose=False),
         [2, 1, 0, 5, 4, 3, 8, 7, 6]),
        (dict(shape=(3, 3), index_steps=(-1, -1), transpose=False),
         [8, 7, 6, 5, 4, 3, 2, 1, 0]),
        (dict(shape=(3, 3), index_steps=None, transpose=True),
         [0, 3, 6, 1, 4, 7, 2, 5, 8]),
    ]
    for kwargs, expected in cases:
        ok_(np.allclose(row_major_order_reverse_map(**kwargs),
                        np.array(expected)))
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # Same check as the verb-tree step, but against the JSON representation.
    statement = context.json[6]["children"][1]["children"][0]
    is_expected_print = (
        statement["identifier"] == 'print'
        and statement["parameters"] == {'expression': '"This should always happen three times."'}
    )
    ok_(is_expected_print)
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # Let the asynchronous delivery finish before inspecting the handler.
    time.sleep(0.01)
    received = context.worker_handler.message
    ok_(received['schemaRef'] == "ref://of.message.message"
        and received['data'] == context.message["data"])
def test_mark_pipeline_instance_complete(self):
    """Marking a pipeline complete applies the status and stamps end_date."""
    instance = PipelineInstance()
    InstanceWorkflowEngine._mark_pipeline_instance_complete(
        instance, StatusConstants.FAILED)
    eq_(StatusConstants.FAILED, instance.status_id)
    ok_(instance.end_date is not None)
def test_talbot_with_shift():
    """test for Talbot numerical inverse Laplace with shift"""
    inverter = Talbot(f=f3, n=24, shift=1.0, dps=None)
    # single value of t:
    expected = mpmath.exp(mpmath.mpf('1.0'))
    ok_(mpallclose(inverter(1), expected))
def test_mark_stage_instance_complete(self):
    """Marking a stage complete applies the status and stamps end_date."""
    instance = StageInstance()
    InstanceWorkflowEngine._mark_stage_instance_complete(
        instance, StatusConstants.SUCCESS)
    eq_(StatusConstants.SUCCESS, instance.status_id)
    ok_(instance.end_date is not None)
def test_mark_action_instance_complete(self):
    """Marking an action complete applies the status and stamps end_date."""
    instance = ActionInstance()
    InstanceWorkflowEngine._mark_action_instance_complete(
        instance, StatusConstants.SUCCESS)
    eq_(StatusConstants.SUCCESS, instance.status_id)
    ok_(instance.end_date is not None)
def step_impl(context):
    """
    :type context behave.runner.Context

    Hand-resolve the group schema's $ref/allOf composition and compare it
    against the resolver's output in context.resolvedGroupSchema.
    """
    def replace_attribute(parent, attribute, new_value):
        # Splice the referenced definition's keys directly into `parent`,
        # removing the "$ref"-style attribute it replaces.
        parent.pop(attribute)
        parent.update(new_value)
    # Completely manually resolve the group schema (this for the test to work better even if minor changes to the schemas are made)
    type_schema = json_load_file(os.path.join(script_location, "../../namespaces/of/type.json"))
    objectId_def = type_schema["properties"]["objectId"]
    # uuid_def/datetime_def are loaded alongside objectId_def but unused below.
    uuid_def = type_schema["properties"]["uuid"]
    datetime_def = type_schema["properties"]["datetime"]
    node_schema = json_load_file(os.path.join(script_location, "../../namespaces/of/node/node.json"))
    # NOTE(review): this is an alias of node_schema["properties"], not a copy,
    # so the replace_attribute calls below mutate node_schema in place.
    manually_resolved_node_properties = node_schema["properties"]
    replace_attribute(manually_resolved_node_properties["_id"],"$ref", objectId_def)
    replace_attribute(manually_resolved_node_properties["parent_id"],"$ref", objectId_def)
    replace_attribute(manually_resolved_node_properties["canRead"]["items"],"$ref", objectId_def)
    replace_attribute(manually_resolved_node_properties["canWrite"]["items"],"$ref", objectId_def)
    group_schema = json_load_file(os.path.join(script_location, "../../namespaces/of/node/group.json"))
    manually_resolved_group = deepcopy(group_schema)
    # Flatten the "allOf" composition by hand: node properties first, then the
    # group's own properties layered on top.
    del manually_resolved_group["allOf"]
    # Also an alias (no copy) — the update below mutates node_schema too;
    # harmless here since node_schema is not reused afterwards.
    manually_resolved_group["properties"] = node_schema["properties"]
    manually_resolved_group["properties"].update(group_schema["allOf"][1]["properties"])
    replace_attribute(manually_resolved_group["properties"]["rights"]["items"],"$ref", objectId_def)
    ok_(context.resolvedGroupSchema == manually_resolved_group)
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # Both the base node schema and the custom schema must be registered.
    registered = context.schema_tools.json_schema_objects
    ok_("ref://of.node.node" in registered and "ref://cust.car" in registered)
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # BUG FIX: "Files do not match!" was previously passed as cmp()'s third
    # positional argument (filecmp.cmp's `shallow` flag) instead of as
    # ok_()'s failure message, so ok_ never received the message and the
    # comparison mode was silently altered. Compare with two arguments and
    # give the message to ok_() where it belongs.
    ok_(cmp(os.path.join(script_dir, "../source.py"),
            os.path.join(script_dir, "../source_out.py")),
        "Files do not match!")
def test_pickle_std(self):
    """Verify a wrapper around sys.stdin survives a pickle round trip."""
    wrapper = FileObjectWrapper(sys.stdin)
    binary = pickle.dumps(wrapper)
    restored_object = pickle.loads(binary)
    # The restored wrapper must still expose a readable file object.
    ok_(hasattr(restored_object.file, "read"))
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # A registered session id must resolve to a session object.
    session = check_session(context.session_id)
    ok_(session is not None)
def step_impl(context):
    """
    :type context: behave.runner.Context
    """
    global _global_err_cmp, _global_debug_cmp, _global_err_param, _global_debug_param
    _debug_msg = make_textual_log_message(*_global_debug_param)
    # Encode both sides in the failure message so whitespace differences show.
    mismatch_details = ("Debug message did not match: \nResult:"
                        + str(_debug_msg.encode())
                        + "\nComparison:\n"
                        + str(_global_debug_cmp.encode()))
    ok_(_debug_msg == _global_debug_cmp, mismatch_details)
def test_talbot_dps_fail():
    "test for Talbot numerical inverse Laplace with mpmath insufficient dps"
    inverter = Talbot(f=f1, n=200, shift=0.0, dps=None)
    # t=0 raise error:
    assert_raises(ValueError, inverter, 0)
    # single value of t: with n=200 and default precision the inversion is
    # expected to be inaccurate, so the values must NOT be close.
    ok_(not mpallclose(inverter(1), mpmath.exp(mpmath.mpf('-1.0'))))
def emp_web_browse_employees(context):
    """Open the employee list page in Chrome and verify it is non-empty."""
    context.driver = webdriver.Chrome()
    context.driver.get("http://localhost:7799/web/emp")
    # NOTE(review): find_element_by_id is deprecated in Selenium 4 — confirm
    # the pinned selenium version before modernizing.
    context.count = int(context.driver.find_element_by_id("emp_count").text)
    print(context.count)
    context.driver.save_screenshot("list.png")
    ok_(context.count > 0, "Employees not found")
def test_passes_conditional_complex_or_passes(self):
    """A complex OR conditional with a truthy-looking string still fails."""
    handler = RemoteNotificationHandler()
    mock_pipeline_instance = MagicMock(status_id=2)
    mock_pipeline_instance.get_parameters_dict.return_value = {
        '__testing__': 2, '__trial__': 'True'}
    conditional = ('(pipelineInstance.status_id == 2 && {__testing__} == 1)'
                   ' || {__trial__}')
    result = handler.passes_conditional(
        mock_pipeline_instance, MagicMock(), conditional)
    ok_(not result)
def test_talbot_with_more_complicated():
    """test for Talbot numerical inverse Laplace with sin"""
    inverter = Talbot(f=f4, n=24, shift=0, dps=None)
    # single value of t:
    expected = mpmath.mpf('2.0') * mpmath.sin(mpmath.mpf('2.0'))
    ok_(mpallclose(inverter(2, args=(1,)), expected))
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # Locate the renamed definition, then remove it via the control layer.
    found = context.node.find(
        {"name": "Test_Process_definition_CHANGED"}, context.user)
    context.original_definition = found[0]
    _result = context.control.remove_process_definition(
        context.original_definition["_id"], context.user)
    ok_(True)
def test_save_data_grid_data_dicts(self):
    """Saving grid data dicts writes an out0002.csv under the output dir."""
    loader = InputFileLoaderCheckerSaver()
    loader._grid_data_dicts = {'data': np.arange(6).reshape(3, 2)}
    loader.save_data_to_file = True
    loader._save_data()
    expected_path = os.path.join(
        self.tempdir.path, 'out0002', 'out0002.csv')
    ok_(os.path.isfile(expected_path))
def test_save_data_input_ext(self):
    """The original input text is saved with the configured extension."""
    loader = InputFileLoaderCheckerSaver()
    loader._input_text = "hello"
    loader.input_ext = '.txt'
    loader.save_data_to_file = True
    loader._save_data()
    expected_path = os.path.join(
        self.tempdir.path, 'out0002', 'out0002_input_original.txt')
    ok_(os.path.isfile(expected_path))
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    elapsed_ms = (context.tests_ended - context.tests_started) * 1000
    print("Took " + str(elapsed_ms) + " milliseconds.")
    ok_(context.process_result["result"] == {"result": "result"}
        and context.process_result["globals"] == {"context": "context"})
def test_std(self):
    """Build a wrapper object from sys.stdin."""
    wrapper = FileObjectWrapper(sys.stdin)
    eq_(wrapper.name, "<stdin>")
    ok_(hasattr(wrapper.file, "read"))
    # The wrapped file must also be usable as a context manager.
    with wrapper.file as _:
        pass
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # Allow the write to land before querying.
    time.sleep(0.01)
    query = {"conditions": {"name": "Test_process_name"},
             "collection": "process"}
    context.process_instance = context.db_access.find(query, context.user)[0]
    ok_(context.message["name"] == context.process_instance["name"])
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # The stub must raise NotImplementedError; returning normally is a failure.
    try:
        context.TestInstance.not_implemented_function()
    except NotImplementedError:
        ok_(True)
    else:
        ok_(False)
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # Allow the log write to land before querying.
    time.sleep(0.01)
    query = {"conditions": {"processId": context.process_instance["_id"]},
             "collection": "log"}
    context.log_item = context.db_access.find(query, context.user)[0]
    ok_(True)
def test_talbot_with_more_complicated():
    """test for Talbot numerical inverse Laplace with sin"""
    inverter = Talbot(f=f4, n=24, shift=0, dps=None)
    # single value of t:
    t_sin_t = mpmath.mpf('2.0') * mpmath.sin(mpmath.mpf('2.0'))
    ok_(mpallclose(inverter(2, args=(1, )), t_sin_t))
def test_passes_conditional_fails(self):
    """A conditional whose first clause is false does not pass."""
    handler = RemoteNotificationHandler()
    mock_pipeline_instance = MagicMock(status_id=2)
    mock_pipeline_instance.get_parameters_dict.return_value = {}
    action_instance = MagicMock(action=MagicMock(id=1), status_id=4)
    conditional = ('pipelineInstance.status_id == 1 and'
                   ' actionInstance.action.id == 1 and'
                   ' actionInstance.status_id == 4')
    ok_(not handler.passes_conditional(
        mock_pipeline_instance, action_instance, conditional))
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    _result = context.control.load_process_definition(
        context.original_definition["_id"], context.user)
    # Empty result -> lookup failed outright; otherwise compare contents.
    if not _result:
        ok_(False, "Failed to load process definition, empty array. Check permissions, formatting and Id.")
    else:
        ok_(context.original_definition == _result,
            "Failed to load process definition, differing result.")
def test_get_client_integration(self):
    """The '-t integration' flag selects the integration client."""
    sys.argv = ['cmonkey', '-t', 'integration', 'listUsers']
    args = _parse_args()
    ok_(isinstance(_get_client(args), IntegrationClient))
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    _result = context.control.load_process_definition(context.original_definition["_id"], context.user)
    # After removal the definition must no longer be loadable.
    if len(_result) == 0:
        ok_(True)
    else:
        ok_(False, "Failed to load process definition, differing result.")
def test_defaults(self):
    """Default generic-load plot: 4 axes, correct data, labels and legends."""
    fig = plot_generic_loads([[self.triple1], [self.triple2]],
                             load_names=self.load_names)
    assert_equal(len(fig.get_axes()), 4)
    ax1, ax2, ax3, ax4 = fig.get_axes()[0:4]
    line1 = ax1.get_lines()[0]
    line2 = ax2.get_lines()[0]
    line3 = ax3.get_lines()[0]
    line4 = ax4.get_lines()[0]
    # first row of charts
    assert_allclose(line1.get_xydata()[0], np.array([0, 0]))
    assert_allclose(line1.get_xydata()[-1],
                    np.array([10, 1 * np.cos(0.5 * 10 + 0.3)]))
    assert_allclose(line2.get_xydata()[0], np.array([1, 0]))
    assert_allclose(line2.get_xydata()[-1], np.array([0.5, 1]))
    # 2nd row of charts
    assert_allclose(line3.get_xydata()[0], np.array([0, 0]))
    assert_allclose(line3.get_xydata()[-1], np.array([9, 2]))
    assert_allclose(line4.get_xydata()[0], np.array([1, 0]))
    assert_allclose(line4.get_xydata()[-1], np.array([0.8, 1]))
    # axis labels, checked pairwise per axes
    expected_axis_labels = [('', 'y0'), ('', 'Depth, z'),
                            ('Time', 'y1'), ('Load factor', 'Depth, z')]
    for ax, (xlabel, ylabel) in zip((ax1, ax2, ax3, ax4),
                                    expected_axis_labels):
        assert_equal(ax.get_xlabel(), xlabel)
        assert_equal(ax.get_ylabel(), ylabel)
    # line labels come from load_names
    for line, label in zip((line1, line2, line3, line4),
                           ('a0', 'a0', 'b0', 'b0')):
        assert_equal(line.get_label(), label)
    ok_(ax1.get_legend() is not None)
    ok_(ax3.get_legend() is not None)
def test_object_members():
    """test for object_members function"""
    import math
    # Routines the math module is known to expose across versions.
    expected_routines = {
        'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil',
        'copysign', 'cos', 'cosh', 'degrees', 'erf', 'erfc', 'exp', 'expm1',
        'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum', 'gamma',
        'hypot', 'isinf', 'isnan', 'ldexp', 'lgamma', 'log', 'log10',
        'log1p', 'modf', 'pow', 'radians', 'sin', 'sinh', 'sqrt', 'tan',
        'tanh', 'trunc'}
    found = set(object_members(math, 'routine', join=False))
    ok_(expected_routines.issubset(found))
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # Ask the admin control endpoint to stop the named agent.
    payload = {"address": "agent01",
               "command": "stop",
               "reason": "Testing to stop an Agent."}
    call_api(_url="https://127.0.0.1:8080/admin/control/agent_control",
             _session_id=context.session["session_id"],
             _data=payload,
             _verify_SSL=False)
    ok_(True)
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # Windows process startup is slower, so wait longer there.
    delay = 2.6 if os.name == "nt" else .6
    time.sleep(delay)
    ok_(context.test_worker_process_instance)
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # After removal, loading the definition must come back empty.
    _result = context.control.load_process_definition(
        context.original_definition["_id"], context.user)
    if _result:
        ok_(False, "Failed to load process definition, differing result.")
    else:
        ok_(True)
def test_ext_epus_import():
    """Check geotecha.constitutive_models.epus_ext can be imported."""
    try:
        import geotecha.constitutive_models.epus_ext as epus_ext
    except ImportError:
        ok_(False,
            "Failed to import geotecha.constitutive_models.epus_ext "
            "(fortran), scalar versions will be used instead.")
def test_get_client_default(self):
    """Without an explicit type, the signature client is the default."""
    sys.argv = ['cmonkey', '-a', 'foo', '-s', 'bar', 'listUsers']
    args = _parse_args()
    ok_(isinstance(_get_client(args), SignatureClient))
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # Give the round trip a moment to complete.
    time.sleep(0.1)
    elapsed_ms = (context.receiver.sent_last_message_at
                  - context.sender.received_last_message_at) * 1000
    print("Took " + str(elapsed_ms) + " milliseconds.")
    # The broker rewrites the source before delivery.
    context.message.update({"source": "source_peer"})
    ok_(context.receiver.message == context.message)
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    # The node must be gone after removal; an empty find() result passes.
    _result = context.node.find({"_id": of_object_id(context.curr_id)}, context.user)
    if len(_result) == 0:
        ok_(True)
    else:
        # Fixed grammar in the failure message ("wasn't remove" -> "wasn't removed").
        ok_(False, "Test log item wasn't removed, result:" + str(_result))
def step_impl(context):
    """
    :type context behave.runner.Context
    """
    _result = context.node.find(
        {"_id": "ObjectId(" + context.curr_id + ")"}, context.user)
    # Guard-style: report the mismatch first, otherwise pass.
    if _result[0]["name"] != "test_node_changed":
        ok_(False, "Test log item didn't match change, result:" + str(_result))
    else:
        ok_(True)
def test_can_continue_workflow_should_fail(self):
    """A FAILED-typed status among the actions blocks continuation."""
    engine = InstanceWorkflowEngine(Mock(), Mock())
    engine.status_cache[StatusConstants.SUCCESS] = Status(
        id=StatusConstants.SUCCESS, type=StatusTypes.SUCCESS)
    engine.status_cache[100] = Status(id=100, type=StatusTypes.FAILED)
    action_instances = [ActionInstance(status_id=100),
                        ActionInstance(status_id=StatusConstants.SUCCESS)]
    ok_(not engine._can_continue_workflow(action_instances))
def test_can_continue_workflow_status_not_in_severity(self):
    """An unknown status type does not block workflow continuation."""
    engine = InstanceWorkflowEngine(Mock(), Mock())
    engine.status_cache[StatusConstants.SUCCESS] = Status(
        id=StatusConstants.SUCCESS, type=StatusTypes.SUCCESS)
    engine.status_cache[100] = Status(id=100, type="semiborked")
    action_instances = [ActionInstance(status_id=100),
                        ActionInstance(status_id=StatusConstants.SUCCESS)]
    ok_(engine._can_continue_workflow(action_instances))
def test_ext_integrals_import():
    """Check geotecha.speccon.ext_integrals can be imported."""
    try:
        import geotecha.speccon.ext_integrals as ext_integ
    except ImportError:
        ok_(False,
            "Failed to import geotecha.speccon.ext_integrals "
            "(fortran), numpy versions will be used instead.")