def test_identify_library_by_url(self):
    # Looking up a library by a URL that is not configured must fail,
    # and the error message lists the URLs that *are* available.
    assert_raises_regexp(
        Exception,
        "Could not locate library with URL http://bar/. Available URLs: http://foo/",
        self.script.set_secret, self._db, "http://bar/", "vendorid",
        "libraryname", "secret", None
    )
def test_attempt_renew_with_local_loan_and_no_available_copies(self): """We have a local loan and a remote loan but the patron tried to borrow again -- probably to renew their loan. """ # Local loan. loan, ignore = self.pool.loan_to(self.patron) # Remote loan. self.circulation.add_remote_loan( self.identifier.type, self.identifier.identifier, self.YESTERDAY, self.IN_TWO_WEEKS ) # NoAvailableCopies can happen if there are already people # waiting in line for the book. This case gives a more # specific error message. # # Contrast with the way NoAvailableCopies is handled in # test_loan_becomes_hold_if_no_available_copies. self.remote.queue_checkout(NoAvailableCopies()) assert_raises_regexp( CannotRenew, "You cannot renew a loan if other patrons have the work on hold.", self.borrow )
def test_lane_loading(self):
    # The default setup loads lane IDs properly.
    gate = COPPAGate(self._default_library, self.integration)
    eq_(self.lane1.id, gate.yes_lane_id)
    eq_(self.lane2.id, gate.no_lane_id)

    # If a lane isn't associated with the right library, the
    # COPPAGate is misconfigured and cannot be instantiated.
    library = self._library()
    self.lane1.library = library
    self._db.commit()
    assert_raises_regexp(
        CannotLoadConfiguration,
        "Lane .* is for the wrong library",
        COPPAGate, self._default_library, self.integration
    )
    # Restore the original library so the next scenario starts clean.
    self.lane1.library_id = self._default_library.id

    # If the lane ID doesn't correspond to a real lane, the
    # COPPAGate cannot be instantiated.
    ConfigurationSetting.for_library_and_externalintegration(
        self._db, COPPAGate.REQUIREMENT_MET_LANE,
        self._default_library, self.integration
    ).value = -100
    assert_raises_regexp(
        CannotLoadConfiguration,
        "No lane with ID: -100",
        COPPAGate, self._default_library, self.integration
    )
def error_is_raised_if_init_value_is_missing():
    """Omitting a required field when constructing a data class raises TypeError."""
    user_type = dodge.data_class("User", ["username", "password"])

    def construct_without_password():
        return user_type("bob")

    assert_raises_regexp(
        TypeError,
        "^Missing argument: 'password'$",
        construct_without_password,
    )
def test_get_plot_point_colors_invalid_input(self):
    # Each invalid combination of (df, column, ids) must raise ValueError.

    # column provided without df
    with npt.assert_raises(ValueError):
        self.min_ord_results._get_plot_point_colors(None, 'numeric',
                                                    ['B', 'C'], 'jet')

    # df provided without column
    with npt.assert_raises(ValueError):
        self.min_ord_results._get_plot_point_colors(self.df, None,
                                                    ['B', 'C'], 'jet')

    # column not in df
    with assert_raises_regexp(ValueError, 'missingcol'):
        self.min_ord_results._get_plot_point_colors(self.df, 'missingcol',
                                                    ['B', 'C'], 'jet')

    # id not in df
    with assert_raises_regexp(ValueError, 'numeric'):
        self.min_ord_results._get_plot_point_colors(
            self.df, 'numeric', ['B', 'C', 'missingid', 'A'], 'jet')

    # missing data in df
    with assert_raises_regexp(ValueError, 'nancolumn'):
        self.min_ord_results._get_plot_point_colors(self.df, 'nancolumn',
                                                    ['B', 'C', 'A'], 'jet')
def test_simple(self):
    # SimpleAuthenticationProvider requires a test identifier/password
    # pair; without one it cannot be configured.
    p = SimpleAuthenticationProvider
    integration = self._external_integration(self._str)

    assert_raises_regexp(
        CannotLoadConfiguration,
        "Test identifier and password not set.",
        p, self._default_library, integration
    )

    integration.setting(p.TEST_IDENTIFIER).value = "barcode"
    integration.setting(p.TEST_PASSWORD).value = "pass"
    provider = p(self._default_library, integration)

    # Wrong password, missing password, or missing identifier all fail.
    eq_(None, provider.remote_authenticate("user", "wrongpass"))
    eq_(None, provider.remote_authenticate("user", None))
    eq_(None, provider.remote_authenticate(None, "pass"))

    # The correct pair yields a fully populated PatronData.
    user = provider.remote_authenticate("barcode", "pass")
    assert isinstance(user, PatronData)
    eq_("barcode", user.authorization_identifier)
    eq_("barcode_id", user.permanent_id)
    eq_("barcode_username", user.username)

    # User can also authenticate by their 'username'
    user2 = provider.remote_authenticate("barcode_username", "pass")
    eq_("barcode", user2.authorization_identifier)
def test_untied_biases():
    # With tied_biases=False a Convolutional brick keeps one bias per
    # output position, which pins the brick to a fixed image size.
    x = tensor.tensor4('x')
    num_channels = 4
    num_filters = 3
    batch_size = 5
    filter_size = (3, 3)
    conv = Convolutional(filter_size, num_filters, num_channels,
                         weights_init=Constant(1.), biases_init=Constant(2.),
                         image_size=(28, 30), tied_biases=False)
    conv.initialize()
    y = conv.apply(x)
    func = function([x], y)

    # Untied biases provide a bias for every individual output
    assert_allclose(conv.b.eval().shape, (3, 26, 28))

    # Untied biases require images of a specific size
    x_val_1 = numpy.ones((batch_size, num_channels, 28, 30),
                         dtype=theano.config.floatX)

    # Constant-1 weights and constant-2 biases make the expected output
    # prod(filter_size) * num_channels + 2 at every position.
    assert_allclose(func(x_val_1),
                    numpy.prod(filter_size) * num_channels *
                    numpy.ones((batch_size, num_filters, 26, 28)) + 2)

    x_val_2 = numpy.ones((batch_size, num_channels, 23, 19),
                         dtype=theano.config.floatX)

    def wrongsize():
        func(x_val_2)

    # A mismatched input size fails the AbstractConv shape assertion.
    assert_raises_regexp(AssertionError, 'AbstractConv shape mismatch',
                         wrongsize)
def test_tutorial_create_hands_on_tutorial():
    """Test :func:`planemo.training.tutorial.tutorial.create_hands_on_tutorial`."""
    tuto = Tutorial(training=training, topic=topic)
    os.makedirs(tuto.wf_dir)

    # with init_wf_id and no Galaxy URL
    tuto.init_wf_id = 'ID'
    tuto.training.galaxy_url = None
    exp_exception = "No Galaxy URL given"
    with assert_raises_regexp(Exception, exp_exception):
        tuto.create_hands_on_tutorial(CTX)

    # with init_wf_id and no Galaxy API key
    tuto.init_wf_id = 'ID'
    tuto.training.galaxy_url = 'http://%s:%s' % (KWDS['host'], KWDS['port'])
    tuto.training.galaxy_api_key = None
    exp_exception = "No API key to access the given Galaxy instance"
    with assert_raises_regexp(Exception, exp_exception):
        tuto.create_hands_on_tutorial(CTX)

    # with init_wf_id: a served Galaxy instance supplies the workflow
    # id and API key, and the tutorial file is generated.
    assert is_galaxy_engine(**KWDS)
    with engine_context(CTX, **KWDS) as galaxy_engine:
        with galaxy_engine.ensure_runnables_served([RUNNABLE]) as config:
            tuto.init_wf_id = config.workflow_id(WF_FP)
            tuto.training.galaxy_api_key = config.user_api_key
            tuto.create_hands_on_tutorial(CTX)
    assert os.path.exists(tuto.tuto_fp)
    os.remove(tuto.tuto_fp)

    # with init_wf_fp (local workflow file instead of a workflow id)
    tuto.init_wf_id = None
    tuto.init_wf_fp = WF_FP
    tuto.create_hands_on_tutorial(CTX)
    assert os.path.exists(tuto.tuto_fp)

    # clean up the generated topic tree
    shutil.rmtree("topics")
def test_worked_hours_custom_end_time(self):
    # A custom end time may only be supplied while the day is still in
    # progress (started but not ended).
    day_1 = day.Day("2014-09-01")

    # Not started yet: custom end time is rejected.
    nt.assert_raises_regexp(
        errors.ChronoError,
        "Custom end times can only be tried on days in progress.",
        day_1.worked_hours,
        end_time=datetime.datetime(2014, 9, 1, hour=17))

    day_1.report_start_time("8:00")

    # 8:00 -> 16:30 with no lunch: 8h30m.
    nt.assert_equal(
        day_1.worked_hours(
            end_time=datetime.datetime(2014, 9, 1, hour=16, minute=30)),
        datetime.timedelta(hours=8, minutes=30))

    # Lunch is subtracted from the total.
    day_1.report_lunch_duration("0:30")
    nt.assert_equal(
        day_1.worked_hours(
            end_time=datetime.datetime(2014, 9, 1, hour=16, minute=30)),
        datetime.timedelta(hours=8))

    # Deviations are subtracted as well.
    day_1.report_deviation("0:45")
    nt.assert_equal(
        day_1.worked_hours(
            end_time=datetime.datetime(2014, 9, 1, hour=16, minute=30)),
        datetime.timedelta(hours=7, minutes=15))

    # Once the day is ended, custom end times are rejected again.
    day_1.report_end_time("17:00")
    nt.assert_raises_regexp(
        errors.ChronoError,
        "Custom end times can only be tried on days in progress.",
        day_1.worked_hours,
        end_time=datetime.datetime(2014, 9, 1, hour=16, minute=30))
def test_by_name_and_protocol(self):
    name = "A name"
    protocol = ExternalIntegration.OVERDRIVE
    key = (name, protocol)

    # Cache is empty.
    eq_(HasFullTableCache.RESET, Collection._cache)

    collection1, is_new = Collection.by_name_and_protocol(
        self._db, name, ExternalIntegration.OVERDRIVE
    )
    eq_(True, is_new)

    # Cache was populated and then reset because we created a new
    # Collection.
    eq_(HasFullTableCache.RESET, Collection._cache)

    collection2, is_new = Collection.by_name_and_protocol(
        self._db, name, ExternalIntegration.OVERDRIVE
    )
    eq_(collection1, collection2)
    eq_(False, is_new)

    # This time the cache was not reset after being populated.
    eq_(collection1, Collection._cache[key])

    # You'll get an exception if you look up an existing name
    # but the protocol doesn't match.
    assert_raises_regexp(
        ValueError,
        'Collection "A name" does not use protocol "Bibliotheca".',
        Collection.by_name_and_protocol,
        self._db, name, ExternalIntegration.BIBLIOTHECA
    )
def test_add_flow_with_only_source(self):
    """Test plugin deny add flow with only source."""
    # A rule with a source but no destination is malformed; add_flow
    # must reject it, echoing the offending rule in the message.
    data = {
        "kind": "Access Control List",
        "rules": [{
            "action": "permit",
            "description": "Restrict environment",
            "icmp-options": {
                "icmp-code": "0",
                "icmp-type": "8"
            },
            "id": "82325",
            "owner": "networkapi",
            "protocol": "icmp",
            "source": "0.0.0.0/0"
        }]
    }

    # sort_keys makes the serialized rule deterministic so it can be
    # matched against the error message.
    rule = dumps(data['rules'][0], sort_keys=True)
    assert_raises_regexp(
        ValueError,
        "Error building ACL Json. Malformed input data: \n%s" % rule,
        self.odl.add_flow,
        data
    )
def test_bad_connection_remote_pin_test(self):
    """A connection failure surfaces as a RemoteInitiatedServerError."""
    broken_api = self.mock_api(bad_connection=True)
    assert_raises_regexp(
        RemoteInitiatedServerError,
        "Could not connect!",
        broken_api.remote_pin_test,
        "key",
        "pin",
    )
def test_broken_service_remote_pin_test(self):
    """An HTTP 502 from the remote surfaces as a RemoteInitiatedServerError."""
    failing_api = self.mock_api(failure_status_code=502)
    assert_raises_regexp(
        RemoteInitiatedServerError,
        "Got unexpected response code 502. Content: Error 502",
        failing_api.remote_pin_test,
        "key",
        "pin",
    )
def test_protocol_enforcement(self):
    """A CollectionMonitor can require that it be instantiated
    with a Collection that implements a certain protocol.
    """
    class NoProtocolMonitor(CollectionMonitor):
        SERVICE_NAME = "Test Monitor 1"
        PROTOCOL = None

    class OverdriveMonitor(CollectionMonitor):
        SERVICE_NAME = "Test Monitor 2"
        PROTOCOL = ExternalIntegration.OVERDRIVE

    # Two collections.
    c1 = self._collection(protocol=ExternalIntegration.OVERDRIVE)
    c2 = self._collection(protocol=ExternalIntegration.BIBLIOTHECA)

    # The NoProtocolMonitor can be instantiated with either one,
    # or with no Collection at all.
    NoProtocolMonitor(self._db, c1)
    NoProtocolMonitor(self._db, c2)
    NoProtocolMonitor(self._db, None)

    # The OverdriveMonitor can only be instantiated with the first one.
    OverdriveMonitor(self._db, c1)
    # Raw string: \( and \) are regex escapes, not string escapes; in a
    # plain string they are invalid escape sequences (DeprecationWarning
    # on Python 3.6+, SyntaxWarning/error later).
    assert_raises_regexp(
        ValueError,
        r"Collection protocol \(Bibliotheca\) does not match Monitor protocol \(Overdrive\)",
        OverdriveMonitor, self._db, c2
    )
    assert_raises(
        CollectionMissing,
        OverdriveMonitor, self._db, None
    )
def test_internal_server_error(self):
    """An unrecognized status code maps to a RemoteInitiatedServerError."""
    response = self.sample_data("invalid_error_code.xml")
    hold_parser = HoldReleaseResponseParser()
    assert_raises_regexp(
        RemoteInitiatedServerError,
        "Invalid response code from Axis 360: abcd",
        hold_parser.process_all,
        response,
    )
def test_missing_error_code(self):
    """A response with no status code at all maps to a RemoteInitiatedServerError."""
    response = self.sample_data("missing_error_code.xml")
    hold_parser = HoldReleaseResponseParser()
    assert_raises_regexp(
        RemoteInitiatedServerError,
        "No status code!",
        hold_parser.process_all,
        response,
    )
def test_for_collection(self):
    # This collection has no mirror_integration, so
    # there is no MirrorUploader for it.
    collection = self._collection()
    eq_(None, MirrorUploader.for_collection(collection))

    # We can tell the method that we're okay with a sitewide
    # integration instead of an integration specifically for this
    # collection.
    # NOTE(review): the assignment looks unused, but self._integration
    # presumably creates the sitewide integration as a side effect —
    # confirm before removing.
    sitewide_integration = self._integration
    uploader = MirrorUploader.for_collection(collection, use_sitewide=True)
    assert isinstance(uploader, MirrorUploader)

    # This collection has a properly configured mirror_integration,
    # so it can have an MirrorUploader.
    collection.mirror_integration = self._integration
    uploader = MirrorUploader.for_collection(collection)
    assert isinstance(uploader, MirrorUploader)

    # This collection has a mirror_integration but it has the
    # wrong goal, so attempting to make an MirrorUploader for it
    # raises an exception.
    collection.mirror_integration.goal = ExternalIntegration.LICENSE_GOAL
    assert_raises_regexp(
        CannotLoadConfiguration,
        "from an integration with goal=licenses",
        MirrorUploader.for_collection, collection
    )
def test_internal_server_error(self):
    """A 500-style response maps to a RemoteInitiatedServerError."""
    response = self.sample_data("internal_server_error.xml")
    hold_parser = HoldReleaseResponseParser()
    assert_raises_regexp(
        RemoteInitiatedServerError,
        "Internal Server Error",
        hold_parser.process_all,
        response,
    )
def test_empty_token(self):
    # Test the behavior when a credential is empty.

    # First, create a token with an empty credential.
    data_source = DataSource.lookup(self._db, DataSource.ADOBE)
    token, is_new = Credential.persistent_token_create(
        self._db, data_source, "i am empty", None
    )
    token.credential = None

    # If allow_empty_token is true, the token is returned as-is
    # and the refresher method is not called.
    def refresher(self):
        raise Exception("Refresher method was called")

    # Shared positional arguments for both lookup calls below.
    args = self._db, data_source, token.type, None, refresher,
    again_token = Credential.lookup(
        *args, allow_persistent_token=True, allow_empty_token=True
    )
    eq_(again_token, token)

    # If allow_empty_token is False, the refresher method is
    # called, which here raises the marker exception.
    assert_raises_regexp(
        Exception, "Refresher method was called",
        Credential.lookup,
        *args, allow_persistent_token=True, allow_empty_token=False
    )
def test_add_flow_without_icmp_code_and_icmp_type(self):
    """Test plugin deny add flow without icmp-code and icmp-type."""
    # An icmp rule with an empty icmp-options block must be rejected
    # with a message naming the missing options and echoing the rule.
    data = {
        "kind": "Access Control List",
        "rules": [{
            "id": 1,
            "protocol": "icmp",
            "source": "10.0.0.1/32",
            "destination": "10.0.0.2/32",
            "icmp-options": {
            }
        }]
    }

    # sort_keys keeps the serialized rule deterministic for matching.
    rule = dumps(data['rules'][0], sort_keys=True)
    assert_raises_regexp(
        ValueError,
        "Error building ACL Json. Malformed input data: \n"
        "Missing icmp-code or icmp-type icmp options:\n%s" % rule,
        self.odl.add_flow,
        data
    )
def test_calling_atom_raises_exception():
    """A function call to a non-function should result in an error."""
    # Both a boolean and a number in operator position are errors.
    for source in ["(#t 'foo 'bar)", "(42)"]:
        with assert_raises_regexp(LispError, "not a function"):
            evaluate(parse(source), Environment())
def test_add_flow_without_source_and_destination(self):
    """Test plugin deny add flow without source and destination."""
    # A rule with neither source nor destination is malformed; add_flow
    # must reject it, echoing the offending rule in the message.
    data = {
        "kind": "default#acl",
        "rules": [{
            "action": "permit",
            "description": "generic",
            "icmp-options": {
                "icmp-code": "0",
                "icmp-type": "8"
            },
            "id": "82325",
            "owner": "networkapi",
            "protocol": "icmp"
        }]
    }

    # sort_keys keeps the serialized rule deterministic for matching.
    rule = dumps(data['rules'][0], sort_keys=True)
    assert_raises_regexp(
        ValueError,
        "Error building ACL Json. Malformed input data: \n%s" % rule,
        self.odl.add_flow,
        data
    )
def error_is_raised_if_too_many_positional_arguments_are_passed_to_init():
    """Passing more positional args than fields to a data class raises TypeError."""
    user_type = dodge.data_class("User", ["username", "password"])

    def construct_with_extra_argument():
        return user_type("bob", "password1", "salty")

    assert_raises_regexp(
        TypeError,
        r"takes 2 positional arguments but 3 were given",
        construct_with_extra_argument,
    )
def test_create_external_integration(self):
    # A newly created Collection has no associated ExternalIntegration.
    collection, ignore = get_one_or_create(
        self._db, Collection, name=self._str
    )
    eq_(None, collection.external_integration_id)
    assert_raises_regexp(
        ValueError,
        "No known external integration for collection",
        getattr, collection, 'external_integration'
    )

    # We can create one with create_external_integration().
    overdrive = ExternalIntegration.OVERDRIVE
    integration = collection.create_external_integration(protocol=overdrive)
    eq_(integration.id, collection.external_integration_id)
    eq_(overdrive, integration.protocol)

    # If we call create_external_integration() again we get the same
    # ExternalIntegration as before.
    integration2 = collection.create_external_integration(protocol=overdrive)
    eq_(integration, integration2)

    # If we try to initialize an ExternalIntegration with a different
    # protocol, we get an error.
    # Raw string: \( and \) are regex escapes; in a plain string they
    # are invalid escape sequences (DeprecationWarning on Python 3.6+).
    assert_raises_regexp(
        ValueError,
        r"Located ExternalIntegration, but its protocol \(Overdrive\) does not match desired protocol \(blah\).",
        collection.create_external_integration,
        protocol="blah"
    )
def test_for_foreign_id_rejects_invalid_identifiers(self):
    # A Bibliotheca identifier containing a slash is not valid and
    # must be rejected with a descriptive ValueError.
    assert_raises_regexp(
        ValueError,
        '"foo/bar" is not a valid Bibliotheca ID.',
        Identifier.for_foreign_id,
        self._db, Identifier.BIBLIOTHECA_ID, "foo/bar"
    )
def test_verb_alias_config_initialization():
    # Exercises creation, idempotence, default-file refresh, and the two
    # failure modes of config.initialize_verb_aliases.
    cwd = os.getcwd()
    test_folder = os.path.join(cwd, 'test')

    # Test target directory does not exist failure
    with assert_raises_regexp(RuntimeError, "Cannot initialize verb aliases because catkin configuration path"):
        config.initialize_verb_aliases(test_folder)

    # Test normal case
    os.makedirs(test_folder)
    config.initialize_verb_aliases(test_folder)
    assert os.path.isdir(test_folder)
    assert os.path.isdir(os.path.join(test_folder, 'verb_aliases'))
    defaults_path = os.path.join(test_folder, 'verb_aliases',
                                 '00-default-aliases.yaml')
    assert os.path.isfile(defaults_path)

    # Assert a second invocation is fine
    config.initialize_verb_aliases(test_folder)

    # Check that replacement of defaults works
    with open(defaults_path, 'w') as f:
        f.write("This should be overwritten (simulation of update needed)")
    with redirected_stdio() as (out, err):
        config.initialize_verb_aliases(test_folder)
    assert "Warning, builtin verb aliases at" in out.getvalue(), out.getvalue()
    shutil.rmtree(test_folder)

    # Check failure from verb aliases folder existing as a file
    os.makedirs(test_folder)
    with open(os.path.join(test_folder, 'verb_aliases'), 'w') as f:
        f.write("this will cause a RuntimeError")
    with assert_raises_regexp(RuntimeError, "The catkin verb aliases config directory"):
        config.initialize_verb_aliases(test_folder)
    shutil.rmtree(test_folder)
def test_misconfigured_authentication_mode(self):
    # An unknown authentication mode makes the API unconfigurable.
    assert_raises_regexp(
        CannotLoadConfiguration,
        "Unrecognized Millenium Patron API authentication mode: nosuchauthmode.",
        self.mock_api,
        auth_mode = 'nosuchauthmode'
    )
def test_convolutional_sequence_with_no_input_size():
    """Without a known image size, bias shapes can only be inferred
    when biases are tied.
    """
    # suppose x is outputted by some RNN
    x = tensor.tensor4('x')
    filter_size = (1, 1)
    num_filters = 2
    num_channels = 1
    pooling_size = (1, 1)

    conv = Convolutional(filter_size, num_filters, tied_biases=False,
                         weights_init=Constant(1.), biases_init=Constant(1.))
    act = Rectifier()
    pool = MaxPooling(pooling_size)

    # Untied biases need one bias per output position, which cannot be
    # computed without the image size: initialization must fail.
    bad_seq = ConvolutionalSequence([conv, act, pool], num_channels,
                                    tied_biases=False)
    # Raw string: \S+ is a regex escape; in a plain string \S is an
    # invalid escape sequence (DeprecationWarning on Python 3.6+).
    assert_raises_regexp(ValueError, r'Cannot infer bias size \S+',
                         bad_seq.initialize)

    # With tied biases the sequence initializes and applies fine.
    seq = ConvolutionalSequence([conv, act, pool], num_channels,
                                tied_biases=True)
    try:
        seq.initialize()
        out = seq.apply(x)
    except TypeError:
        assert False, "This should have succeeded"

    assert out.ndim == 4
def test_define_with_wrong_number_of_arguments():
    """Defines should have exactly two arguments, or raise an error"""
    # Too few and too many arguments are both rejected.
    for source in ["(define x)", "(define x 1 2)"]:
        with assert_raises_regexp(LispError, "Wrong number of arguments"):
            evaluate(parse(source), Environment())
def test_invalid_generalization(self):
    """A plain Point cannot be generalized into a Point3D."""
    flat_point = Point(1, 3)
    assert_raises_regexp(
        RecordInstanceError,
        "^Record type Point is not a subtype of Point3D$",
        Point3D.init_from_specialization,
        flat_point,
    )
def test_circulationdata_may_require_collection(self):
    """Depending on the information provided in a CirculationData
    object, it might or might not be possible to call apply()
    without providing a Collection.
    """
    identifier = IdentifierData(Identifier.OVERDRIVE_ID, "1")
    format = FormatData(Representation.EPUB_MEDIA_TYPE,
                        DeliveryMechanism.NO_DRM,
                        rights_uri=RightsStatus.IN_COPYRIGHT)
    circdata = CirculationData(DataSource.OVERDRIVE,
                               primary_identifier=identifier,
                               formats=[format])
    circdata.apply(self._db, collection=None)

    # apply() has created a LicensePoolDeliveryMechanism for this
    # title, even though there are no LicensePools for it.
    identifier_obj, ignore = identifier.load(self._db)
    eq_([], identifier_obj.licensed_through)
    [lpdm] = identifier_obj.delivery_mechanisms
    eq_(DataSource.OVERDRIVE, lpdm.data_source.name)
    eq_(RightsStatus.IN_COPYRIGHT, lpdm.rights_status.uri)

    mechanism = lpdm.delivery_mechanism
    eq_(Representation.EPUB_MEDIA_TYPE, mechanism.content_type)
    eq_(DeliveryMechanism.NO_DRM, mechanism.drm_scheme)

    # But if we put some information in the CirculationData
    # that can only be stored in a LicensePool, there's trouble.
    circdata.licenses_owned = 0
    assert_raises_regexp(
        ValueError,
        'Cannot store circulation information because no Collection was provided.',
        circdata.apply, self._db, collection=None)
def test_report_lunch(self):
    # Lunch can only be reported after a start time, and only once.
    # (The "allready" spelling below matches the application's actual
    # error text, so it must not be corrected here.)
    day_1 = day.Day("2014-09-01")
    nt.assert_equal(day_1.lunch_duration, None)

    # No start time yet: reporting lunch is an error.
    nt.assert_raises_regexp(
        errors.ReportError,
        "^Date 2014-09-01 must have a start time before a lunch duration "
        "can be reported.",
        day_1.report_lunch_duration,
        "1:00")

    day_1.report_start_time("8:00")
    day_1.report_lunch_duration("1:00")
    nt.assert_equal(day_1.lunch_duration, datetime.timedelta(hours=1))

    day_2 = day.Day("2014-09-02")
    day_2.report_start_time("8:10")
    day_2.report_lunch_duration("0:45")
    nt.assert_equal(day_2.lunch_duration, datetime.timedelta(minutes=45))

    # A second lunch report for the same day is an error.
    nt.assert_raises_regexp(
        errors.ReportError,
        "^Date 2014-09-01 allready has a lunch duration.",
        day_1.report_lunch_duration,
        "1:00")
def test_config_initialization(patched_func):
    # Exercises normal creation, idempotence, and the path-is-a-file
    # failure mode of config.initialize_config.
    cwd = os.getcwd()
    test_folder = os.path.join(cwd, 'test')

    # Test normal case
    config.initialize_config(test_folder)
    assert os.path.isdir(test_folder)
    assert not os.path.exists(os.path.join(test_folder, 'verb_aliases'))

    # Assert a second invocation is fine
    config.initialize_config(test_folder)
    shutil.rmtree(test_folder)

    # Test failure with file for target config path
    with open(test_folder, 'w') as f:
        f.write('this will cause a RuntimeError')
    with assert_raises_regexp(RuntimeError, "The catkin config directory"):
        config.initialize_config(test_folder)
def no_call_pure_virtual_method(mthd, *args, **kwargs):
    """
    Catches a RuntimeError raised by attempting to call a bound method
    with an interface implementation. Used to test that a pybind11
    trampoline pattern applied to an interface class behaves correctly.

    Will raise exception if not virtual method

    :param: mthd: The method to be called
    :param: *args: Args forwarded to method call
    :param: **kwargs: Kwargs forwarded to method call
    :return:
    """
    with nt.assert_raises_regexp(
        RuntimeError,
        "Tried to call pure virtual function",
    ):
        mthd(*args, **kwargs)
def test_simple(self):
    # SimpleAuthenticationProvider requires a test identifier/password
    # pair; without one it cannot be configured.
    p = SimpleAuthenticationProvider
    integration = self._external_integration(self._str)

    assert_raises_regexp(CannotLoadConfiguration,
                         "Test identifier and password not set.",
                         p, self._default_library, integration)

    integration.setting(p.TEST_IDENTIFIER).value = "barcode"
    integration.setting(p.TEST_PASSWORD).value = "pass"
    provider = p(self._default_library, integration)

    # Wrong password, missing password, or missing identifier all fail.
    eq_(None, provider.remote_authenticate("user", "wrongpass"))
    eq_(None, provider.remote_authenticate("user", None))
    eq_(None, provider.remote_authenticate(None, "pass"))

    # The correct pair yields a fully populated PatronData.
    user = provider.remote_authenticate("barcode", "pass")
    assert isinstance(user, PatronData)
    eq_("barcode", user.authorization_identifier)
    eq_("barcode_id", user.permanent_id)
    eq_("barcode_username", user.username)

    # User can also authenticate by their 'username'
    user2 = provider.remote_authenticate("barcode_username", "pass")
    eq_("barcode", user2.authorization_identifier)
def test_check_axis_angle():
    """Test input validation for axis-angle representation."""
    a_list = [1, 0, 0, 0]
    a = check_axis_angle(a_list)
    assert_array_almost_equal(a_list, a)
    eq_ = assert_equal  # local alias kept out; direct calls below
    assert_equal(type(a), np.ndarray)
    # np.float was a deprecated alias for the builtin float (removed in
    # NumPy 1.24); comparing the dtype against float is equivalent.
    assert_equal(a.dtype, float)

    random_state = np.random.RandomState(0)
    a = np.empty(4)
    a[:3] = random_vector(random_state, 3)
    a[3] = random_state.randn() * 4.0 * np.pi

    # Normalization: unit axis and angle wrapped into (0, pi).
    a2 = check_axis_angle(a)
    assert_axis_angle_equal(a, a2)
    assert_almost_equal(np.linalg.norm(a2[:3]), 1.0)
    assert_greater(a2[3], 0)
    assert_greater(np.pi, a2[3])

    # Wrong shapes are rejected.
    assert_raises_regexp(
        ValueError, "Expected axis and angle in array with shape",
        check_axis_angle, np.zeros(3))
    assert_raises_regexp(
        ValueError, "Expected axis and angle in array with shape",
        check_axis_angle, np.zeros((3, 3)))
def test_request_failure(self):
    """Verify that certain unexpected HTTP results are turned into
    IntegrationExceptions.
    """
    self.api.api_key = "some key"

    # A 403 means authentication failed.
    def result_403(*args, **kwargs):
        return 403, None, None
    self.api.do_get = result_403
    assert_raises_regexp(
        IntegrationException, "API authentication failed",
        self.api.request, "some path"
    )

    # A 500 with an unparseable body becomes a generic error that
    # carries the raw body in its debug message.
    def result_500(*args, **kwargs):
        return 500, {}, "bad value"
    self.api.do_get = result_500
    try:
        self.api.request("some path")
        raise Exception("Expected an IntegrationException!")
    # 'except X as e' replaces the Python-2-only 'except X, e' syntax;
    # it is valid on Python 2.6+ and required on Python 3.
    except IntegrationException as e:
        # NOTE(review): e.message is assumed to be an attribute defined
        # by IntegrationException itself; plain Python 3 exceptions have
        # no .message — confirm against the class definition.
        eq_("Unknown API error (status 500)", e.message)
        assert e.debug_message.startswith("Response from")
        assert e.debug_message.endswith("was: 'bad value'")
def test_verify_status_code(self):
    success = dict(Status=dict(Code=0000))
    failure = dict(Status=dict(Code=1000, Message="A message"))
    missing = dict()

    m = JSONResponseParser.verify_status_code

    # If the document's Status object indicates success, nothing
    # happens.
    m(success)

    # If it indicates failure, an appropriate exception is raised.
    assert_raises_regexp(
        PatronAuthorizationFailedException, "A message",
        m, failure
    )

    # If the Status object is missing, a more generic exception is
    # raised.
    assert_raises_regexp(
        RemoteInitiatedServerError,
        "Required key Status not present in Axis 360 fulfillment document",
        m, missing
    )
def test_read_conv_pp():
    # Downloads the OPSD conventional power plant files into a temp
    # directory, checks the invalid-category error, and verifies the
    # capacity sum of the loaded data.
    my_dir = os.path.join(os.path.expanduser("~"), "reegis_opsd_test")
    os.makedirs(my_dir, exist_ok=True)
    cfg.tmp_set("paths_pattern", "opsd", my_dir)
    cfg.tmp_set("paths", "powerplants", my_dir)

    # 'conv' is not a valid category name; 'conventional' is.
    with assert_raises_regexp(ValueError, "Category 'conv' is not valid."):
        opsd.load_original_opsd_file("conv", True)
    df = opsd.load_original_opsd_file("conventional", True)

    # All expected files were written to the download directory.
    for f in [
        "conventional_readme.md",
        "conventional_datapackage.json",
        "conventional_power_plants_DE.csv",
    ]:
        ok_(os.path.isfile(os.path.join(my_dir, f)))
    rmtree(my_dir)

    eq_(int(df["capacity_net_bnetza"].sum()), 118684)
def test_tutorial_init_from_existing_tutorial():
    """Test :func:`planemo.training.tutorial.tutorial.init_from_existing_tutorial`."""
    tuto = Tutorial(training=training, topic=topic)

    # non existing tutorial
    exp_exception = "The tutorial existing_tutorial does not exists. It should be created"
    with assert_raises_regexp(Exception, exp_exception):
        tuto.init_from_existing_tutorial('existing_tutorial')

    # existing tutorial: metadata is loaded from the tutorial files
    create_existing_tutorial('existing_tutorial', tuto_fp, tuto.topic)
    tuto.init_from_existing_tutorial('existing_tutorial')
    assert tuto.title == 'A tutorial to test'
    assert "A learning objective" in tuto.objectives
    assert tuto.time_estimation == "1H"
    assert 'the_best_contributor' in tuto.contributors
    assert '# First section' in tuto.body

    # clean up the generated topic tree
    shutil.rmtree("topics")
def _test_invalid_property_value(
    self,
    property_,
    property_value,
    exc_message_template,
):
    # Shared helper: saving a contact whose property has an invalid
    # value must raise HubspotPropertyValueError with a message built
    # from exc_message_template and the offending value.
    saved_contacts = make_contacts(1)
    connection = \
        self._make_connection_for_contacts(saved_contacts, property_)
    contact_with_invalid_property_value = \
        make_contact(1, {property_.name: property_value})
    exc_message = exc_message_template.format(repr(property_value))
    with assert_raises_regexp(HubspotPropertyValueError, exc_message):
        with connection:
            save_contacts([contact_with_invalid_property_value], connection)
def test_offsettransformer__too_many_input_flows():
    """Too many Input Flows defined."""
    # An OffsetTransformer accepts at most one input flow; two inputs
    # must raise a ValueError.
    with tools.assert_raises_regexp(
            ValueError, 'OffsetTransformer` must not have more than 1'):
        bgas = solph.Bus(label='GasBus')
        bcoal = solph.Bus(label='CoalBus')
        solph.components.OffsetTransformer(
            label='ostf_2_in',
            inputs={
                bgas: solph.Flow(
                    nominal_value=60, min=0.5, max=1.0,
                    nonconvex=solph.NonConvex()),
                bcoal: solph.Flow(
                    nominal_value=30, min=0.3, max=1.0,
                    nonconvex=solph.NonConvex())
            },
            coefficients=(20, 0.5))
def test_offsettransformer_too_many_output_flows():
    """Too many Output Flows defined."""
    # An OffsetTransformer accepts at most one output flow; two outputs
    # must raise a ValueError.
    with tools.assert_raises_regexp(
            ValueError, 'OffsetTransformer` must not have more than 1'):
        bm1 = solph.Bus(label='my_offset_Bus1')
        bm2 = solph.Bus(label='my_offset_Bus2')
        solph.components.OffsetTransformer(
            label='ostf_2_out',
            inputs={
                bm1: solph.Flow(
                    nominal_value=60, min=0.5, max=1.0,
                    nonconvex=solph.NonConvex())
            },
            outputs={bm1: solph.Flow(), bm2: solph.Flow()},
            coefficients=(20, 0.5))
def test_annuity_exceptions():
    """Test out-of-bounds-error of the annuity tool."""
    expected_message = "Input arguments for 'annuity' out of bounds!"
    # Each entry is (positional args, keyword args) that violate the
    # documented bounds of economics.annuity.
    out_of_bounds_calls = [
        ((1000, 10, 2), {}),
        ((1000, 0.5, 1), {}),
        ((1000, 10, 0.1), {'u': 0.3}),
        ((1000, 10, 0.1), {'cost_decrease': -1}),
    ]
    for call_args, call_kwargs in out_of_bounds_calls:
        assert_raises_regexp(
            ValueError, expected_message,
            economics.annuity, *call_args, **call_kwargs)
def checkPlotfToFileBadArgs(basenames, specs):
    # Enable the persistent_inference_trace in order to trigger the
    # inference prelude entry skipping hack in error annotation
    ripl = get_ripl(persistent_inference_trace=True)
    ripl.assume('x', '(normal 0 1)')
    # The inference program is a runtime string handed to the ripl; the
    # {0}/{1} placeholders receive the basenames and specs under test.
    infer = """ (let ((d (empty))) (do (repeat 10 (do (mh default one 10) (bind (collect x) (curry into d)))) (plotf_to_file (quote {0}) (quote {1}) d)))"""
    infer = infer.format(basenames, specs)
    # Mismatched spec/filename counts must produce an annotated error.
    with assert_raises_regexp(
        VentureException,
        'evaluation: The number of specs must match the number of filenames.'
    ) as cm:
        ripl.infer(infer)
    assert "stack_trace" in cm.exception.data  # I.e., error annotation succeeded.
def test_infeasible_model():
    # Builds a model whose fixed sink demand (5) exceeds the source
    # capacity (4); solving warns and post-processing raises.
    with tools.assert_raises_regexp(ValueError, ''):
        with warnings.catch_warnings(record=True) as w:
            es = solph.EnergySystem(timeindex=[1])
            bel = solph.Bus(label='bus')
            es.add(bel)
            es.add(
                solph.Sink(inputs={
                    bel: solph.Flow(nominal_value=5, actual_value=[1],
                                    fixed=True)
                }))
            es.add(
                solph.Source(outputs={
                    bel: solph.Flow(nominal_value=4, variable_costs=5)
                }))
            m = solph.models.Model(es, timeincrement=1)
            m.solve(solver='cbc')
            assert "Optimization ended with status" in str(w[0].message)
            # presumably this is the call that raises the ValueError
            # matched above — confirm against outputlib's behavior.
            outputlib.processing.meta_results(m)
def test_BloscpackHeader_accessor_exceptions():
    # BloscpackHeader only supports getting/setting known keys, and
    # forbids deletion entirely.
    if sys.version_info[0:2] < (2, 7):
        raise SkipTest

    bloscpack_header = BloscpackHeader()

    nt.assert_raises_regexp(KeyError,
                            'foo not in BloscpackHeader',
                            bloscpack_header.__getitem__, 'foo')
    nt.assert_raises_regexp(KeyError,
                            'foo not in BloscpackHeader',
                            bloscpack_header.__setitem__, 'foo', 'bar')
    nt.assert_raises_regexp(
        NotImplementedError,
        'BloscpackHeader does not support __delitem__ or derivatives',
        bloscpack_header.__delitem__, 'foo',
    )
def test_transform_not_added():
    """Test request for transforms that have not been added."""
    rng = np.random.RandomState(0)
    manager = TransformManager()
    manager.add_transform("A", "B", random_transform(rng))
    manager.add_transform("C", "D", random_transform(rng))

    # Unknown frames and disconnected frames are reported differently.
    failing_queries = [
        ("A", "G", "Unknown frame"),
        ("G", "D", "Unknown frame"),
        ("A", "D", "Cannot compute path"),
    ]
    for source, target, message in failing_queries:
        assert_raises_regexp(KeyError, message,
                             manager.get_transform, source, target)
def test_fill_data_library():
    """Test :func:`planemo.training.fill_data_library`."""
    train = Training(KWDS)
    train.kwds['tutorial_name'] = None
    train.kwds['slides'] = False
    train.kwds['hands_on'] = False
    train.init_training(CTX)
    train.kwds['tutorial_name'] = 'existing_tutorial'
    create_existing_tutorial('existing_tutorial', tuto_wo_zenodo_fp, train.topic)

    # no Zenodo link
    train.kwds['zenodo_link'] = None
    exp_exception = "A Zenodo link should be provided either in the metadata file or as argument of the command"
    with assert_raises_regexp(Exception, exp_exception):
        train.fill_data_library(CTX)

    # with a given Zenodo link and no Zenodo in metadata
    train.kwds['zenodo_link'] = zenodo_link
    train.fill_data_library(CTX)
    with open(train.tuto.data_lib_fp, 'r') as fh:
        assert 'DOI: 10.5281/zenodo.1321885' in fh.read()
    with open(train.tuto.tuto_fp, 'r') as fh:
        assert 'zenodo_link: %s' % zenodo_link in fh.read()

    # with a given Zenodo link and Zenodo in metadata: the new link
    # replaces the one recorded earlier.
    new_z_link = 'https://zenodo.org/record/1324204'
    train.kwds['zenodo_link'] = new_z_link
    train.tuto = None
    train.fill_data_library(CTX)
    with open(train.tuto.data_lib_fp, 'r') as fh:
        assert 'DOI: 10.5281/zenodo.1324204' in fh.read()
    with open(train.tuto.tuto_fp, 'r') as fh:
        assert 'zenodo_link: %s' % new_z_link in fh.read()

    # with no given Zenodo link: the link from the metadata is reused.
    train.kwds['zenodo_link'] = None
    train.fill_data_library(CTX)
    with open(train.tuto.data_lib_fp, 'r') as fh:
        assert 'DOI: 10.5281/zenodo.1324204' in fh.read()
    with open(train.tuto.tuto_fp, 'r') as fh:
        assert 'zenodo_link: %s' % new_z_link in fh.read()

    # clean after
    shutil.rmtree(train.topics_dir)
    shutil.rmtree("metadata")
def test_generate_tuto_from_wf():
    """Test :func:`planemo.training.generate_tuto_from_wf`."""
    train = Training(KWDS)
    train.kwds['tutorial_name'] = None
    train.kwds['slides'] = False
    train.init_training(CTX)
    train.kwds['tutorial_name'] = 'existing_tutorial'
    create_existing_tutorial('existing_tutorial', tuto_fp, train.topic)

    # no workflow
    train.kwds['workflow'] = None
    exp_exception = "A path to a local workflow or the id of a workflow on a running Galaxy instance should be provided"
    with assert_raises_regexp(Exception, exp_exception):
        train.generate_tuto_from_wf(CTX)

    # with workflow: the hands-on tutorial and workflow file are written
    train.kwds['workflow'] = WF_FP
    train.generate_tuto_from_wf(CTX)
    assert '**FastQC** {% icon tool %} with the following parameters:' in open(train.tuto.tuto_fp, 'r').read()
    assert os.path.exists(train.tuto.wf_fp)

    # clean after
    shutil.rmtree(train.topics_dir)
    shutil.rmtree("metadata")
def test_report_start_time(self):
    """A day accepts exactly one well-formed start time; everything else
    raises."""
    first_day = day.Day("2014-09-01")
    nt.assert_equal(first_day.start_time, None)
    first_day.report_start_time("8:00")
    nt.assert_equal(
        first_day.start_time,
        datetime.datetime(year=2014, month=9, day=1, hour=8))
    second_day = day.Day("2014-09-02")
    second_day.report_start_time("8:30")
    nt.assert_equal(
        second_day.start_time,
        datetime.datetime(year=2014, month=9, day=2, hour=8, minute=30))
    # Reporting twice is an error ("allready" matches the message as-is).
    nt.assert_raises_regexp(
        errors.ReportError,
        "^Date 2014-09-01 allready has a start time.",
        first_day.report_start_time, "8.00")
    # Malformed time strings are all rejected the same way.
    third_day = day.Day("2014-09-03")
    for bad_time in ("8", "8.30", "8:10:14"):
        nt.assert_raises_regexp(
            errors.BadTimeError,
            "^Bad start time: \"%s\"." % bad_time,
            third_day.report_start_time, bad_time)
def test_training_check_topic_init_tuto():
    """Test :func:`planemo.training.Training.check_topic_init_tuto`."""
    training = Training(KWDS)
    # The topic does not exist yet, so the check must fail.
    exp_exception = "The topic my_new_topic does not exists. It should be created"
    with assert_raises_regexp(Exception, exp_exception):
        training.check_topic_init_tuto()
    # Create the topic and a tutorial; the check should then succeed.
    for key in ('tutorial_name', 'slides', 'workflow', 'workflow_id', 'zenodo_link'):
        training.kwds[key] = None
    training.init_training(CTX)
    training.kwds['tutorial_name'] = 'existing_tutorial'
    create_existing_tutorial('existing_tutorial', tuto_fp, training.topic)
    training.check_topic_init_tuto()
    assert training.tuto.name == training.kwds['tutorial_name']
    assert training.tuto.datatype_fp
    # clean after
    shutil.rmtree(training.topics_dir)
    shutil.rmtree("metadata")
def test_bad_data_type():
    """Notify if column data type is wrong."""
    patient = get_patient()
    bad_column_mapping = {
        'column': 'data_proxima_consulta',
        'data_type': 'omrs_datetime',
        'commcare_data_type': 'cc_date',
        'property': 'data_proxima_consulta',
    }
    # The full, exact error text the importer is expected to raise.
    expected_message = (
        'Errors importing from <OpenmrsImporter None admin@http://www.example.com/openmrs>:\n'
        'Unable to deserialize value 1551564000000 '
        'in column "data_proxima_consulta" '
        'for case property "data_proxima_consulta". '
        'OpenMRS data type is given as "omrs_datetime". '
        'CommCare data type is given as "cc_date": '
        "argument of type 'int' is not iterable"
    )
    with get_importer(bad_column_mapping) as importer, \
            assert_raises_regexp(ConfigurationError, expected_message):
        get_case_properties(patient, importer)
def test_mutability(self):
    """An immutable Expression refuses replace(); a mutable copy accepts
    it, changing its key, AST and rendered source."""
    source = dedent("""
    np.arange(20)
    """)
    code = ast.parse(source)
    replacement = ast.Num(n=3)
    frozen = Expression(code.body[0])
    with nt.assert_raises_regexp(Exception, "This expression is not mutable"):
        frozen.replace(replacement, frozen.code.body, 'args', 0)
    mutable = frozen.copy(mutable=True)
    key_before = mutable.key
    mutable.replace(replacement, mutable.code.body, 'args', 0)
    # The mutable copy changed; the frozen original did not.
    nt.assert_not_equal(mutable.key, key_before)
    nt.assert_false(ast_equal(frozen.code, mutable.code))
    nt.assert_equal(mutable.get_source(), 'np.arange(3)')
def test_bad_date(self):
    """Day() rejects empty, mis-ordered, non-string and unseparated dates."""
    for bad_string in ("", "25-09-2014"):
        nt.assert_raises_regexp(
            errors.BadDateError,
            "^Bad date string: \"%s\"$" % bad_string,
            day.Day, bad_string)
    nt.assert_raises_regexp(
        TypeError, "^Given date must be a string.$",
        day.Day, datetime.date(2014, 9, 25))
    nt.assert_raises(errors.BadDateError, day.Day, "20140925")
def test_batch_concatenate_quaternions_mismatch():
    """Each kind of shape mismatch raises a ValueError with its own message."""
    # (shape of Q1, shape of Q2, expected error-message pattern)
    cases = [
        ((1, 2, 4), (1, 2, 3, 4),
         "Number of dimensions must be the same."),
        ((1, 2, 4, 4), (1, 2, 3, 4),
         "Size of dimension 3 does not match"),
        ((1, 2, 3, 3), (1, 2, 3, 4),
         "Last dimension of first argument does not match."),
        ((1, 2, 3, 4), (1, 2, 3, 3),
         "Last dimension of second argument does not match."),
    ]
    for shape1, shape2, message in cases:
        assert_raises_regexp(
            ValueError, message,
            pbr.batch_concatenate_quaternions,
            np.zeros(shape1), np.zeros(shape2))
def test_trailblaze_setters(self):
    """Test that the special setters and the global parameter function
    shortcut are working properly."""
    # Harmonic oscillator system used as a cheap test fixture.
    thermo_state, sampler_state = self.get_harmonic_oscillator()
    move = self.get_langevin_dynamics_move()

    # Drive the parameter through a function of the 'lambda' variable.
    parameter_functions = {self.PAR_NAME_X0: 'lambda**2'}
    variables = ['lambda']

    # Declaring the same parameter both as a state parameter and as a
    # function must be rejected.
    err_msg = f"Cannot specify {self.PAR_NAME_X0} in 'state_parameters' and 'global_parameter_functions'"
    with assert_raises_regexp(ValueError, err_msg):
        run_thermodynamic_trailblazing(
            thermo_state, sampler_state, move,
            state_parameters=[(self.PAR_NAME_X0, [0.0, 1.0])],
            global_parameter_functions=parameter_functions,
            function_variables=variables,
        )

    # The returned protocol is expressed in terms of the actual parameter,
    # not the function variable.
    protocol = run_thermodynamic_trailblazing(
        thermo_state, sampler_state, move,
        state_parameters=[('lambda', [0.0, 1.0])],
        global_parameter_functions=parameter_functions,
        function_variables=variables,
    )
    assert list(protocol.keys()) == [self.PAR_NAME_X0]
    parameter_path = protocol[self.PAR_NAME_X0]
    assert parameter_path[0] == 0
    assert parameter_path[-1] == 1
def test_credential_refresh_failure(self):
    """Verify that a useful error message results when the Odilo bearer
    token cannot be refreshed, since this is the most likely point of
    failure on a new setup.
    """
    # A 200 that is HTML means we probably hit the website, not the API.
    self.api.access_token_response = MockRequestsResponse(
        200, {"Content-Type": "text/html"},
        "Hi, this is the website, not the API."
    )
    credential = self.api.credential_object(lambda x: x)
    assert_raises_regexp(
        BadResponseException,
        "Bad response from .*: .* may not be the right base URL. Response document was: 'Hi, this is the website, not the API.'",
        self.api.refresh_creds, credential
    )

    def expect_failure(body, message):
        # Install a canned 400 JSON response and verify refresh_creds
        # raises with the expected message.
        self.api.access_token_response = MockRequestsResponse(
            400, {"Content-Type": "application/json"}, body)
        assert_raises_regexp(
            BadResponseException, message,
            self.api.refresh_creds, credential)

    # A 400 with error details surfaces the server's description.
    expect_failure(
        json.dumps(dict(errors=[dict(description="Oops")])),
        "Bad response from .*: Oops")
    # A 400 without error information falls back to the generic message.
    expect_failure(
        json.dumps(dict()),
        "Bad response from .*: .* may not be the right base URL.")
def test_get_oob(self):
    """get() raises IndexError for every out-of-range row/column pair."""
    homographies = (F2FHomography(5),
                    F2FHomography(HomographyD.random(), 5, 10))
    exp_err_msg = "Tried to perform get\\(\\) out of bounds"
    # Rows/columns past either end of the 3x3 matrix.
    bad_indices = ((3, 0), (-4, 0), (0, 3), (0, -4), (5, 5), (-6, -6))
    for f2f in homographies:
        for row, col in bad_indices:
            with nt.assert_raises_regexp(IndexError, exp_err_msg):
                f2f.get(row, col)
def test_bad_end_time(self):
    """report_end_time rejects non-strings and malformed time strings."""
    workday = day.Day("2014-09-01")
    workday.report_start_time("8:00")
    workday.report_lunch_duration("1:00")
    nt.assert_raises_regexp(
        TypeError, "^Given end time must be a string.",
        workday.report_end_time, datetime.time(hour=17))
    for bad_time in ("24:00", "17:00:00"):
        nt.assert_raises_regexp(
            errors.BadTimeError,
            "^Bad end time: \"%s\"$" % bad_time,
            workday.report_end_time, bad_time)