def test_favorite_query_expanded_output(executor):
    """A saved favorite query run with \\G should render in expanded (vertical) form.

    Saves a query with ``\\fs``, replays it with ``\\f ... \\G``, and finally
    deletes it with ``\\fd``.
    """
    set_expanded_output(False)
    run(executor, '''create table test(a text)''')
    run(executor, '''insert into test values('abc')''')
    # Save the query under the name 'test-ae'.
    results = run(executor, "\\fs test-ae select * from test")
    assert results == ['Saved.']
    # Replay it with \G to force expanded output.
    results = run(executor, "\\f test-ae \G", join=True)
    # Two renderings are acceptable — presumably depending on which
    # expanded-output formatter backend is active (TODO confirm).
    expected_results = set([
        dedent("""\
            > select * from test
            -[ RECORD 0 ]
            a | abc
            """),
        dedent("""\
            > select * from test
            ***************************[ 1. row ]***************************
            a | abc
            """),
    ])
    set_expanded_output(False)
    assert results in expected_results
    # Clean up the saved favorite.
    results = run(executor, "\\fd test-ae")
    assert results == ['test-ae: Deleted']
def test_imports(self):
    """@import directives are inlined before the importing stylesheet's own rules."""
    src = """\
    @import "%(filename)s";
    selector1 { rule1: #abc; }
    """
    impsrc = """\
    selector-imp { rule-imp: #123; }
    """
    # Expected output: imported rules first, then the importer's rules.
    expect = """\
    selector-imp { rule-imp: #123; }
    selector1 { rule1: #abc; }
    """
    src = textwrap.dedent(src)
    impsrc = textwrap.dedent(impsrc)
    expect = textwrap.dedent(expect)
    # The imported file must live next to the importing file so the
    # relative @import path resolves.
    impfilename = self.create_tempfile(data=impsrc, suffix='.css')
    src = src % {'filename': os.path.basename(impfilename)}
    ifilename = self.create_tempfile(data=src, suffix='.css')
    ofilename = self.create_tempfile(suffix='.css')
    core.compile(ifilename, ofilename)
    with open(ofilename, 'r') as f:
        data = f.read()
    self.assertEqual(expect, data)
def test_repr(self):
    """repr(Dataset) matches the expected summary for populated and empty datasets."""
    data = create_test_data()
    # NOTE(review): the exact column alignment of this expected block was
    # lost in a formatting mangle — verify against the renderer's output.
    expected = dedent("""
    <xray.Dataset>
    Dimensions:     (dim1: 100, dim2: 50, dim3: 10, time: 20)
    Coordinates:
        dim1            X
        dim2                      X
        dim3                                X
        time                                          X
    Noncoordinates:
        var1            0         1
        var2            0                   1
        var3                      1                   0
    Attributes:
        Empty
    """).strip()
    # Strip trailing whitespace per line so alignment padding doesn't matter.
    actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
    self.assertEqual(expected, actual)

    # An empty Dataset has no dimensions, coordinates or variables.
    expected = dedent("""
    <xray.Dataset>
    Dimensions:     ()
    Coordinates:
        None
    Noncoordinates:
        None
    Attributes:
        Empty
    """).strip()
    actual = '\n'.join(x.rstrip() for x in repr(Dataset()).split('\n'))
    self.assertEqual(expected, actual)
def test_uninstall_from_reqs_file():
    """
    Test uninstall from a requirements file.
    """
    env = reset_env()
    write_file('test-req.txt', textwrap.dedent("""\
        -e %s#egg=initools-dev
        # and something else to test out:
        PyLogo<0.4
        """ % local_checkout('svn+http://svn.colorstudy.com/INITools/trunk')))
    result = run_pip('install', '-r', 'test-req.txt')
    # Rewrite the requirements file adding index/find-links options that
    # `pip uninstall` must ignore (they only make sense for install).
    write_file('test-req.txt', textwrap.dedent("""\
        # -f, -i, and --extra-index-url should all be ignored by uninstall
        -f http://www.example.com
        -i http://www.example.com
        --extra-index-url http://www.example.com
        -e %s#egg=initools-dev
        # and something else to test out:
        PyLogo<0.4
        """ % local_checkout('svn+http://svn.colorstudy.com/INITools/trunk')))
    result2 = run_pip('uninstall', '-r', 'test-req.txt', '-y')
    # Uninstall should revert everything install created, except build/src
    # scratch dirs and the requirements file itself.
    assert_all_changes(
        result, result2,
        [env.venv/'build', env.venv/'src', env.scratch/'test-req.txt'])
def testSetGlobalStyle(self):
    """SetGlobalStyle takes effect immediately and is restored after reset.

    First reformat under Chromium style inside try/finally, then restore
    PEP8 and confirm reformatting under the default style again.
    """
    try:
        style.SetGlobalStyle(style.CreateChromiumStyle())
        # NOTE(review): the exact indentation of these code samples was lost
        # in a formatting mangle — presumably the unformatted sample is
        # mis-indented and the expected one uses the style's indent width.
        unformatted_code = textwrap.dedent(u"""\
            for i in range(5):
             print('bar')
            """)
        expected_formatted_code = textwrap.dedent(u"""\
            for i in range(5):
              print('bar')
            """)
        uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
        self.assertCodeEqual(expected_formatted_code,
                             reformatter.Reformat(uwlines))
    finally:
        # Always restore the default style so other tests are unaffected.
        style.SetGlobalStyle(style.CreatePEP8Style())
        style.DEFAULT_STYLE = self.current_style

    # After restoring, reformatting uses the default (PEP8) indent again.
    unformatted_code = textwrap.dedent(u"""\
        for i in range(5):
         print('bar')
        """)
    expected_formatted_code = textwrap.dedent(u"""\
        for i in range(5):
            print('bar')
        """)
    uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
    self.assertCodeEqual(expected_formatted_code,
                         reformatter.Reformat(uwlines))
def withMaker(cls):
    """
    Implements the SausageFactory design pattern.

    Class decorator (Python 2): generates, via exec of a code template, a
    companion ``<Name>Maker`` object whose ``run`` method constructs
    instances of ``cls``, and attaches an instance of it as
    ``cls.nodeMaker``.
    """
    nodeName = cls.__name__.decode("utf-8")
    # Derive constructor argument names, skipping `self`; a default
    # __init__ means no arguments.
    if cls.__init__ is object.__init__:
        names = ()
    else:
        names = inspect.getargspec(cls.__init__).args[1:]
    # One extra "Any" in the signature — presumably for the receiver slot
    # expected by @method (TODO confirm against the Monte runtime).
    signature = ", ".join(['"Any"'] * (len(names) + 1))
    verb = nodeName
    # Prefer an alternate constructor when the class provides one.
    if getattr(cls, "fromMonte", None) is not None:
        verb += ".fromMonte"
    arglist = ", ".join(names)
    src = """\
    @autohelp
    @audited.DF
    class %sMaker(Object):
        def printOn(self, out):
            out.call(u"print", [StrObject(u"<kernel make%s>")])

        @method(%s)
        def run(self, %s):
            return %s(%s)
    """ % (nodeName, nodeName, signature, arglist, verb, arglist)
    # Execute the generated class definition in module globals so the
    # decorators/names it references resolve (Python 2 exec statement).
    d = globals()
    exec textwrap.dedent(src) in d
    cls.nodeMaker = d[nodeName + "Maker"]()
    return cls
def assert_fromfile(self, parse_func, expected_append=None, append_contents=None):
    """Assert that option values of each type parse correctly from a file.

    :param parse_func: callable(dest, filename) -> parsed options.
    :param expected_append: optional override for the expected append value.
    :param append_contents: optional override for the append file contents.
    """
    def _do_assert_fromfile(dest, expected, contents):
        # Write the value to a temp file and check it round-trips through
        # the 'fromfile' scope.
        with temporary_file() as fp:
            fp.write(contents)
            fp.close()
            options = parse_func(dest, fp.name)
            self.assertEqual(expected, options.for_scope('fromfile')[dest])

    _do_assert_fromfile(dest='string', expected='jake', contents='jake')
    _do_assert_fromfile(dest='intvalue', expected=42, contents='42')
    # Multi-line literals exercise the file-content parser.
    _do_assert_fromfile(dest='dictvalue', expected={'a': 42, 'b': (1, 2)},
                        contents=dedent("""
                          {
                            'a': 42,
                            'b': (
                              1, 2
                            )
                          }
                          """))
    _do_assert_fromfile(dest='listvalue', expected=['a', '1', '2'],
                        contents=dedent("""
                          ['a',
                           1,
                           2]
                          """))
    expected_append = expected_append or [1, 2, 42]
    append_contents = append_contents or dedent("""
      [
        1,
        2,
        42
      ]
      """)
    _do_assert_fromfile(dest='appendvalue', expected=expected_append,
                        contents=append_contents)
def setUp(self):
    """Create initial data.

    Prepares four temp fixtures: a valid .srt file, an .srt file with
    invalid contents, a valid file under a bad extension, and an (empty
    here) file for BOM/ufeff testing.
    """
    super(TestUploadTranscripts, self).setUp()
    # Well-formed SRT transcript.
    self.good_srt_file = tempfile.NamedTemporaryFile(suffix='.srt')
    self.good_srt_file.write(textwrap.dedent("""
        1
        00:00:10,500 --> 00:00:13,000
        Elephant's Dream

        2
        00:00:15,000 --> 00:00:18,000
        At the left we can see...
    """))
    self.good_srt_file.seek(0)

    # Correct extension but invalid SRT payload.
    self.bad_data_srt_file = tempfile.NamedTemporaryFile(suffix='.srt')
    self.bad_data_srt_file.write('Some BAD data')
    self.bad_data_srt_file.seek(0)

    # Valid SRT payload under a rejected extension.
    self.bad_name_srt_file = tempfile.NamedTemporaryFile(suffix='.BAD')
    self.bad_name_srt_file.write(textwrap.dedent("""
        1
        00:00:10,500 --> 00:00:13,000
        Elephant's Dream

        2
        00:00:15,000 --> 00:00:18,000
        At the left we can see...
    """))
    self.bad_name_srt_file.seek(0)

    # File reserved for the BOM (u'\ufeff') handling test.
    self.ufeff_srt_file = tempfile.NamedTemporaryFile(suffix='.srt')
def test_load_from_file_with_local_overriding_global(self):
    """Local config values override global ones; disjoint sections merge.

    The config data in the local and global files is loaded correctly;
    the local data will override the global one.
    """
    # Global (site) config.
    content = '''
        [A]
        a=1
        b=c

        [B]
        b=2'''
    site_path = touch(content=textwrap.dedent(content))
    os.environ["OQ_SITE_CFG_PATH"] = site_path
    # Local config: overrides A.a, adds A.d and section D.
    content = '''
        [A]
        a=2
        d=e

        [D]
        c=d-1
        d=4'''
    local_path = touch(content=textwrap.dedent(content))
    os.environ["OQ_LOCAL_CFG_PATH"] = local_path
    # Force a reload so the environment variables above take effect.
    config.cfg.cfg.clear()
    config.cfg._load_from_file()
    self.assertEqual(["A", "B", "D"], sorted(config.cfg.cfg))
    # A.a comes from the local file, A.b from the global one.
    self.assertEqual({"a": "2", "b": "c", "d": "e"},
                     config.cfg.cfg.get("A"))
    self.assertEqual({"b": "2"}, config.cfg.cfg.get("B"))
    self.assertEqual({"c": "d-1", "d": "4"}, config.cfg.cfg.get("D"))
def createHeader(className, qobject): fileText = createHeadingComment() fileText += textwrap.dedent('''\ #ifndef ''' + className.upper() + '''_H #define ''' + className.upper() + '''_H ''' + includeQObject(qobject) + ''' class ''' + className + extendQObject(qobject) + ''' { ''' + macroQObject(qobject) + ''' public: ''' + className + '''(); ~''' + className + '''(); }; #endif ''') fileText = textwrap.dedent(fileText) if os.path.isfile(className + '.h'): print '\t' + className + '.h already exists' return file = open(className + '.h', 'w') file.write(fileText)
def createInterfaceFile(className):
    """Write ``<className>.h`` declaring a pure C++ interface class.

    The generated class has a virtual destructor plus placeholder sections
    for signals and slots. Does nothing if the header already exists.
    """
    fileText = createHeadingComment()
    fileText += textwrap.dedent('''\
    #ifndef ''' + className.upper() + '''_H
    #define ''' + className.upper() + '''_H

    class ''' + className + '''
    {
    public:
        virtual ~''' + className + '''() {};

    public: // signals

    public: // slots
    };

    #endif
    ''')
    fileText = textwrap.dedent(fileText)
    if os.path.isfile(className + '.h'):
        return
    # Fix: use a context manager so the handle is always closed, and avoid
    # shadowing the builtin `file`.
    with open(className + '.h', 'w') as header_file:
        header_file.write(fileText)
def test_sibling_build_files(self):
    """Sibling BUILD files (BUILD, BUILD.foo, BUILD.bar) form one address family."""
    self.add_to_build_file('BUILD', dedent(
        """
        fake(name="base",
             dependencies=[
               ':foo',
             ])
        """))
    self.add_to_build_file('BUILD.foo', dedent(
        """
        fake(name="foo",
             dependencies=[
               ':bat',
             ])
        """))
    # './BUILD.bar' exercises path normalization of the './' prefix.
    self.add_to_build_file('./BUILD.bar', dedent(
        """
        fake(name="bat")
        """))

    bar_build_file = FilesystemBuildFile(self.build_root, 'BUILD.bar')
    base_build_file = FilesystemBuildFile(self.build_root, 'BUILD')
    foo_build_file = FilesystemBuildFile(self.build_root, 'BUILD.foo')

    # Parsing any one sibling yields addresses from the whole family.
    address_map = self.build_file_parser.address_map_from_build_file(bar_build_file)
    addresses = address_map.keys()
    self.assertEqual({bar_build_file, base_build_file, foo_build_file},
                     set([address.build_file for address in addresses]))
    self.assertEqual({'//:base', '//:foo', '//:bat'},
                     set([address.spec for address in addresses]))
def test_nested_namespaces(self):
    """Thrift codegen creates namespace packages for nested py namespaces."""
    self.create_file('src/thrift/com/foo/one.thrift', contents=dedent("""
    namespace py foo.bar

    struct One {}
    """))
    # Second file lives deeper on disk AND declares a deeper py namespace.
    self.create_file('src/thrift/com/foo/bar/two.thrift', contents=dedent("""
    namespace py foo.bar.baz

    struct Two {}
    """))
    one = self.make_target(spec='src/thrift/com/foo:one',
                           target_type=PythonThriftLibrary,
                           sources=['one.thrift', 'bar/two.thrift'])
    _, synthetic_target = self.generate_single_thrift_target(one)
    # Every package level gets an __init__.py; generated modules land in
    # the namespace directories, not the source directories.
    self.assertEqual({'foo/__init__.py',
                      'foo/bar/__init__.py',
                      'foo/bar/constants.py',
                      'foo/bar/ttypes.py',
                      'foo/bar/baz/__init__.py',
                      'foo/bar/baz/constants.py',
                      'foo/bar/baz/ttypes.py'},
                     set(synthetic_target.sources_relative_to_source_root()))
    # 'foo' is a namespace package; the leaves hold real modules.
    self.assert_ns_package(synthetic_target, 'foo')
    self.assert_leaf_package(synthetic_target, 'foo/bar')
    self.assert_leaf_package(synthetic_target, 'foo/bar/baz')
def test_files_cache(self):
    """The legacy ``files_cache`` key populates ``repository_cache``."""
    # Given: a plain path.
    yaml_string = textwrap.dedent("""\
        files_cache: "/foo/bar"
    """)

    # When
    config = Configuration.from_yaml_filename(StringIO(yaml_string))

    # Then
    self.assertFalse(config.use_webservice)
    self.assertEqual(config.repository_cache, "/foo/bar")

    # Given: '~' and the {PLATFORM} placeholder must both be expanded.
    yaml_string = textwrap.dedent("""\
        files_cache: "~/foo/bar/{PLATFORM}"
    """)

    # When
    config = Configuration.from_yaml_filename(StringIO(yaml_string))

    # Then
    self.assertFalse(config.use_webservice)
    self.assertEqual(config.repository_cache,
                     os.path.expanduser("~/foo/bar/{0}".format(custom_plat)))
def test_sibling_build_files_duplicates(self):
    """A name redefined in a sibling BUILD file raises SiblingConflictException."""
    # This workspace is malformed, you can't shadow a name in a sibling BUILD file
    self.add_to_build_file('BUILD', dedent(
        """
        fake(name="base",
             dependencies=[
               ':foo',
             ])
        """))
    self.add_to_build_file('BUILD.foo', dedent(
        """
        fake(name="foo",
             dependencies=[
               ':bat',
             ])
        """))
    # Re-declares "base", which BUILD already defines.
    self.add_to_build_file('./BUILD.bar', dedent(
        """
        fake(name="base")
        """))

    with self.assertRaises(BuildFileParser.SiblingConflictException):
        base_build_file = FilesystemBuildFile(self.build_root, 'BUILD')
        self.build_file_parser.address_map_from_build_file(base_build_file)
def test_get_html(self):
    """get_html parses the rendered template; HTML5-but-not-XML input is
    normalized by the HTML parser, and truly broken markup raises."""
    # usual output
    output = self.the_input.get_html()
    self.assertEqual(
        etree.tostring(output),
        """<div>{\'status\': Status(\'queued\'), \'button_enabled\': True, \'rows\': \'10\', \'queue_len\': \'3\', \'mode\': \'\', \'cols\': \'80\', \'STATIC_URL\': \'/dummy-static/\', \'linenumbers\': \'true\', \'queue_msg\': \'\', \'value\': \'print "good evening"\', \'msg\': u\'Submitted. As soon as a response is returned, this message will be replaced by that feedback.\', \'matlab_editor_js\': \'/dummy-static/js/vendor/CodeMirror/octave.js\', \'hidden\': \'\', \'id\': \'prob_1_2\', \'tabsize\': 4}</div>"""
    )

    # test html, that is correct HTML5 html, but is not parsable by XML parser.
    old_render_template = self.the_input.capa_system.render_template
    self.the_input.capa_system.render_template = lambda *args: textwrap.dedent("""
        <div class='matlabResponse'><div id='mwAudioPlaceHolder'>
        <audio controls autobuffer autoplay src='data:audio/wav;base64='>Audio is not supported on this browser.</audio>
        <div>Right click <a href=https://endpoint.mss-mathworks.com/media/filename.wav>here</a> and click \"Save As\" to download the file</div></div>
        <div style='white-space:pre' class='commandWindowOutput'></div><ul></ul></div>
    """).replace('\n', '')

    output = self.the_input.get_html()
    # The HTML parser normalizes: boolean attrs get ="" values, bare href
    # values get quoted, empty elements become self-closing.
    self.assertEqual(
        etree.tostring(output),
        textwrap.dedent("""
        <div class='matlabResponse'><div id='mwAudioPlaceHolder'>
        <audio src='data:audio/wav;base64=' autobuffer="" controls="" autoplay="">Audio is not supported on this browser.</audio>
        <div>Right click <a href="https://endpoint.mss-mathworks.com/media/filename.wav">here</a> and click \"Save As\" to download the file</div></div>
        <div style='white-space:pre' class='commandWindowOutput'/><ul/></div>
    """).replace('\n', '').replace('\'', '\"')
    )

    # check that exception is raised during parsing for html.
    self.the_input.capa_system.render_template = lambda *args: "<aaa"
    with self.assertRaises(etree.XMLSyntaxError):
        self.the_input.get_html()

    # Restore the real template renderer for subsequent tests.
    self.the_input.capa_system.render_template = old_render_template
def test_api_token_authentication(self):
    """An api_token yields APITokenAuth, with or without the explicit kind."""
    # Given: explicit `kind: token`.
    yaml_string = textwrap.dedent("""\
        authentication:
          kind: token
          api_token: ulysse
    """)

    # When
    config = Configuration.from_yaml_filename(StringIO(yaml_string))

    # Then
    self.assertFalse(config.use_webservice)
    self.assertEqual(config.auth, APITokenAuth("ulysse"))

    # Given: `kind` omitted — token is presumably the default kind.
    yaml_string = textwrap.dedent("""\
        authentication:
          api_token: ulysse
    """)

    # When
    config = Configuration.from_yaml_filename(StringIO(yaml_string))

    # Then
    self.assertFalse(config.use_webservice)
    self.assertEqual(config.auth, APITokenAuth("ulysse"))
def test_chained_type_positive(self):
    # type: () -> None
    """Positive parser chaining test cases."""

    # Setup some common types: a plain string type and a chainable type
    # (bson_serialization_type: chain).
    test_preamble = textwrap.dedent("""
    types:
        string:
            description: foo
            cpp_type: foo
            bson_serialization_type: string
            serializer: foo
            deserializer: foo
            default: foo

        foo1:
            description: foo
            cpp_type: foo
            bson_serialization_type: chain
            serializer: foo
            deserializer: foo
            default: foo
    """)

    # Chaining only: a non-strict struct may chain the chainable type.
    self.assert_bind(test_preamble + textwrap.dedent("""
    structs:
        bar1:
            description: foo
            strict: false
            chained_types:
                - foo1
    """))
def test_fix_activate_path():
    """fix_activate_path rewrites VIRTUAL_ENV in the activate script in place."""
    deployment = Deployment('test')
    temp = tempfile.NamedTemporaryFile()

    with open(temp.name, 'w') as fh:
        fh.write(textwrap.dedent("""
            other things

            VIRTUAL_ENV="/this/path/is/wrong/and/longer/than/new/path"

            more other things
        """))

    expected = textwrap.dedent("""
        other things

        VIRTUAL_ENV="/usr/share/python/test"

        more other things
    """)

    # Point the deployment's path resolution at our temp file so
    # fix_activate_path rewrites it instead of a real activate script.
    with patch('dh_virtualenv.deployment.os.path.join',
               return_value=temp.name):
        deployment.fix_activate_path()

    with open(temp.name) as fh:
        # Fix: read from the handle we just opened (`fh`), not from the
        # stale NamedTemporaryFile object — `temp.read()` uses an unrelated
        # file position/mode and does not reflect the rewritten contents.
        eq_(expected, fh.read())
def test_command_positive(self):
    # type: () -> None
    """Positive command tests."""

    # Setup some common types.
    test_preamble = textwrap.dedent("""
    types:
        string:
            description: foo
            cpp_type: foo
            bson_serialization_type: string
            serializer: foo
            deserializer: foo
            default: foo
    """)

    # A strict command with one string field binds successfully.
    self.assert_bind(test_preamble + textwrap.dedent("""
    commands:
        foo:
            description: foo
            namespace: ignored
            strict: true
            fields:
                foo: string
    """))
def test_struct_negative(self):
    # type: () -> None
    """Negative struct tests."""

    # Setup some common types.
    test_preamble = textwrap.dedent("""
    types:
        string:
            description: foo
            cpp_type: foo
            bson_serialization_type: string
            serializer: foo
            deserializer: foo
            default: foo
    """)

    # Test array as name: 'array<...>' is reserved type syntax and may not
    # be used as a struct name.
    self.assert_bind_fail(test_preamble + textwrap.dedent("""
    structs:
        array<foo>:
            description: foo
            strict: true
            fields:
                foo: string
    """), idl.errors.ERROR_ID_ARRAY_NOT_VALID_TYPE)
def test_enum_positive(self):
    # type: () -> None
    """Positive enum test cases."""

    # Test int: values need not be contiguous or ordered.
    self.assert_bind(
        textwrap.dedent("""
        enums:
            foo:
                description: foo
                type: int
                values:
                    v1: 3
                    v2: 1
                    v3: 2
        """))

    # Test string: numeric-looking values are fine for a string enum.
    self.assert_bind(
        textwrap.dedent("""
        enums:
            foo:
                description: foo
                type: string
                values:
                    v1: 0
                    v2: 1
                    v3: 2
        """))
def run(self):
    """Top-level benchmark driver: gather tests, set up, run, parse, finish.

    In 'benchmark' mode the results are additionally parsed after the run.
    (Python 2 module — print statements.)
    """
    ##########################
    # Get a list of all known
    # tests that we can run.
    ##########################
    all_tests = self.__gather_tests()

    ##########################
    # Setup client/server
    ##########################
    print textwrap.dedent("""
    =====================================================
              Preparing up Server and Client ...
    =====================================================
    """)
    self.__setup_server()
    self.__setup_client()

    ##########################
    # Run tests
    ##########################
    self.__run_tests(all_tests)

    ##########################
    # Parse results
    ##########################
    # Parsing only applies to benchmark runs (not e.g. verify mode).
    if self.mode == "benchmark":
        print textwrap.dedent("""
        =====================================================
                      Parsing Results ...
        =====================================================
        """)
        self.__parse_results(all_tests)

    self.__finish()
def test_check_prog_input(self):
    """check_prog honors `input=` from an option and from a @depends value."""
    # `input` naming a command-line option: the user-supplied value wins
    # over the ("known-a",) candidate list.
    config, out, status = self.get_result(textwrap.dedent('''
        option("--with-ccache", nargs=1, help="ccache")
        check_prog("CCACHE", ("known-a",), input="--with-ccache")
    '''), ['--with-ccache=known-b'])
    self.assertEqual(status, 0)
    self.assertEqual(config, {'CCACHE': self.KNOWN_B})
    self.assertEqual(out, 'checking for ccache... %s\n' % self.KNOWN_B)

    # `input` as a @depends function deriving the program from $CC.
    script = textwrap.dedent('''
        option(env="CC", nargs=1, help="compiler")
        @depends("CC")
        def compiler(value):
            return value[0].split()[0] if value else None
        check_prog("CC", ("known-a",), input=compiler)
    ''')
    # No CC in the environment: fall back to the candidate list.
    config, out, status = self.get_result(script)
    self.assertEqual(status, 0)
    self.assertEqual(config, {'CC': self.KNOWN_A})
    self.assertEqual(out, 'checking for cc... %s\n' % self.KNOWN_A)

    config, out, status = self.get_result(script, ['CC=known-b'])
    self.assertEqual(status, 0)
    self.assertEqual(config, {'CC': self.KNOWN_B})
    self.assertEqual(out, 'checking for cc... %s\n' % self.KNOWN_B)

    # Extra compiler flags are stripped by the @depends function.
    config, out, status = self.get_result(script, ['CC=known-b -m32'])
    self.assertEqual(status, 0)
    self.assertEqual(config, {'CC': self.KNOWN_B})
    self.assertEqual(out, 'checking for cc... %s\n' % self.KNOWN_B)
def dedent(text):
    """Dedent *text* like ``textwrap.dedent``, ignoring an unindented first line.

    This means it will still dedent strings like:
    '''foo
    is a bar
    '''

    A leading blank line means the first line IS indented material, so the
    whole text is dedented normally. For use in wrap_paragraphs.
    """
    if text.startswith('\n'):
        # Starts with a blank line: nothing special about line one.
        return textwrap.dedent(text)

    head, newline, tail = text.partition('\n')
    if not newline:
        # Single line: plain dedent.
        return textwrap.dedent(text)

    # Keep the first line untouched; dedent only the remainder.
    return head + '\n' + textwrap.dedent(tail)
def test_py3k_commutative_with_config_disable(self):
    """--py3k composes with rcfile disables and -E regardless of flag order."""
    module = join(HERE, 'regrtest_data', 'py3k_errors_and_warnings.py')
    rcfile = join(HERE, 'regrtest_data', 'py3k-disabled.rc')
    cmd = [module, "--msg-template='{msg}'", "--reports=n"]

    # Baseline: all four py3k messages are emitted.
    expected = textwrap.dedent("""
    ************* Module py3k_errors_and_warnings
    import missing `from __future__ import absolute_import`
    Use raise ErrorClass(args) instead of raise ErrorClass, args.
    Calling a dict.iter*() method
    print statement used
    """)
    self._test_output(cmd + ["--py3k"], expected_output=expected)

    # The rcfile disables the absolute_import message.
    expected = textwrap.dedent("""
    ************* Module py3k_errors_and_warnings
    Use raise ErrorClass(args) instead of raise ErrorClass, args.
    Calling a dict.iter*() method
    print statement used
    """)
    self._test_output(cmd + ["--py3k", "--rcfile", rcfile],
                      expected_output=expected)

    # -E additionally drops the dict.iter*() warning; flag order must not
    # matter (commutativity).
    expected = textwrap.dedent("""
    ************* Module py3k_errors_and_warnings
    Use raise ErrorClass(args) instead of raise ErrorClass, args.
    print statement used
    """)
    self._test_output(cmd + ["--py3k", "-E", "--rcfile", rcfile],
                      expected_output=expected)
    self._test_output(cmd + ["-E", "--py3k", "--rcfile", rcfile],
                      expected_output=expected)
def test_good_txt_transcript(self):
    """A valid sjson transcript converts to plain text in txt format."""
    good_sjson = _create_file(content=textwrap.dedent("""\
        {
          "start": [
            270,
            2720
          ],
          "end": [
            2720,
            5430
          ],
          "text": [
            "Hi, welcome to Edx.",
            "Let's start with what is on your screen right now."
          ]
        }
    """))
    _upload_sjson_file(good_sjson, self.item.location)
    self.item.sub = _get_subs_id(good_sjson.name)
    transcripts = self.item.get_transcripts_info()
    text, filename, mime_type = self.item.get_transcript(
        transcripts, transcript_format="txt")
    # txt output: just the caption lines, timestamps dropped.
    expected_text = textwrap.dedent("""\
        Hi, welcome to Edx.
        Let's start with what is on your screen right now.""")
    self.assertEqual(text, expected_text)
    self.assertEqual(filename, self.item.sub + '.txt')
    self.assertEqual(mime_type, 'text/plain; charset=utf-8')
def test_upgrade_from_reqs_file(script):
    """
    Upgrade from a requirements file.
    """
    # Install pinned versions first.
    script.scratch_path.join("test-req.txt").write(textwrap.dedent("""\
        PyLogo<0.4
        # and something else to test out:
        INITools==0.3
        """))
    install_result = script.pip(
        'install', '-r', script.scratch_path / 'test-req.txt'
    )
    # Relax the pins and upgrade to the latest versions.
    script.scratch_path.join("test-req.txt").write(textwrap.dedent("""\
        PyLogo
        # and something else to test out:
        INITools
        """))
    script.pip(
        'install', '--upgrade', '-r', script.scratch_path / 'test-req.txt'
    )
    uninstall_result = script.pip(
        'uninstall', '-r', script.scratch_path / 'test-req.txt', '-y'
    )
    # After uninstall, only scratch artifacts should remain changed.
    assert_all_changes(
        install_result,
        uninstall_result,
        [script.venv / 'build', 'cache', script.scratch / 'test-req.txt'],
    )
def getMissingImportStr(modNameList):
    """
    Given a list of missing module names, returns a nicely-formatted message to the user
    that gives instructions on how to expand music21 with optional packages.

    >>> print(common.getMissingImportStr(['matplotlib']))
    Certain music21 functions might need the optional package matplotlib;
    if you run into errors, install it by following the instructions at
    http://mit.edu/music21/doc/installing/installAdditional.html
    >>> print(common.getMissingImportStr(['matplotlib', 'numpy']))
    Certain music21 functions might need these optional packages: matplotlib, numpy;
    if you run into errors, install them by following the instructions at
    http://mit.edu/music21/doc/installing/installAdditional.html
    """
    if not modNameList:
        # No missing modules: nothing to report.
        return None
    if len(modNameList) == 1:
        return ('Certain music21 functions might need the optional package %s;\n'
                'if you run into errors, install it by following the instructions at\n'
                'http://mit.edu/music21/doc/installing/installAdditional.html'
                % modNameList[0])
    # Several missing modules: list them all in one message.
    return ('Certain music21 functions might need these optional packages: %s;\n'
            'if you run into errors, install them by following the instructions at\n'
            'http://mit.edu/music21/doc/installing/installAdditional.html'
            % ', '.join(modNameList))
def test_struct_enum_negative(self):
    # type: () -> None
    """Negative enum test cases."""

    # Common preamble declaring an int enum used by both cases below.
    test_preamble = textwrap.dedent("""
    enums:
        foo:
            description: foo
            type: int
            values:
                v1: 0
                v2: 1
    """)

    # Test array of enums: arrays of enum types are rejected.
    self.assert_bind_fail(test_preamble + textwrap.dedent("""
    structs:
        foo1:
            description: foo
            fields:
                foo1: array<foo>
    """), idl.errors.ERROR_ID_NO_ARRAY_ENUM)

    # Test default: enum-typed fields may not declare a default value.
    self.assert_bind_fail(test_preamble + textwrap.dedent("""
    structs:
        foo1:
            description: foo
            fields:
                foo1:
                    type: foo
                    default: 1
    """), idl.errors.ERROR_ID_FIELD_MUST_BE_EMPTY_FOR_ENUM)
def test_enable_successfully(
        self, mock_get_account_information, mock_login, mock_getpass,
        mock_input, mock_check_call, mock_check_output):
    """travis.enable logs in (with 2FA), encrypts credentials with the
    travis CLI, and rewrites .travis.yml for snap CI + store deployment."""
    # Interactive inputs: email then one-time password.
    mock_input.side_effect = ['*****@*****.**', '123456']
    mock_getpass.return_value = 'secret'
    # First login attempt raises to force the 2FA path; second succeeds.
    mock_login.side_effect = [
        storeapi.errors.StoreTwoFactorAuthenticationRequired(), None]
    mock_get_account_information.return_value = {'account_id': 'abcd'}
    mock_check_call.side_effect = [None, None]
    mock_check_output.side_effect = [None]

    self.make_snapcraft_yaml(test_snapcraft_yaml)
    self.make_travis_yml('after_success: ["<travis-cli-decrypt>"]')

    travis.enable()

    # Attenuated credentials requested from the Store.
    mock_login.assert_called_with(
        '*****@*****.**', 'secret', one_time_password='******', acls=None,
        save=False, channels=['edge'],
        packages=[{'series': '16', 'name': 'foo'}], expires=None,
        config_fd=None)

    # Credentials encrypted with travis CLI.
    mock_check_output.assert_called_with(
        ['travis', 'encrypt-file', '--force',
         '--add', 'after_success', '--decrypt-to',
         travis.LOCAL_CONFIG_FILENAME,
         mock.ANY, travis.ENCRYPTED_CONFIG_FILENAME],
        stderr=subprocess.PIPE)

    # '.travis.yml' updated for snap CI.
    with open('.travis.yml') as fd:
        travis_conf = yaml.load(fd)
        self.assertThat(travis_conf['sudo'], Equals('required'))
        self.assertThat(travis_conf['services'], Equals(['docker']))
        # The pre-existing after_success entry must be preserved.
        self.assertThat(
            travis_conf['after_success'],
            Equals([
                '<travis-cli-decrypt>',
            ]))
        self.assertThat(
            travis_conf['deploy'], Equals({
                'skip_cleanup': True,
                'provider': 'script',
                'script': (
                    'docker run -v $(pwd):$(pwd) -t snapcore/snapcraft '
                    'sh -c "apt update -qq && '
                    'cd $(pwd) && '
                    'snapcraft && snapcraft push *.snap --release edge"'),
                'on': {
                    'branch': 'master',
                },
            }))

    # Descriptive logging ...
    self.assertThat(
        self.fake_logger.output, Contains(dedent("""\
            Enabling Travis testbeds to push and release 'foo' snaps to edge channel in series '16'
            Acquiring specific authorization information ...
            Encrypting authorization for Travis and adjusting project to automatically decrypt and use it during "after_success".
            Configuring "deploy" phase to build and release the snap in the Store.
            Done. Now you just have to review and commit changes in your Travis project (`.travis.yml`).
            Also make sure you add the new `.snapcraft/travis_snapcraft.cfg` file.
            """)))  # noqa TODO this type of test should not be done
FORMULA_TEMPLATE = Template(dedent("""\ class {{ package.name|capitalize }} < Formula include Language::Python::Virtualenv desc "Shiny new formula" homepage "{{ package.homepage }}" url "{{ package.url }}" sha256 "{{ package.checksum }}" {% if python == "python" %} depends_on :python if MacOS.version <= :snow_leopard {% else %} depends_on :python3 {% endif %} {% if resources %} {% for resource in resources %} {% include ResourceTemplate %} {% endfor %} {% endif %} def install {% if python == "python3" %} virtualenv_create(libexec, "python3") {% endif %} virtualenv_install_with_resources end test do false end end """),
layout = html.Div([ dcc.Markdown(dedent(''' # Cytoscape Layouts The layout parameter of `cyto.Cytoscape` takes as argument a dictionary specifying how the nodes should be positioned on the screen. Every graph requires this dictionary with a value specified for the `name` key. It represents a built-in display method, which is one of the following: - `preset` - `random` - `grid` - `circle` - `concentric` - `breadthfirst` - `cose` All those layouts (along with their options), are described in the [official Cytoscape documentation](http://js.cytoscape.org/#layouts). There, you can find the exact keys accepted by your dictionary, enabling advanced fine-tuning (demonstrated below). If preset is given, the positions will be rendered based on the positions specified in the elements. Otherwise, the positions will be computed by Cytoscape.js behind the scene, based on the given items of the layout dictionary. Let's start with an example of declaring a graph with a preset layout: ''')), Display('''
def get_parser():
    """Build and return the argparse parser for normalize-by-median.py.

    The epilog documents pairing semantics, table save/load options, and
    gives usage examples; long example command lines are concatenated as
    plain strings so they are not re-wrapped by dedent.
    """
    epilog = ("""
    Discard sequences based on whether or not their
    median k-mer abundance lies above a specified cutoff. Kept sequences will
    be placed in <fileN>.keep.

    By default, paired end reads will be considered together; if
    either read should be kept, both will be kept. (This keeps both
    reads from a fragment, and helps with retention of repeats.)
    Unpaired reads are treated individually.

    If :option:`-p`/`--paired` is set, then proper pairing is required
    and the script will exit on unpaired reads, although
    :option:`--unpaired-reads` can be used to supply a file of orphan
    reads to be read after the paired reads.

    :option:`--force-single` will ignore all pairing information and treat
    reads individually.

    With :option:`-s`/:option:`--savetable`, the k-mer counting table
    will be saved to the specified file after all sequences have been
    processed. With :option:`-d`, the k-mer counting table will be
    saved every d files for multifile runs; if :option:`-s` is set,
    the specified name will be used, and if not, the name `backup.ct`
    will be used. :option:`-l`/:option:`--loadtable` will load the
    specified k-mer counting table before processing the specified
    files.  Note that these tables are are in the same format as those
    produced by :program:`load-into-counting.py` and consumed by
    :program:`abundance-dist.py`.

    To append reads to an output file (rather than overwriting it), send output
    to STDOUT with `--out -` and use UNIX file redirection syntax (`>>`) to
    append to the file.

    Example::

        normalize-by-median.py -k 17 tests/test-data/test-abund-read-2.fa

    Example::

"""
    "        normalize-by-median.py -p -k 17 tests/test-data/test-abund-read-paired.fa"  # noqa
    """

    Example::

"""
    "        normalize-by-median.py -p -k 17 -o - tests/test-data/paired.fq >> appended-output.fq"  # noqa
    """

    Example::

"""
    "        normalize-by-median.py -k 17 -f tests/test-data/test-error-reads.fq tests/test-data/test-fastq-reads.fq"  # noqa
    """

    Example::

"""
    "        normalize-by-median.py -k 17 -d 2 -s test.ct tests/test-data/test-abund-read-2.fa tests/test-data/test-fastq-reads")  # noqa
    parser = build_counting_args(
        descr="Do digital normalization (remove mostly redundant sequences)",
        epilog=textwrap.dedent(epilog))
    parser.add_argument('-C', '--cutoff', type=int,
                        default=DEFAULT_DESIRED_COVERAGE)
    parser.add_argument('-p', '--paired', action='store_true',
                        help='require that all sequences be properly paired')
    parser.add_argument('--force-single', dest='force_single',
                        action='store_true',
                        help='treat all sequences as single-ended/unpaired')
    parser.add_argument('-u', '--unpaired-reads',
                        metavar="unpaired_reads_filename",
                        help='include a file of unpaired reads to which '
                        '-p/--paired does not apply.')
    parser.add_argument('-s', '--savetable', metavar="filename", default='',
                        help='save the k-mer counting table to disk after all'
                        'reads are loaded.')
    parser.add_argument('-R', '--report',
                        metavar='filename', type=argparse.FileType('w'))
    parser.add_argument('-f', '--force', dest='force',
                        help='continue on next file if read errors are \
                              encountered', action='store_true')
    parser.add_argument('-o', '--out', metavar="filename",
                        dest='single_output_file',
                        type=argparse.FileType('w'),
                        default=None, help='only output a single file with '
                        'the specified filename; use a single dash "-" to '
                        'specify that output should go to STDOUT (the '
                        'terminal)')
    parser.add_argument('input_filenames', metavar='input_sequence_filename',
                        help='Input FAST[AQ] sequence filename.', nargs='+')
    add_loadhash_args(parser)
    return parser
import io import textwrap from collections import namedtuple import pytest from ibidem.advent_of_code.y2021.dec22 import load, part1, part2, Step, gen_coord, Range Case = namedtuple("Case", ("result", "input")) CASES = (Case( 39, io.StringIO( textwrap.dedent("""\ on x=10..12,y=10..12,z=10..12 on x=11..13,y=11..13,z=11..13 off x=9..11,y=9..11,z=9..11 on x=10..10,y=10..10,z=10..10 """))), Case( 590784, io.StringIO( textwrap.dedent("""\ on x=-20..26,y=-36..17,z=-47..7 on x=-20..33,y=-21..23,z=-26..28 on x=-22..28,y=-29..23,z=-38..16 on x=-46..7,y=-6..46,z=-50..-1 on x=-49..1,y=-3..46,z=-24..28 on x=2..47,y=-22..22,z=-23..27 on x=-27..23,y=-28..26,z=-21..29 on x=-39..5,y=-6..47,z=-3..44 on x=-30..21,y=-8..43,z=-13..34
def test_file_deps_success(rule_runner: RuleRunner) -> None:
    """A scalatest test can read a `file()` dependency from its sandbox cwd."""
    scalatest_coord = Coordinate(group="org.scalatest", artifact="scalatest_2.13", version="3.2.10")
    rule_runner.write_files({
        "3rdparty/jvm/default.lock":
            importlib.resources.read_text(*Scalatest.default_lockfile_resource),
        "BUILD": dedent(f"""\
            jvm_artifact(
              name = 'org.scalatest_scalatest',
              group = '{scalatest_coord.group}',
              artifact = '{scalatest_coord.artifact}',
              version = '{scalatest_coord.version}',
            )

            scala_sources(
                name='example-sources',
                dependencies=[':ducks'],
            )

            scalatest_tests(
                name='example-test',
                dependencies= [
                    ':org.scalatest_scalatest',
                    ':example-sources',
                ],
            )

            file(
                name="ducks",
                source="ducks.txt",
            )
            """),
        # Helper that reads the file dependency from the working directory.
        "SimpleFileReader.scala": dedent("""
            package org.pantsbuild.example

            import java.nio.file.Files
            import java.nio.file.Path

            object SimpleFileReader {
                def read(): String = Files.readString(Path.of("ducks.txt"))
            }
            """),
        # The spec reads the file both via the helper and directly.
        "SimpleSpec.scala": dedent("""
            package org.pantsbuild.example;

            import org.scalatest.funspec.AnyFunSpec
            import java.nio.file.Files
            import java.nio.file.Path

            class SimpleSpec extends AnyFunSpec {
              describe("Ducks") {
                it("should be ducks") {
                  val expectedFileContents = "lol ducks"

                  assert(SimpleFileReader.read() == expectedFileContents)
                  assert(Files.readString(Path.of("ducks.txt")) == expectedFileContents)
                }
              }
            }
            """),
        "ducks.txt": "lol ducks",
    })

    test_result = run_scalatest_test(rule_runner, "example-test", "SimpleSpec.scala")

    assert test_result.exit_code == 0
    assert "Tests: succeeded 1, failed 0, canceled 0, ignored 0, pending 0" in test_result.stdout
    assert test_result.xml_results and test_result.xml_results.files
def _patch_sources(self):
    """Apply conandata patches plus in-tree edits to protobuf's CMake config templates.

    Edits ``cmake/protobuf-config.cmake.in`` and ``cmake/protobuf-module.cmake.in`` in
    the extracted sources so the generated package config is relocatable and does not
    depend on protobuf's bundled ZLIB discovery.
    """
    # Apply any patches declared in conandata.yml for this version.
    for patch in self.conan_data.get("patches", {}).get(self.version, []):
        tools.patch(**patch)

    # Provide relocatable protobuf::protoc target and Protobuf_PROTOC_EXECUTABLE cache variable
    # TODO: some of the following logic might be disabled when conan will
    # allow to create executable imported targets in package_info()
    protobuf_config_cmake = os.path.join(self._source_subfolder, "cmake", "protobuf-config.cmake.in")

    # Neutralize protobuf's own ZLIB lookup; the substitution marker is commented out
    # rather than removed so the patch site stays visible in the generated file.
    tools.replace_in_file(
        protobuf_config_cmake,
        "@_protobuf_FIND_ZLIB@",
        "# BEGIN CONAN PATCH\n#_protobuf_FIND_ZLIB@\n# END CONAN PATCH")

    exe_ext = ".exe" if self.settings.os == "Windows" else ""
    protoc_filename = "protoc" + exe_ext
    # Path from the installed CMake-module folder back up to the package root's bin/,
    # so the imported protoc location survives package relocation.
    module_folder_depth = len(os.path.normpath(self._cmake_install_base_path).split(os.path.sep))
    protoc_rel_path = "{}bin/{}".format("".join(["../"] * module_folder_depth), protoc_filename)
    # CMake snippet defining protobuf::protoc as an imported executable. A protoc found
    # on PATH wins; otherwise the relocatable in-package binary is used.
    protoc_target = textwrap.dedent("""\
        if(NOT TARGET protobuf::protoc)
            find_program(PROTOC_PROGRAM protoc PATHS ENV PATH NO_DEFAULT_PATH)
            if(NOT PROTOC_PROGRAM)
                set(PROTOC_PROGRAM \"${{CMAKE_CURRENT_LIST_DIR}}/{protoc_rel_path}\")
            endif()
            get_filename_component(PROTOC_PROGRAM \"${{PROTOC_PROGRAM}}\" ABSOLUTE)
            set(Protobuf_PROTOC_EXECUTABLE ${{PROTOC_PROGRAM}} CACHE FILEPATH \"The protoc compiler\")
            add_executable(protobuf::protoc IMPORTED)
            set_property(TARGET protobuf::protoc PROPERTY IMPORTED_LOCATION ${{Protobuf_PROTOC_EXECUTABLE}})
        endif()
        """.format(protoc_rel_path=protoc_rel_path))
    # Replace the targets-file include (which would define protoc from build metadata)
    # with the relocatable definition above.
    tools.replace_in_file(
        protobuf_config_cmake,
        "include(\"${CMAKE_CURRENT_LIST_DIR}/protobuf-targets.cmake\")",
        protoc_target)

    # Set DYLD_LIBRARY_PATH in command line to avoid issues with shared protobuf
    # (even with virtualrunenv, this fix might be required due to SIP)
    # Only works with cmake, cmake_find_package or cmake_find_package_multi generators
    if tools.is_apple_os(self.settings.os):
        tools.replace_in_file(
            protobuf_config_cmake,
            "add_custom_command(",
            ("set(CUSTOM_DYLD_LIBRARY_PATH ${CONAN_LIB_DIRS} ${Protobuf_LIB_DIRS} "
             "${Protobuf_LIB_DIRS_RELEASE} ${Protobuf_LIB_DIRS_DEBUG} ${Protobuf_LIB_DIRS_RELWITHDEBINFO} ${Protobuf_LIB_DIRS_MINSIZEREL})\n"
             "string(REPLACE \";\" \":\" CUSTOM_DYLD_LIBRARY_PATH \"${CUSTOM_DYLD_LIBRARY_PATH}\")\n"
             "add_custom_command("))
        # Run protoc through `cmake -E env` so DYLD_LIBRARY_PATH reaches the child
        # process despite macOS System Integrity Protection stripping it.
        tools.replace_in_file(
            protobuf_config_cmake,
            "COMMAND protobuf::protoc",
            "COMMAND ${CMAKE_COMMAND} -E env \"DYLD_LIBRARY_PATH=${CUSTOM_DYLD_LIBRARY_PATH}\" $<TARGET_FILE:protobuf::protoc>")

    # Disable a potential warning in protobuf-module.cmake.in
    # TODO: remove this patch? Is it really useful?
    protobuf_module_cmake = os.path.join(self._source_subfolder, "cmake", "protobuf-module.cmake.in")

    # Wrap the Protobuf_SRC_ROOT_FOLDER branch in `if(0)` ... `endif()` so it never runs.
    tools.replace_in_file(
        protobuf_module_cmake,
        "if(DEFINED Protobuf_SRC_ROOT_FOLDER)",
        "if(0)\nif(DEFINED Protobuf_SRC_ROOT_FOLDER)",
    )
    tools.replace_in_file(
        protobuf_module_cmake,
        "# Define upper case versions of output variables",
        "endif()",
    )
async def superstarify(
    self,
    ctx: Context,
    member: Member,
    duration: utils.Expiry,
    reason: str = None
) -> None:
    """
    Temporarily force a random superstar name (like Taylor Swift) to be the user's nickname.

    A unit of time should be appended to the duration.
    Units (∗case-sensitive):
    \u2003`y` - years
    \u2003`m` - months∗
    \u2003`w` - weeks
    \u2003`d` - days
    \u2003`h` - hours
    \u2003`M` - minutes∗
    \u2003`s` - seconds

    Alternatively, an ISO 8601 timestamp can be provided for the duration.

    An optional reason can be provided. If no reason is given, the original name will be shown
    in a generated reason.
    """
    # Bail out early if the member already has an active superstar infraction.
    if await utils.has_active_infraction(ctx, member, "superstar"):
        return

    # Post the infraction to the API
    # NOTE: `reason` is reused below both as the API record's reason and as the
    # audit-log reason for the nickname edit.
    reason = reason or f"old nick: {member.display_name}"
    infraction = await utils.post_infraction(ctx, member, "superstar", reason, duration)

    old_nick = member.display_name
    # Deterministic nickname derived from the infraction id and member id.
    forced_nick = self.get_nick(infraction["id"], member.id)
    expiry_str = format_infraction(infraction["expires_at"])

    # Apply the infraction and schedule the expiration task.
    # Ignore the member_update event so the mod log doesn't report our own nick edit.
    self.mod_log.ignore(constants.Event.member_update, member.id)
    await member.edit(nick=forced_nick, reason=reason)
    self.schedule_task(ctx.bot.loop, infraction["id"], infraction)

    # Send a DM to the user to notify them of their new infraction.
    await utils.notify_infraction(
        user=member,
        infr_type="Superstarify",
        expires_at=expiry_str,
        icon_url=utils.INFRACTION_ICONS["superstar"][0],
        reason=f"Your nickname didn't comply with our [nickname policy]({NICKNAME_POLICY_URL})."
    )

    # Send an embed with the infraction information to the invoking context.
    embed = Embed(
        title="Congratulations!",
        colour=constants.Colours.soft_orange,
        description=(
            f"Your previous nickname, **{old_nick}**, "
            f"was so bad that we have decided to change it. "
            f"Your new nickname will be **{forced_nick}**.\n\n"
            f"You will be unable to change your nickname until **{expiry_str}**.\n\n"
            "If you're confused by this, please read our "
            f"[official nickname policy]({NICKNAME_POLICY_URL})."
        )
    )
    await ctx.send(embed=embed)

    # Log to the mod log channel.
    await self.mod_log.send_log_message(
        icon_url=utils.INFRACTION_ICONS["superstar"][0],
        colour=Colour.gold(),
        title="Member achieved superstardom",
        thumbnail=member.avatar_url_as(static_format="png"),
        text=textwrap.dedent(f"""
            Member: {member.mention} (`{member.id}`)
            Actor: {ctx.message.author}
            Reason: {reason}
            Expires: {expiry_str}
            Old nickname: `{old_nick}`
            New nickname: `{forced_nick}`
        """),
        footer=f"ID {infraction['id']}"
    )
def test_build_modules(self):
    """Check that `cpp_info.build_modules` are loaded by the cmake_find_package_multi generator.

    Packages a recipe exporting two CMake modules — one listed in `build_modules`
    (which itself does a `find_package()`), one only discoverable via `builddirs` on
    CMAKE_MODULE_PATH — and asserts a consumer build can call a function defined in
    the second module.
    """
    conanfile = textwrap.dedent("""
        import os
        from conans import ConanFile, CMake

        class Conan(ConanFile):
            name = "test"
            version = "1.0"
            exports_sources = ["my-module.cmake", "FindFindModule.cmake"]

            def package(self):
                self.copy("*.cmake", dst="share/cmake")

            def package_info(self):
                # Only first module is defined
                # (the other one should be found by CMAKE_MODULE_PATH in builddirs)
                builddir = os.path.join("share", "cmake")
                module = os.path.join(builddir, "my-module.cmake")
                self.cpp_info.build_modules.append(module)
                self.cpp_info.builddirs = [builddir]
        """)
    # This is a module that has other find_package() calls
    my_module = textwrap.dedent("""
        find_package(FindModule REQUIRED)
        """)
    # This is a module that defines some functionality
    find_module = textwrap.dedent("""
        function(conan_message MESSAGE_OUTPUT)
            message(${ARGV${0}})
        endfunction()
        """)
    client = TestClient()
    client.save({
        "conanfile.py": conanfile,
        "my-module.cmake": my_module,
        "FindFindModule.cmake": find_module
    })
    client.run("create .")
    ref = ConanFileReference("test", "1.0", None, None)
    pref = PackageReference(ref, NO_SETTINGS_PACKAGE_ID, None)
    package_path = client.cache.package_layout(ref).package(pref)
    modules_path = os.path.join(package_path, "share", "cmake")
    # Both modules must have been packaged under share/cmake.
    self.assertEqual(set(os.listdir(modules_path)),
                     {"FindFindModule.cmake", "my-module.cmake"})

    consumer = textwrap.dedent("""
        from conans import ConanFile, CMake

        class Conan(ConanFile):
            name = "consumer"
            version = "1.0"
            settings = "os", "compiler", "build_type", "arch"
            exports_sources = ["CMakeLists.txt"]
            generators = "cmake_find_package_multi"
            requires = "test/1.0"

            def build(self):
                cmake = CMake(self)
                cmake.configure()
                cmake.build()
        """)
    # The consumer's CMakeLists calls conan_message(), which is only defined in
    # FindFindModule.cmake, pulled in transitively via my-module.cmake.
    cmakelists = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.0)
        project(test)
        find_package(test)
        conan_message("Printing using a external module!")
        """)
    client.save({"conanfile.py": consumer, "CMakeLists.txt": cmakelists})
    client.run("create .")
    self.assertIn("Printing using a external module!", client.out)
def rdlfc_to_html(text, node=None):
    """
    Convert an RDLFormatCode string to HTML.

    Performs three passes over *text*:
    1. Strips common indentation (first line handled separately so it can be
       flush-left while the rest is dedented as a group).
    2. Scans for RDLFormatCode bracket tags with a single combined regex and
       replaces each with its HTML equivalent. List tags are tracked with a pair
       of stacks so nesting and the implicit close of ``<li>`` items work.
    3. Runs the result through the markdown processor.

    ``node``, when given, supplies context for the [index], [index_parent],
    [name] and [instname] tags; without it those tags are emitted unchanged.
    """
    # --------------------------------------------------------------------------
    # Remove any common indentation
    # --------------------------------------------------------------------------
    linelist = text.splitlines()
    text = (
        linelist[0].lstrip()
        + "\n"
        + textwrap.dedent("\n".join(linelist[1:]))
    )

    # --------------------------------------------------------------------------
    # Parse and replace RDLFormatCode Tags
    # --------------------------------------------------------------------------
    # (group name, pattern) pairs combined into one alternation; the matched
    # group name (m.lastgroup) identifies which tag fired.
    token_spec = [
        ('b', r'\[b\]'),
        ('xb', r'\[/b\]'),
        ('i', r'\[i\]'),
        ('xi', r'\[/i\]'),
        ('u', r'\[u\]'),
        ('xu', r'\[/u\]'),
        ('color', r'\[color=[^\]]+\]'),
        ('xcolor', r'\[/color\]'),
        ('size', r'\[size=[^\]]+\]'),
        ('xsize', r'\[/size\]'),
        ('plainurl', r'\[url\].*?\[/url\]'),
        ('url', r'\[url=[^\]]+\]'),
        ('xurl', r'\[/url\]'),
        ('email', r'\[email\].*?\[/email\]'),
        ('img', r'\[img\].*?\[/img\]'),
        ('code', r'\[code\].*?\[/code\]'),
        ('list', r'\[list(?:=[^\]]+)?\]'),
        ('bullet', r'\[\*\]'),
        ('xlist', r'\[/list\]'),
        ('quote', r'\[quote\]'),
        ('xquote', r'\[/quote\]'),
        ('br', r'\[br\]'),
        ('lb', r'\[lb\]'),
        ('rb', r'\[rb\]'),
        ('p', r'\[p\]'),
        ('xp', r'\[/p\]'),
        ('sp', r'\[sp\]'),
        ('index', r'\[index\]'),
        ('index_parent', r'\[index_parent\]'),
        ('name', r'\[name\]'),
        ('instname', r'\[instname\]'),
    ]
    tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in token_spec)
    pos = 0
    text_segs = []
    # Parallel stacks tracking open lists: whether the next [*] is the first item
    # of its list, and which closing tag (</ul> or </ol>) ends it.
    is_first_bullet = []
    list_end_tag = []
    for m in re.finditer(tok_regex, text, re.DOTALL):
        start = m.start()
        end = m.end()

        # Emit prior text
        if start != pos:
            text_segs.append(text[pos:start])
        pos = end

        if m.lastgroup == 'b':
            text_segs.append("<b>")
        elif m.lastgroup == 'xb':
            text_segs.append("</b>")
        elif m.lastgroup == 'i':
            text_segs.append("<i>")
        elif m.lastgroup == 'xi':
            text_segs.append("</i>")
        elif m.lastgroup == 'u':
            text_segs.append("<u>")
        elif m.lastgroup == 'xu':
            text_segs.append("</u>")
        elif m.lastgroup == 'color':
            # Re-match to extract the color value from the already-matched tag.
            m2 = re.match(r'\[color=([^\]]+)\]', m.group(0))
            text_segs.append('<span style="color:%s">' % m2.group(1))
        elif m.lastgroup == 'xcolor':
            text_segs.append('</span>')
        elif m.lastgroup == 'size':
            m2 = re.match(r'\[size=([^\]]+)\]', m.group(0))
            text_segs.append('<span style="font-size:%s">' % m2.group(1))
        elif m.lastgroup == 'xsize':
            text_segs.append('</span>')
        elif m.lastgroup == 'plainurl':
            # [url]target[/url] — target doubles as the link text.
            m2 = re.match(r'\[url\](.*?)\[/url\]', m.group(0), re.DOTALL)
            text_segs.append('<a href="%s">%s</a>' % (m2.group(1).strip(), m2.group(1).strip()))
        elif m.lastgroup == 'url':
            m2 = re.match(r'\[url=([^\]]+)\]', m.group(0))
            text_segs.append('<a href="%s">' % m2.group(1).strip())
        elif m.lastgroup == 'xurl':
            text_segs.append('</a>')
        elif m.lastgroup == 'email':
            m2 = re.match(r'\[email\](.*?)\[/email\]', m.group(0), re.DOTALL)
            text_segs.append('<a href="mailto:%s">%s</a>' % (m2.group(1).strip(), m2.group(1).strip()))
        elif m.lastgroup == 'img':
            m2 = re.match(r'\[img\](.*?)\[/img\]', m.group(0), re.DOTALL)
            text_segs.append('<img src="%s">' % m2.group(1))
        elif m.lastgroup == 'code':
            # Trailing whitespace before [/code] is stripped by the \s* in the pattern.
            m2 = re.match(r'\[code\](.*?)\s*\[/code\]', m.group(0), re.DOTALL)
            text_segs.append('<code>%s</code>' % m2.group(1))
        elif m.lastgroup == 'list':
            # List start tag
            m2 = re.match(r'\[list(?:=([^\]]+))?\]', m.group(0))
            ltype = m2.group(1)
            if ltype is None:
                # Unordered list.
                text_segs.append('<ul>')
                is_first_bullet.append(True)
                list_end_tag.append('</ul>')
            elif ltype.strip() in ("1", "A", "a", "I", "i"):
                # Ordered list with one of the HTML numbering types.
                text_segs.append('<ol type="%s">' % ltype.strip())
                is_first_bullet.append(True)
                list_end_tag.append('</ol>')
            else:
                # Bad type. re-emit erronous list tag
                text_segs.append(m.group(0))
        elif m.lastgroup == 'bullet':
            if len(is_first_bullet) == 0:
                # Not inside a list tag. Re-emit erronous tag
                # (escaped so markdown doesn't reinterpret the brackets).
                text_segs.append("\\[\\*\\]")
            else:
                if not is_first_bullet[-1]:
                    # Close the previous item before opening the next.
                    text_segs.append("</li>")
                is_first_bullet[-1] = False
                text_segs.append("<li>")
        elif m.lastgroup == 'xlist':
            if len(list_end_tag) == 0:
                # Not inside a list tag. Re-emit erronous tag
                text_segs.append(m.group(0))
            else:
                if not is_first_bullet[-1]:
                    text_segs.append("</li>")
                text_segs.append(list_end_tag[-1])
                is_first_bullet.pop()
                list_end_tag.pop()
        elif m.lastgroup == 'quote':
            text_segs.append('&quot;')
        elif m.lastgroup == 'xquote':
            text_segs.append('&quot;')
        elif m.lastgroup == 'br':
            text_segs.append("<br>")
        elif m.lastgroup == 'lb':
            # Literal bracket, escaped for the markdown pass.
            text_segs.append("\\[")
        elif m.lastgroup == 'rb':
            text_segs.append("\\]")
        elif m.lastgroup == 'p':
            text_segs.append("\n\n<p>")
        elif m.lastgroup == 'xp':
            text_segs.append("</p>")
        elif m.lastgroup == 'sp':
            text_segs.append("&nbsp;")
        elif m.lastgroup == 'index':
            if (node is not None) and node.inst.is_array:
                subscripts = []
                if node.current_idx is None:
                    # Index is not known. Use range
                    for dim in node.inst.array_dimensions:
                        subscripts.append("[0:%d]" % dim)
                else:
                    # Index is known
                    for idx in node.current_idx:
                        subscripts.append("[%d]" % idx)
                range_str = "".join(subscripts)
                text_segs.append("<span class='rdlfc-index'>%s</span>" % range_str)
            else:
                # No node context or not an array: leave the tag as-is.
                text_segs.append(m.group(0))
        elif m.lastgroup == 'index_parent':
            if (node is not None) and (node.parent is not None) and node.parent.inst.is_array:
                subscripts = []
                if node.parent.current_idx is None:
                    # Index is not known. Use range
                    for dim in node.parent.inst.array_dimensions:
                        subscripts.append("[0:%d]" % dim)
                else:
                    # Index is known
                    for idx in node.parent.current_idx:
                        subscripts.append("[%d]" % idx)
                range_str = "".join(subscripts)
                text_segs.append("<span class='rdlfc-index_parent'>%s</span>" % range_str)
            else:
                text_segs.append(m.group(0))
        elif m.lastgroup == 'name':
            if node is not None:
                text_segs.append(node.get_property("name"))
            else:
                text_segs.append(m.group(0))
        elif m.lastgroup == 'instname':
            if node is not None:
                text_segs.append(node.inst.inst_name)
            else:
                text_segs.append(m.group(0))

    # Emit trailing text
    text_segs.append(text[pos:])

    text_out = "".join(text_segs)

    #---------------------------------------------------------------------------
    # Pass through markdown processor
    #---------------------------------------------------------------------------
    text_out = markdown.markdown(text_out)

    return text_out
def test_cmake_find_package_system_libs(self):
    """Check per-configuration system_libs handling in cmake_find_package_multi.

    Exports a package that declares a different system lib per build_type
    (``sys1d`` for Debug, ``sys1`` otherwise), then builds a consumer in both
    configurations and asserts the generated ``Test_SYSTEM_LIBS_<CONFIG>``
    variables and the Test::Test INTERFACE_LINK_LIBRARIES generator expressions
    contain exactly the expected libraries.
    """
    conanfile = textwrap.dedent("""
        from conans import ConanFile, tools

        class Test(ConanFile):
            name = "Test"
            version = "0.1"
            settings = "build_type"

            def package_info(self):
                self.cpp_info.libs = ["lib1"]
                if self.settings.build_type == "Debug":
                    self.cpp_info.system_libs.append("sys1d")
                else:
                    self.cpp_info.system_libs.append("sys1")
        """)
    client = TestClient()
    client.save({"conanfile.py": conanfile})
    client.run("export .")

    conanfile = textwrap.dedent("""
        [requires]
        Test/0.1

        [generators]
        cmake_find_package_multi
        """)
    # Consumer CMakeLists variants: identical except for which per-config
    # variables (RELEASE vs DEBUG) they print.
    cmakelists_release = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.1)
        project(consumer CXX)
        set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR})
        set(CMAKE_MODULE_PATH ${CMAKE_BINARY_DIR})
        find_package(Test)
        message("System libs: ${Test_SYSTEM_LIBS_RELEASE}")
        message("Libraries to Link: ${Test_LIBS_RELEASE}")
        get_target_property(tmp Test::Test INTERFACE_LINK_LIBRARIES)
        message("Target libs: ${tmp}")
        """)
    cmakelists_debug = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.1)
        project(consumer CXX)
        set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR})
        set(CMAKE_MODULE_PATH ${CMAKE_BINARY_DIR})
        find_package(Test)
        message("System libs: ${Test_SYSTEM_LIBS_DEBUG}")
        message("Libraries to Link: ${Test_LIBS_DEBUG}")
        get_target_property(tmp Test::Test INTERFACE_LINK_LIBRARIES)
        message("Target libs: ${tmp}")
        """)
    for build_type in ["Release", "Debug"]:
        cmakelists = cmakelists_release if build_type == "Release" else cmakelists_debug
        client.save({"conanfile.txt": conanfile, "CMakeLists.txt": cmakelists}, clean_first=True)
        client.run("install conanfile.txt --build missing -s build_type=%s" % build_type)
        client.run_command('cmake . -DCMAKE_BUILD_TYPE={0}'.format(build_type))

        library_name = "sys1d" if build_type == "Debug" else "sys1"
        self.assertIn("System libs: %s" % library_name, client.out)
        self.assertIn("Libraries to Link: lib1", client.out)
        # system_libs must not be flagged as missing package libraries.
        self.assertNotIn("-- Library %s not found in package, might be system one" % library_name,
                         client.out)
        # Expected INTERFACE_LINK_LIBRARIES generator expression: libs appear only
        # under the active configuration; all other configs collapse to empty.
        if build_type == "Release":
            target_libs = "$<$<CONFIG:Release>:lib1;sys1;" \
                          "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,SHARED_LIBRARY>:>;" \
                          "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>:>;" \
                          "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,EXECUTABLE>:>>;" \
                          "$<$<CONFIG:RelWithDebInfo>:;>;" \
                          "$<$<CONFIG:MinSizeRel>:;>;" \
                          "$<$<CONFIG:Debug>:;>"
        else:
            target_libs = "$<$<CONFIG:Release>:;>;" \
                          "$<$<CONFIG:RelWithDebInfo>:;>;" \
                          "$<$<CONFIG:MinSizeRel>:;>;" \
                          "$<$<CONFIG:Debug>:lib1;sys1d;" \
                          "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,SHARED_LIBRARY>:>;" \
                          "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>:>;" \
                          "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,EXECUTABLE>:>>"
        self.assertIn("Target libs: %s" % target_libs, client.out)
def handle_application_arguments(param):
    """Parse command-line arguments and apply them to the application parameter container.

    Side effects on ``param`` (a dcase_util-style parameter container):
    selects the active parameter set (``-s``) and forces overwrite mode (``-o``).
    ``-p`` and ``-d`` are terminal actions — they print parameters or download the
    dataset, respectively, then exit the process.

    Returns the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(
        prefix_chars='-+',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent('''\
            DCASE 2018
            Task 5: Monitoring of domestic activities based on multi-channel acoustics
            Baseline system
            ---------------------------------------------
            Author:  Gert Dekkers ( [email protected] ) and Toni Heittola ( [email protected] )
            KU Leuven University / Advanced Integrated Sensing lab (ADVISE)
            Tampere University of Technology / Audio Research Group
        '''))

    # Setup argument handling
    parser.add_argument('-v', '--version',
                        action='version',
                        version='%(prog)s ' + __version__)

    parser.add_argument('-m', '--mode',
                        choices=('dev', 'eval'),
                        default=None,
                        help="selector for application operation mode",
                        required=False,
                        dest='mode',
                        type=str)

    parser.add_argument('-o', '--overwrite',
                        help='overwrite mode',
                        dest='overwrite',
                        action='store_true',
                        required=False)

    parser.add_argument('-p', '--show_parameters',
                        help='show active application parameter set',
                        dest='show_parameters',
                        action='store_true',
                        required=False)

    parser.add_argument('-d', '--dataset',
                        help='download dataset to given path and exit',
                        dest='dataset_path',
                        required=False,
                        type=str)

    parser.add_argument('-s', '--parameter_set',
                        help='Parameter set id',
                        dest='parameter_set',
                        required=False,
                        type=str)

    # Parse arguments
    args = parser.parse_args()

    if args.parameter_set:
        # Set parameter set
        param['active_set'] = args.parameter_set
        param.update_parameter_set(args.parameter_set)

    if args.overwrite:
        # Inject overwrite into parameters
        param['general']['overwrite'] = True

    if args.show_parameters:
        # Process parameters, and clean up parameters a bit for showing
        # (work on a deep copy so the live container is untouched).
        param_ = copy.deepcopy(param)
        del param_['sets']
        del param_['defaults']
        for section in param_:
            if section.endswith('_method_parameters'):
                param_[section] = {}

        param_.log()
        sys.exit(0)

    if args.dataset_path:
        # Make sure given path exists
        dcase_util.utils.Path().create(
            paths=args.dataset_path
        )
        # Get dataset and initialize
        dcase_util.datasets.dataset_factory(
            dataset_class_name=param.get_path('dataset.parameters.dataset'),
            data_path=args.dataset_path,
        ).initialize().log()
        sys.exit(0)

    return args
async def infraction_edit(
    self,
    ctx: Context,
    infraction_id: t.Union[int, allowed_strings("l", "last", "recent")],  # noqa: F821
    duration: t.Union[Expiry, allowed_strings("p", "permanent"), None],   # noqa: F821
    *,
    reason: str = None
) -> None:
    """
    Edit the duration and/or the reason of an infraction.

    Durations are relative to the time of updating and should be appended with a unit of time.
    Units (∗case-sensitive):
    \u2003`y` - years
    \u2003`m` - months∗
    \u2003`w` - weeks
    \u2003`d` - days
    \u2003`h` - hours
    \u2003`M` - minutes∗
    \u2003`s` - seconds

    Use "l", "last", or "recent" as the infraction ID to specify that the most recent infraction
    authored by the command invoker should be edited.

    Use "p" or "permanent" to mark the infraction as permanent. Alternatively, an ISO 8601
    timestamp can be provided for the duration.
    """
    if duration is None and reason is None:
        # Unlike UserInputError, the error handler will show a specified message for BadArgument
        raise commands.BadArgument("Neither a new expiry nor a new reason was specified.")

    # Retrieve the previous infraction for its information.
    if isinstance(infraction_id, str):
        # "l"/"last"/"recent": resolve to the invoker's most recent infraction.
        params = {"actor__id": ctx.author.id, "ordering": "-inserted_at"}
        infractions = await self.bot.api_client.get("bot/infractions", params=params)

        if infractions:
            old_infraction = infractions[0]
            infraction_id = old_infraction["id"]
        else:
            await ctx.send(
                ":x: Couldn't find most recent infraction; you have never given an infraction."
            )
            return
    else:
        old_infraction = await self.bot.api_client.get(f"bot/infractions/{infraction_id}")

    request_data = {}
    confirm_messages = []
    log_text = ""

    if duration is not None and not old_infraction['active']:
        if reason is None:
            # Nothing else to change, so the whole edit is rejected.
            await ctx.send(":x: Cannot edit the expiration of an expired infraction.")
            return
        # A reason was also given; keep it but ignore the expiry change.
        confirm_messages.append("expiry unchanged (infraction already expired)")
    elif isinstance(duration, str):
        # "p"/"permanent": clear the expiry.
        request_data['expires_at'] = None
        confirm_messages.append("marked as permanent")
    elif duration is not None:
        request_data['expires_at'] = duration.isoformat()
        expiry = time.format_infraction_with_duration(request_data['expires_at'])
        confirm_messages.append(f"set to expire on {expiry}")
    else:
        confirm_messages.append("expiry unchanged")

    if reason:
        request_data['reason'] = reason
        confirm_messages.append("set a new reason")
        log_text += f"""
            Previous reason: {old_infraction['reason']}
            New reason: {reason}
        """.rstrip()
    else:
        confirm_messages.append("reason unchanged")

    # Update the infraction
    new_infraction = await self.bot.api_client.patch(
        f'bot/infractions/{infraction_id}',
        json=request_data,
    )

    # Re-schedule infraction if the expiration has been updated
    if 'expires_at' in request_data:
        # A scheduled task should only exist if the old infraction wasn't permanent
        if old_infraction['expires_at']:
            self.infractions_cog.scheduler.cancel(new_infraction['id'])

        # If the infraction was not marked as permanent, schedule a new expiration task
        if request_data['expires_at']:
            self.infractions_cog.schedule_expiration(new_infraction)

        log_text += f"""
            Previous expiry: {old_infraction['expires_at'] or "Permanent"}
            New expiry: {new_infraction['expires_at'] or "Permanent"}
        """.rstrip()

    changes = ' & '.join(confirm_messages)
    await ctx.send(f":ok_hand: Updated infraction #{infraction_id}: {changes}")

    # Get information about the infraction's user
    user_id = new_infraction['user']
    user = ctx.guild.get_member(user_id)

    if user:
        user_text = messages.format_user(user)
        thumbnail = user.avatar_url_as(static_format="png")
    else:
        # Member may have left the guild; fall back to a raw mention, no avatar.
        user_text = f"<@{user_id}>"
        thumbnail = None

    await self.mod_log.send_log_message(
        icon_url=constants.Icons.pencil,
        colour=discord.Colour.blurple(),
        title="Infraction edited",
        thumbnail=thumbnail,
        text=textwrap.dedent(f"""
            Member: {user_text}
            Actor: <@{new_infraction['actor']}>
            Edited by: {ctx.message.author.mention}{log_text}
        """)
    )
def test_native_export_multi(self):
    """
    bye depends on hello. Both use find_package in their CMakeLists.txt
    The consumer depends on bye, using the cmake_find_package_multi generator

    Creates Debug and Release packages for both dependencies, installs the
    consumer in both configurations, checks that the generated byeConfig.cmake
    uses find_dependency(... NO_MODULE), and finally builds and runs the
    example in both configurations (multi-config on Windows, one configure
    per build type elsewhere).
    """
    c = TestClient()
    project_folder_name = "project_targets"
    assets_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               "assets/cmake_find_package_multi")
    c.copy_from_assets(assets_path, ["bye", "hello", project_folder_name])

    # Create packages for hello and bye
    for p in ("hello", "bye"):
        for bt in ("Debug", "Release"):
            c.run("create {} user/channel -s build_type={}".format(p, bt))

    with c.chdir(project_folder_name):
        # Save conanfile and example
        conanfile = textwrap.dedent("""
            [requires]
            bye/1.0@user/channel

            [generators]
            cmake_find_package_multi
            """)
        example_cpp = textwrap.dedent("""
            #include <iostream>
            #include "bye.h"

            int main() {
                bye();
            }
            """)
        c.save({"conanfile.txt": conanfile, "example.cpp": example_cpp})

        with c.chdir("build"):
            for bt in ("Debug", "Release"):
                c.run("install .. user/channel -s build_type={}".format(bt))

            # Test that we are using find_dependency with the NO_MODULE option
            # to skip finding first possible FindBye somewhere
            self.assertIn("find_dependency(hello REQUIRED NO_MODULE)",
                          load(os.path.join(c.current_folder, "byeConfig.cmake")))

            if platform.system() == "Windows":
                # Multi-config generator: one configure, build/run both configs.
                c.run_command('cmake .. -G "Visual Studio 15 Win64"')
                c.run_command('cmake --build . --config Debug')
                c.run_command('cmake --build . --config Release')

                c.run_command('Debug\\example.exe')
                self.assertIn("Hello World Debug!", c.out)
                self.assertIn("bye World Debug!", c.out)

                c.run_command('Release\\example.exe')
                self.assertIn("Hello World Release!", c.out)
                self.assertIn("bye World Release!", c.out)
            else:
                # Single-config generators: reconfigure and rebuild per build type,
                # removing the binary between runs so each run is freshly built.
                for bt in ("Debug", "Release"):
                    c.run_command('cmake .. -DCMAKE_BUILD_TYPE={}'.format(bt))
                    c.run_command('cmake --build .')
                    c.run_command('./example')
                    self.assertIn("Hello World {}!".format(bt), c.out)
                    self.assertIn("bye World {}!".format(bt), c.out)
                    os.remove(os.path.join(c.current_folder, "example"))
)) self[start.name] = start self.relate(start) if __name__ == '__main__': import sys import codecs from textwrap import dedent from this import s as zen_encoded from openpack.basepack import CoreProperties trun = dedent(""" <w:p> <w:r> <w:t>%s</w:t> </w:r> </w:p> """).lstrip() raw_zen = codecs.decode(zen_encoded, 'rot13') zen = "".join(trun % line for line in raw_zen.splitlines()) body = (dedent(""" <?xml version="1.0" encoding="utf-8"?> <w:document xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main"> <w:body> <w:p> <w:r><w:t>Generated by:</w:t></w:r> </w:p> <w:p>
class IndexOpsMixin(object):
    """ common ops mixin to support a unified interface / docs for Series /
    Index """

    # ndarray compatibility
    __array_priority__ = 1000

    def transpose(self, *args, **kwargs):
        """ return the transpose, which is by definition self """
        # args/kwargs accepted only for numpy API compatibility; rejected if
        # non-default.
        nv.validate_transpose(args, kwargs)
        return self

    T = property(transpose, doc="return the transpose, which is by "
                                "definition self")

    @property
    def _is_homogeneous_type(self):
        """Whether the object has a single dtype.

        By definition, Series and Index are always considered homogeneous.
        A MultiIndex may or may not be homogeneous, depending on the
        dtypes of the levels.

        See Also
        --------
        DataFrame._is_homogeneous_type
        MultiIndex._is_homogeneous_type
        """
        return True

    @property
    def shape(self):
        """ return a tuple of the shape of the underlying data """
        return self._values.shape

    @property
    def ndim(self):
        """ return the number of dimensions of the underlying data,
        by definition 1 """
        return 1

    def item(self):
        """ return the first element of the underlying data as a python
        scalar """
        try:
            return self.values.item()
        except IndexError:
            # copy numpy's message here because Py26 raises an IndexError
            raise ValueError('can only convert an array of size 1 to a '
                             'Python scalar')

    @property
    def data(self):
        """ return the data pointer of the underlying data """
        # Deprecated ndarray-compat accessor.
        warnings.warn("{obj}.data is deprecated and will be removed "
                      "in a future version".format(obj=type(self).__name__),
                      FutureWarning, stacklevel=2)
        return self.values.data

    @property
    def itemsize(self):
        """ return the size of the dtype of the item of the underlying data """
        # Deprecated ndarray-compat accessor.
        warnings.warn("{obj}.itemsize is deprecated and will be removed "
                      "in a future version".format(obj=type(self).__name__),
                      FutureWarning, stacklevel=2)
        return self._ndarray_values.itemsize

    @property
    def nbytes(self):
        """ return the number of bytes in the underlying data """
        return self._values.nbytes

    @property
    def strides(self):
        """ return the strides of the underlying data """
        # Deprecated ndarray-compat accessor.
        warnings.warn("{obj}.strides is deprecated and will be removed "
                      "in a future version".format(obj=type(self).__name__),
                      FutureWarning, stacklevel=2)
        return self._ndarray_values.strides

    @property
    def size(self):
        """ return the number of elements in the underlying data """
        return self._values.size

    @property
    def flags(self):
        """ return the ndarray.flags for the underlying data """
        # Deprecated ndarray-compat accessor.
        warnings.warn("{obj}.flags is deprecated and will be removed "
                      "in a future version".format(obj=type(self).__name__),
                      FutureWarning, stacklevel=2)
        return self.values.flags

    @property
    def base(self):
        """ return the base object if the memory of the underlying data is
        shared """
        # Deprecated ndarray-compat accessor.
        warnings.warn("{obj}.base is deprecated and will be removed "
                      "in a future version".format(obj=type(self).__name__),
                      FutureWarning, stacklevel=2)
        return self.values.base

    @property
    def _ndarray_values(self):
        # type: () -> np.ndarray
        """The data as an ndarray, possibly losing information.

        The expectation is that this is cheap to compute, and is primarily
        used for interacting with our indexers.

        - categorical -> codes
        """
        if is_extension_array_dtype(self):
            return self.values._ndarray_values
        return self.values

    @property
    def empty(self):
        # True when the object holds no elements.
        return not self.size

    def max(self):
        """
        Return the maximum value of the Index.

        Returns
        -------
        scalar
            Maximum value.

        See Also
        --------
        Index.min : Return the minimum value in an Index.
        Series.max : Return the maximum value in a Series.
        DataFrame.max : Return the maximum values in a DataFrame.

        Examples
        --------
        >>> idx = pd.Index([3, 2, 1])
        >>> idx.max()
        3

        >>> idx = pd.Index(['c', 'b', 'a'])
        >>> idx.max()
        'c'

        For a MultiIndex, the maximum is determined lexicographically.

        >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
        >>> idx.max()
        ('b', 2)
        """
        return nanops.nanmax(self.values)

    def argmax(self, axis=None):
        """
        return a ndarray of the maximum argument indexer

        See also
        --------
        numpy.ndarray.argmax
        """
        return nanops.nanargmax(self.values)

    def min(self):
        """
        Return the minimum value of the Index.

        Returns
        -------
        scalar
            Minimum value.

        See Also
        --------
        Index.max : Return the maximum value of the object.
        Series.min : Return the minimum value in a Series.
        DataFrame.min : Return the minimum values in a DataFrame.

        Examples
        --------
        >>> idx = pd.Index([3, 2, 1])
        >>> idx.min()
        1

        >>> idx = pd.Index(['c', 'b', 'a'])
        >>> idx.min()
        'a'

        For a MultiIndex, the minimum is determined lexicographically.

        >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
        >>> idx.min()
        ('a', 1)
        """
        return nanops.nanmin(self.values)

    def argmin(self, axis=None):
        """
        return a ndarray of the minimum argument indexer

        See also
        --------
        numpy.ndarray.argmin
        """
        return nanops.nanargmin(self.values)

    def tolist(self):
        """
        Return a list of the values.

        These are each a scalar type, which is a Python scalar
        (for str, int, float) or a pandas scalar
        (for Timestamp/Timedelta/Interval/Period)

        See Also
        --------
        numpy.ndarray.tolist
        """
        # Datetime-like values are boxed per-element; extension arrays
        # iterate natively; everything else defers to ndarray.tolist.
        if is_datetimelike(self._values):
            return [com.maybe_box_datetimelike(x) for x in self._values]
        elif is_extension_array_dtype(self._values):
            return list(self._values)
        else:
            return self._values.tolist()

    def __iter__(self):
        """
        Return an iterator of the values.

        These are each a scalar type, which is a Python scalar
        (for str, int, float) or a pandas scalar
        (for Timestamp/Timedelta/Interval/Period)
        """
        return iter(self.tolist())

    @cache_readonly
    def hasnans(self):
        """ return if I have any nans; enables various perf speedups """
        return bool(isna(self).any())

    def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
                filter_type=None, **kwds):
        """ perform the reduction type operation if we can """
        # Dispatch by method name; raise if this object does not implement
        # the requested reduction.
        func = getattr(self, name, None)
        if func is None:
            raise TypeError("{klass} cannot perform the operation {op}".format(
                klass=self.__class__.__name__, op=name))
        return func(**kwds)

    def _map_values(self, mapper, na_action=None):
        """An internal function that maps values using the input
        correspondence (which can be a dict, Series, or function).

        Parameters
        ----------
        mapper : function, dict, or Series
            The input correspondence object
        na_action : {None, 'ignore'}
            If 'ignore', propagate NA values, without passing them to the
            mapping function

        Returns
        -------
        applied : Union[Index, MultiIndex], inferred
            The output of the mapping function applied to the index.
            If the function returns a tuple with more than one element
            a MultiIndex will be returned.
        """
        # we can fastpath dict/Series to an efficient map
        # as we know that we are not going to have to yield
        # python types
        if isinstance(mapper, dict):
            if hasattr(mapper, '__missing__'):
                # If a dictionary subclass defines a default value method,
                # convert mapper to a lookup function (GH #15999).
                dict_with_default = mapper
                mapper = lambda x: dict_with_default[x]
            else:
                # Dictionary does not have a default. Thus it's safe to
                # convert to an Series for efficiency.
                # we specify the keys here to handle the
                # possibility that they are tuples
                from pandas import Series
                mapper = Series(mapper)

        if isinstance(mapper, ABCSeries):
            # Since values were input this means we came from either
            # a dict or a series and mapper should be an index
            if is_extension_type(self.dtype):
                values = self._values
            else:
                values = self.values

            # Align via indexer instead of calling the mapper per element.
            indexer = mapper.index.get_indexer(values)
            new_values = algorithms.take_1d(mapper._values, indexer)

            return new_values

        # we must convert to python types
        if is_extension_type(self.dtype):
            values = self._values
            if na_action is not None:
                raise NotImplementedError
            map_f = lambda values, f: values.map(f)
        else:
            values = self.astype(object)
            values = getattr(values, 'values', values)
            if na_action == 'ignore':
                def map_f(values, f):
                    # Mask NA positions so they pass through unmapped.
                    return lib.map_infer_mask(values, f,
                                              isna(values).view(np.uint8))
            else:
                map_f = lib.map_infer

        # mapper is a function
        new_values = map_f(values, mapper)

        return new_values

    def value_counts(self, normalize=False, sort=True, ascending=False,
                     bins=None, dropna=True):
        """
        Return a Series containing counts of unique values.

        The resulting object will be in descending order so that the
        first element is the most frequently-occurring element.
        Excludes NA values by default.

        Parameters
        ----------
        normalize : boolean, default False
            If True then the object returned will contain the relative
            frequencies of the unique values.
        sort : boolean, default True
            Sort by values.
        ascending : boolean, default False
            Sort in ascending order.
        bins : integer, optional
            Rather than count values, group them into half-open bins,
            a convenience for ``pd.cut``, only works with numeric data.
        dropna : boolean, default True
            Don't include counts of NaN.

        Returns
        -------
        counts : Series

        See Also
        --------
        Series.count: number of non-NA elements in a Series
        DataFrame.count: number of non-NA elements in a DataFrame

        Examples
        --------
        >>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
        >>> index.value_counts()
        3.0    2
        4.0    1
        2.0    1
        1.0    1
        dtype: int64

        With `normalize` set to `True`, returns the relative frequency by
        dividing all values by the sum of values.

        >>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
        >>> s.value_counts(normalize=True)
        3.0    0.4
        4.0    0.2
        2.0    0.2
        1.0    0.2
        dtype: float64

        **bins**

        Bins can be useful for going from a continuous variable to a
        categorical variable; instead of counting unique
        apparitions of values, divide the index in the specified
        number of half-open bins.

        >>> s.value_counts(bins=3)
        (2.0, 3.0]      2
        (0.996, 2.0]    2
        (3.0, 4.0]      1
        dtype: int64

        **dropna**

        With `dropna` set to `False` we can also see NaN index values.

        >>> s.value_counts(dropna=False)
        3.0    2
        NaN    1
        4.0    1
        2.0    1
        1.0    1
        dtype: int64
        """
        from pandas.core.algorithms import value_counts
        result = value_counts(self, sort=sort, ascending=ascending,
                              normalize=normalize, bins=bins, dropna=dropna)
        return result

    def unique(self):
        # Prefer the array's own unique (extension/datetime-like arrays);
        # fall back to the generic algorithm for plain ndarrays.
        values = self._values

        if hasattr(values, 'unique'):
            result = values.unique()
        else:
            from pandas.core.algorithms import unique1d
            result = unique1d(values)

        return result

    def nunique(self, dropna=True):
        """
        Return number of unique elements in the object.

        Excludes NA values by default.

        Parameters
        ----------
        dropna : boolean, default True
            Don't include NaN in the count.

        Returns
        -------
        nunique : int
        """
        uniqs = self.unique()
        n = len(uniqs)
        # At most one NA appears among the uniques; subtract it if present.
        if dropna and isna(uniqs).any():
            n -= 1
        return n

    @property
    def is_unique(self):
        """
        Return boolean if values in the object are unique

        Returns
        -------
        is_unique : boolean
        """
        return self.nunique() == len(self)

    @property
    def is_monotonic(self):
        """
        Return boolean if values in the object are
        monotonic_increasing

        .. versionadded:: 0.19.0

        Returns
        -------
        is_monotonic : boolean
        """
        from pandas import Index
        return Index(self).is_monotonic

    is_monotonic_increasing = is_monotonic

    @property
    def is_monotonic_decreasing(self):
        """
        Return boolean if values in the object are
        monotonic_decreasing

        .. versionadded:: 0.19.0

        Returns
        -------
        is_monotonic_decreasing : boolean
        """
        from pandas import Index
        return Index(self).is_monotonic_decreasing

    def memory_usage(self, deep=False):
        """
        Memory usage of the values

        Parameters
        ----------
        deep : bool
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption

        Returns
        -------
        bytes used

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False or if used on PyPy

        See Also
        --------
        numpy.ndarray.nbytes
        """
        if hasattr(self.values, 'memory_usage'):
            return self.values.memory_usage(deep=deep)

        v = self.values.nbytes
        # Only CPython can introspect per-object memory of object dtype.
        if deep and is_object_dtype(self) and not PYPY:
            v += lib.memory_usage_of_objects(self.values)
        return v

    @Substitution(
        values='', order='', size_hint='',
        sort=textwrap.dedent("""\
            sort : boolean, default False
                Sort `uniques` and shuffle `labels` to maintain the
                relationship.
            """))
    @Appender(algorithms._shared_docs['factorize'])
    def factorize(self, sort=False, na_sentinel=-1):
        return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)

    _shared_docs['searchsorted'] = (
        """Find indices where elements should be inserted to maintain order.

        Find the indices into a sorted %(klass)s `self` such that, if the
        corresponding elements in `value` were inserted before the indices,
        the order of `self` would be preserved.

        Parameters
        ----------
        value : array_like
            Values to insert into `self`.
        side : {'left', 'right'}, optional
            If 'left', the index of the first suitable location found is
            given.  If 'right', return the last such index.  If there is no
            suitable index, return either 0 or N (where N is the length of
            `self`).
        sorter : 1-D array_like, optional
            Optional array of integer indices that sort `self` into ascending
            order. They are typically the result of ``np.argsort``.

        Returns
        -------
        indices : array of ints
            Array of insertion points with the same shape as `value`.

        See Also
        --------
        numpy.searchsorted

        Notes
        -----
        Binary search is used to find the required insertion points.

        Examples
        --------
        >>> x = pd.Series([1, 2, 3])
        >>> x
        0    1
        1    2
        2    3
        dtype: int64

        >>> x.searchsorted(4)
        array([3])

        >>> x.searchsorted([0, 4])
        array([0, 3])

        >>> x.searchsorted([1, 3], side='left')
        array([0, 2])

        >>> x.searchsorted([1, 3], side='right')
        array([1, 3])

        >>> x = pd.Categorical(['apple', 'bread', 'bread', 'cheese', 'milk'],
                               ordered=True)
        [apple, bread, bread, cheese, milk]
        Categories (4, object): [apple < bread < cheese < milk]

        >>> x.searchsorted('bread')
        array([1])     # Note: an array, not a scalar

        >>> x.searchsorted(['bread'], side='right')
        array([3])
        """)

    @Substitution(klass='IndexOpsMixin')
    @Appender(_shared_docs['searchsorted'])
    def searchsorted(self, value, side='left', sorter=None):
        # needs coercion on the key (DatetimeIndex does already)
        return self.values.searchsorted(value, side=side, sorter=sorter)

    def drop_duplicates(self, keep='first', inplace=False):
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if isinstance(self, ABCIndexClass):
            # Fast path: an Index known to be unique needs no work.
            if self.is_unique:
                return self._shallow_copy()

        duplicated = self.duplicated(keep=keep)
        result = self[np.logical_not(duplicated)]
        if inplace:
            return self._update_inplace(result)
        else:
            return result

    def duplicated(self, keep='first'):
        from pandas.core.algorithms import duplicated
        if isinstance(self, ABCIndexClass):
            # Fast path: unique Index has no duplicates by construction.
            if self.is_unique:
                return np.zeros(len(self), dtype=np.bool)
            return duplicated(self, keep=keep)
        else:
            # Series path: wrap the boolean mask with the original index.
            return self._constructor(duplicated(self, keep=keep),
                                     index=self.index).__finalize__(self)

    # ----------------------------------------------------------------------
    # abstracts

    def _update_inplace(self, result, **kwargs):
        raise AbstractMethodError(self)
import os
import sys
import textwrap
import platform
import signal

# requirements
# Import-time guard: GTK3 (PyGObject) is a hard requirement.  On failure we
# print platform-specific installation help when we have it, otherwise
# re-raise the original ImportError.  Either way the process exits.
try:
    from gi.repository import Gtk
except ImportError as e:
    # Per-platform, per-module help text.  NOTE(review): "avaiable" is a typo
    # in this user-facing string ("available") — left unchanged here.
    import_error_help = {
        'Windows': {
            'gi': textwrap.dedent("""
                Please install GTK3. This can be a little tricky on windows.
                The most up to date help should be avaiable here:
                http://stackoverflow.com/a/6008390/1695680
                """),
        }
    }
    # e.name is the module that failed to import (ImportError attribute).
    if platform.system() in import_error_help\
            and e.name in import_error_help[platform.system()]:
        print("Fatal Error:")
        print(e.msg)
        print(import_error_help[platform.system()][e.name])
    else:
        raise(e)
    # Reached only on the help-text branch; the raise above exits otherwise.
    sys.exit()
remaining = "Inactive" if expires_at is None: expires = "*Permanent*" else: date_from = datetime.strptime(created, time.INFRACTION_FORMAT) expires = time.format_infraction_with_duration( expires_at, date_from) lines = textwrap.dedent(f""" {"**===============**" if active else "==============="} Status: {"__**Active**__" if active else "Inactive"} User: {user_str} Type: **{infraction["type"]}** Shadow: {infraction["hidden"]} Created: {created} Expires: {expires} Remaining: {remaining} Actor: <@{infraction["actor"]["id"]}> ID: `{infraction["id"]}` Reason: {infraction["reason"] or "*None*"} {"**===============**" if active else "==============="} """) return lines.strip() # endregion # This cannot be static (must have a __func__ attribute). async def cog_check(self, ctx: Context) -> bool: """Only allow moderators inside moderator channels to invoke the commands in this cog.""" checks = [
frequency = PER_ALWAYS distros = ['all'] schema = { 'id': 'cc_resizefs', 'name': 'Resizefs', 'title': 'Resize filesystem', 'description': dedent("""\ Resize a filesystem to use all avaliable space on partition. This module is useful along with ``cc_growpart`` and will ensure that if the root partition has been resized the root filesystem will be resized along with it. By default, ``cc_resizefs`` will resize the root partition and will block the boot process while the resize command is running. Optionally, the resize operation can be performed in the background while cloud-init continues running modules. This can be enabled by setting ``resize_rootfs`` to ``true``. This module can be disabled altogether by setting ``resize_rootfs`` to ``false``."""), 'distros': distros, 'examples': ['resize_rootfs: false # disable root filesystem resize operation'], 'frequency': PER_ALWAYS, 'type': 'object', 'properties': { 'resize_rootfs': { 'enum': [True, False, NOBLOCK],
from roycemorebot.checks import has_any_role_check, has_no_roles_check from roycemorebot.constants import CLASS_ROLES, Channels, MOD_ROLES from roycemorebot.constants import ClassRoles as CRoles from roycemorebot.constants import Emoji, Guild, Messages log = logging.getLogger(__name__) WELCOME_MESSAGE = textwrap.dedent(f"""\ **__To get started:__** - Read the rules if you didn't already. - **Go to the [#roles]({Messages.roles}) channel** and get a Class Role. *Note: __This is mandatory!__ Read Rule #6.* - Set your nickname to your real name (`/nick NAME`) to help others identify you. - To contact mods, DM ModMail#5460 or ping them if they are needed immediately. - Server invite link is {Guild.invite_link}. Invite your friends! All of this, and more, is described in [#welcome]({Messages.welcome}). """) class ClassRoles(commands.Cog, name="Class Roles"): """User-assigned roles based on their grade.""" def __init__(self, bot: commands.Bot): self.bot = bot @commands.Cog.listener()
def main(self):
    """Entry point for ``ceph-volume lvm zap``.

    Builds the argument parser, prints help when invoked with no
    arguments, then dispatches to OSD-based zapping (``--osd-id`` /
    ``--osd-fsid``) or direct device zapping.
    """
    sub_command_help = dedent("""
    Zaps the given logical volume(s), raw device(s) or partition(s) for
    reuse by ceph-volume.
    If given a path to a logical volume it must be in the format of vg/lv.
    Any filesystems present on the given device, vg/lv, or partition will
    be removed and all data will be purged.

    If the logical volume, raw device or partition is being used for any
    ceph related mount points they will be unmounted.

    However, the lv or partition will be kept intact.

    Example calls for supported scenarios:

      Zapping a logical volume:

          ceph-volume lvm zap {vg name/lv name}

      Zapping a partition:

          ceph-volume lvm zap /dev/sdc1

      Zapping many raw devices:

          ceph-volume lvm zap /dev/sda /dev/sdb /db/sdc

      Zapping devices associated with an OSD ID:

          ceph-volume lvm zap --osd-id 1

        Optionally include the OSD FSID

          ceph-volume lvm zap --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D

    If the --destroy flag is given and you are zapping a raw device or
    partition then all vgs and lvs that exist on that raw device or
    partition will be destroyed.

    This is especially useful if a raw device or partition was used by
    ceph-volume lvm create or ceph-volume lvm prepare commands previously
    and now you want to reuse that device.

    For example:

      ceph-volume lvm zap /dev/sda --destroy

    If the --destroy flag is given and you are zapping an lv then the lv
    is still kept intact for reuse.

    """)
    parser = argparse.ArgumentParser(
        prog='ceph-volume lvm zap',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=sub_command_help,
    )

    parser.add_argument(
        'devices',
        metavar='DEVICES',
        nargs='*',
        # gpt_ok=True: GPT-partitioned devices are acceptable zap targets.
        type=arg_validators.ValidDevice(gpt_ok=True),
        default=[],
        help='Path to one or many lv (as vg/lv), partition (as /dev/sda1) or device (as /dev/sda)'
    )
    parser.add_argument(
        '--destroy',
        action='store_true',
        default=False,
        help='Destroy all volume groups and logical volumes if you are zapping a raw device or partition',
    )
    parser.add_argument(
        '--osd-id',
        help='Specify an OSD ID to detect associated devices for zapping',
    )
    parser.add_argument(
        '--osd-fsid',
        help='Specify an OSD FSID to detect associated devices for zapping',
    )
    # No CLI arguments at all: show the long help and bail out.
    if len(self.argv) == 0:
        print(sub_command_help)
        return
    self.args = parser.parse_args(self.argv)
    # OSD identifiers take precedence over explicit device paths.
    if self.args.osd_id or self.args.osd_fsid:
        self.zap_osd()
    else:
        self.zap()
import datetime import textwrap import unittest from wpull.ftp.ls.listing import FileEntry from wpull.ftp.util import parse_address, reply_code_tuple, \ parse_machine_listing, machine_listings_to_file_entries SAMPLE_LISTING_1 = textwrap.dedent('''\ Type=cdir;Modify=19981107085215;Perm=el; tmp Type=cdir;Modify=19981107085215;Perm=el; /tmp Type=pdir;Modify=19990112030508;Perm=el; .. Type=file;Size=25730;Modify=19940728095854;Perm=; capmux.tar.z ''') class TestUtil(unittest.TestCase): def test_parse_address(self): self.assertEqual( ('127.0.0.1', 34805), parse_address('227 Now Entering Passive Mode (127,0,0,1,135,245)')) self.assertEqual( ('127.0.0.1', 254), parse_address('227 Passive Mode! (127, 000, 000, 001, 000, 254)')) def test_reply_code_tuple(self): self.assertEqual((1, 2, 3), reply_code_tuple(123)) self.assertEqual((5, 0, 1), reply_code_tuple(501)) self.assertEqual((0, 0, 1), reply_code_tuple(1)) def test_parse_machine_listing(self):
def lint(modified_only=False, project_directory='.', verbose=False):
    """Lint project.

    Loads ``.bellybutton.yml`` from *project_directory*, runs every rule
    against each Python file found under it, and prints one line (or a
    verbose multi-line report) per violation.

    Parameters
    ----------
    modified_only : bool
        Accepted for CLI compatibility; not consulted in this code path.
    project_directory : str
        Root directory containing the configuration and files to lint.
    verbose : bool
        If True, print a rich multi-line report including the offending
        source line and the rule's example/instead guidance.

    Returns
    -------
    int
        0 when linting succeeds, 1 when the configuration cannot be
        loaded/parsed or any rule is violated (suitable as an exit code).
    """
    config_path = os.path.abspath(
        os.path.join(project_directory, '.bellybutton.yml'))
    try:
        with open(config_path, 'r') as f:
            rules = load_config(f)
    except IOError:
        message = "ERROR: Configuration file path `{}` does not exist."
        print(error(message.format(config_path)))
        return 1
    except InvalidNode as e:
        message = "ERROR: When parsing {}: {!r}"
        print(error(message.format(config_path, e)))
        return 1

    if verbose:
        failure_message = dedent("""
        \033[95m{path}:{lineno}\033[0m\t\033[1;95;4m{rule.name}\033[0m
        \033[1mDescription\033[0m: {rule.description}
        \033[1mLine\033[0m:
        {line}
        \033[1mExample\033[0m:
        {rule.example}
        \033[1mInstead\033[0m:
        {rule.instead}
        """).lstrip()
    else:
        failure_message = "{path}:{lineno}\t{rule.name}: {rule.description}"

    num_files = 0
    failures = 0
    files = walk_python_files(os.path.abspath(project_directory))
    for filepath, file_contents in files:
        relpath = os.path.relpath(filepath, project_directory)
        linting_results = list(lint_file(filepath, file_contents, rules))
        if not linting_results:
            continue
        num_files += 1
        # Split the file once, not once per failure (it was previously
        # re-split inside the loop below — O(file size) per violation).
        source_lines = file_contents.splitlines()
        failure_results = (result for result in linting_results
                           if not result.succeeded)
        for failure in failure_results:
            failures += 1
            print(
                failure_message.format(
                    path=relpath,
                    lineno=failure.lineno,
                    # lineno is 1-based; index into the 0-based line list.
                    line=source_lines[failure.lineno - 1],
                    rule=failure.rule,
                ))

    final_message = "Linting {} ({} rule{}, {} file{}, {} violation{}).".format(
        'failed' if failures else 'succeeded',
        len(rules),
        '' if len(rules) == 1 else 's',
        num_files,
        '' if num_files == 1 else 's',
        failures,
        '' if failures == 1 else 's',
    )
    print((error if failures else success)(final_message))
    return 1 if failures else 0
dest='docdest', help=d[-1]) # *** compilation mode (dev) parser.add_argument( '-d', '--debug', default=False, action='store_true', dest='debug', help="Development mode") parser.add_argument( '-v', '--version', default=False, action='version', version=textwrap.dedent("\n".join([ '%(prog)s '+__version__, __copyright__,__license__])), help="Print version information and exit.") # read command-line arguments and options args = parser.parse_args() needHeader=True for d in documents: if eval('args.'+d[0]): data["doctype"]=d[0] data["teXfile"]=eval('args.'+d[0]+'[0]') needHeader=False break for arg in ["docvers", "docdest", "debug"]: if eval('args.'+arg):
def assert_installed(self, pkg_name, editable=True, with_files=[],
                     without_files=[], without_egg_link=False,
                     use_user_site=False, sub_dir=False):
    """Assert that *pkg_name* was installed as expected by a pip run.

    Checks the egg-link file (presence/absence and contents), whether
    ``easy-install.pth`` was updated, whether the package directory was
    created, and that expected/unexpected files exist under it.

    Parameters
    ----------
    pkg_name : str
        Name of the installed package.
    editable : bool
        If True, expect an editable (``pip install -e``) layout under
        ``src/``; otherwise expect a regular site-packages install (and
        implicitly no egg-link).
    with_files / without_files : list
        Paths (relative to the package dir) that must / must not have
        been created.  ``os.curdir`` in *without_files* means the package
        directory itself must not exist.
    without_egg_link : bool
        Expect no ``<pkg>.egg-link`` file.
    use_user_site : bool
        Look in the per-user site directory instead of site-packages.
    sub_dir : str or False
        Optional subdirectory of the source checkout containing the
        package.

    Raises
    ------
    TestFailure
        If any expectation does not hold.
    """
    e = self.test_env

    if editable:
        pkg_dir = e.venv / 'src' / pkg_name.lower()
        # If package was installed in a sub directory
        if sub_dir:
            pkg_dir = pkg_dir / sub_dir
    else:
        # Non-editable installs never produce an egg-link.
        without_egg_link = True
        pkg_dir = e.site_packages / pkg_name

    if use_user_site:
        egg_link_path = e.user_site / pkg_name + '.egg-link'
    else:
        egg_link_path = e.site_packages / pkg_name + '.egg-link'

    if without_egg_link:
        if egg_link_path in self.files_created:
            raise TestFailure(
                'unexpected egg link file created: %r\n%s' %
                (egg_link_path, self)
            )
    else:
        if egg_link_path not in self.files_created:
            raise TestFailure(
                'expected egg link file missing: %r\n%s' %
                (egg_link_path, self)
            )

        egg_link_file = self.files_created[egg_link_path]
        egg_link_contents = egg_link_file.bytes.replace(os.linesep, '\n')

        # FIXME: I don't understand why there's a trailing . here
        if not (egg_link_contents.endswith('\n.') and
                egg_link_contents[:-2].endswith(pkg_dir)):
            raise TestFailure(textwrap.dedent(u'''\
            Incorrect egg_link file %r
            Expected ending: %r
            ------- Actual contents -------
            %s
            -------------------------------''' % (
                egg_link_file,
                pkg_dir + '\n.',
                repr(egg_link_contents))
            ))

    if use_user_site:
        pth_file = e.user_site / 'easy-install.pth'
    else:
        pth_file = e.site_packages / 'easy-install.pth'

    if (pth_file in self.files_updated) == without_egg_link:
        raise TestFailure('%r unexpectedly %supdated by install' % (
            pth_file, (not without_egg_link and 'not ' or '')))

    if (pkg_dir in self.files_created) == (curdir in without_files):
        raise TestFailure(textwrap.dedent('''\
        expected package directory %r %sto be created
        actually created:
        %s
        ''') % (
            pkg_dir,
            (curdir in without_files and 'not ' or ''),
            sorted(self.files_created.keys())))

    for f in with_files:
        if not (pkg_dir / f).normpath in self.files_created:
            raise TestFailure(
                'Package directory %r missing expected content %r' %
                (pkg_dir, f)
            )

    for f in without_files:
        if (pkg_dir / f).normpath in self.files_created:
            # BUG FIX: was '%f' (float conversion), which raises TypeError
            # when formatted with the filename string — the real failure
            # message was never produced.
            raise TestFailure(
                'Package directory %r has unexpected content %r' %
                (pkg_dir, f)
            )
async def unload_js(request: Request):
    """
    This generates a javascript that you can embed into any webview in order
    to enable webview closing events. You need to sign the webview using the
    `sign_webview` parameter of an UrlButton.

    If you want to close/change your page without triggering the page close
    event, you can call in JS `bernard.unloadNotifier.inhibit()`.
    """
    # Derive the websocket base URL from the incoming HTTP(S) request URL.
    request_url = str(request.url)
    request_url = re.sub(r'^https?://', 'wss://', request_url)

    # NOTE(review): the region between indexOf('_bnd_user=' and 'beat') below
    # appears to have been destroyed by an automated secret-masking pass
    # ('******'); the original connect/heartbeat logic is missing and should
    # be restored from upstream history — left byte-identical here.
    script = """
        (function () {
            var STORAGE_KEY = '_bnd_user';

            function UnloadNotifier() {
                var self = this,
                    intervalId,
                    ws;

                function getSearch() {
                    if (window.location.search.indexOf('_bnd_user='******'beat');
                }

                function unload() {
                    ws.send('unload');
                    inhibit();
                }

                function inhibit() {
                    ws.send('inhibit');
                    clearInterval(intervalId);
                    ws.close();
                }

                self.inhibit = inhibit;
                self.unload = unload;

                (function () {
                    connect();
                    window.addEventListener('beforeunload', unload);
                }());
            }

            window.bernard = {
                unloadNotifier: new UnloadNotifier()
            };
        }());
    """

    # Inject runtime configuration into the JS template as JSON literals.
    script = script.replace(
        'WS_URL', ujson.dumps(urljoin(request_url, '/unload/facebook.sock')))
    script = script.replace('HEARTBEAT_PERIOD',
                            ujson.dumps(settings.WEBVIEW_HEARTBEAT_PERIOD))
    script = dedent(script).strip()

    return Response(text=script, content_type='text/javascript')
def test_adjust_operator_bundle_related_images_with_env_var(
        mock_grbi, mock_gil, mock_aca, mock_gri, mock_apns, mock_gpa, tmpdir):
    """_adjust_operator_bundle pins image references in a CSV that also uses
    RELATED_IMAGE_* env vars: the main image must be resolved to a digest and
    rewritten against the private registry, while already-pinned related
    images are left untouched.
    """
    annotations = {
        'marketplace.company.io/remote-workflow': (
            'https://marketplace.company.com/en-us/operators/{package_name}/pricing'
        ),
        'marketplace.company.io/support-workflow': (
            'https://marketplace.company.com/en-us/operators/{package_name}/support'
        ),
    }
    image_digest = '749327'
    mock_gpa.return_value = {
        'annotations': {
            'operators.operatorframework.io.bundle.package.v1': 'amqstreams'
        }
    }
    mock_apns.return_value = (
        'amqstreams',
        {
            'operators.operatorframework.io.bundle.package.v1': 'amqstreams-cmp'
        },
    )
    mock_gil.return_value = {'name': 'namespace/reponame', 'version': 'rhel-8'}
    manifests_dir = tmpdir.mkdir('manifests')
    metadata_dir = tmpdir.mkdir('metadata')
    csv = manifests_dir.join('csv.yaml')

    # Template shared between input fixture and expected output; only the
    # format() arguments differ before/after the adjustment.
    csv_template = textwrap.dedent("""\
        apiVersion: operators.example.com/v1
        kind: ClusterServiceVersion
        metadata:
          name: amqstreams.v1.0.0
          namespace: placeholder
          annotations:
            containerImage: {registry}/{operator}{image}{ref}
        spec:
          install:
            spec:
              deployments:
              - spec:
                  template:
                    spec:
                      containers:
                      - name: image-annotation
                        image: {registry}/{operator}{image}{ref}
                        env:
                        - name: RELATED_IMAGE_SOMETHING
                          value: {registry}/{operator}{image}{related_ref}
        relatedImages:
        - name: {related_name}
          image: {registry}/{operator}{image}{related_ref}
        """)
    csv.write(
        csv_template.format(
            registry='quay.io',
            operator='operator',
            image='/image',
            ref=':v1',
            related_name=f'image-{image_digest}-annotation',
            related_ref='@sha256:749327',
        ))

    def get_resolved_image(image):
        # Tag resolves to a new digest; the already-pinned digest maps to
        # itself.
        return {
            'quay.io/operator/image:v1':
                'quay.io/operator/image@sha256:749325',
            'quay.io/operator/image@sha256:749327':
                'quay.io/operator/image@sha256:749327',
        }[image]

    mock_gri.side_effect = get_resolved_image

    labels = build_regenerate_bundle._adjust_operator_bundle(
        str(manifests_dir), str(metadata_dir), 1, 'company-marketplace')

    assert labels == {
        'com.redhat.iib.pinned': 'true',
        'operators.operatorframework.io.bundle.package.v1': 'amqstreams-cmp',
    }
    # The rewritten CSV uses the private registry repo/version and the
    # resolved digest for the main image.
    assert csv.read_text('utf-8') == csv_template.format(
        registry='quay.io',
        operator='namespace/reponame',
        image='-rhel-8-final',
        ref='@sha256:749325',
        related_name=f'image-{image_digest}-annotation',
        related_ref='@sha256:749327',
    )
    mock_aca.assert_called_once_with(mock.ANY, 'amqstreams', annotations)
    assert mock_gil.call_count == 2
def create_basic_wheel_for_package(script, name, version, depends, extras):
    """Build a minimal but valid wheel archive for *name*==*version*.

    The wheel contains a trivial ``hello()`` module plus the standard
    ``.dist-info`` files.  *depends* become ``Requires-Dist`` entries and
    *extras* (a mapping of extra-name -> list of packages) become
    ``Provides-Extra`` plus conditional ``Requires-Dist`` entries.
    Returns the path of the generated ``.whl`` in ``script.scratch_path``.
    """
    # Raw file templates; paths and contents are rendered with str.format.
    templates = {
        "{name}/__init__.py": """
            def hello():
                return "Hello From {name}"
        """,
        "{dist_info}/DESCRIPTION": """
            UNKNOWN
        """,
        "{dist_info}/WHEEL": """
            Wheel-Version: 1.0
            Generator: pip-test-suite
            Root-Is-Purelib: true
            Tag: py2-none-any
            Tag: py3-none-any
        """,
        "{dist_info}/METADATA": """
            Metadata-Version: 2.0
            Name: {name}
            Version: {version}
            Summary: UNKNOWN
            Home-page: UNKNOWN
            Author: UNKNOWN
            Author-email: UNKNOWN
            License: UNKNOWN
            Platform: UNKNOWN
            {requires_dist}

            UNKNOWN
        """,
        "{dist_info}/top_level.txt": """
            {name}
        """,
        # Have an empty RECORD because we don't want to be checking hashes.
        "{dist_info}/RECORD": ""
    }

    # Some useful shorthands
    archive_name = "{name}-{version}-py2.py3-none-any.whl".format(
        name=name, version=version
    )
    dist_info = "{name}-{version}.dist-info".format(
        name=name, version=version
    )

    # Assemble the METADATA dependency lines: plain requirements first, then
    # the extra declarations, then the per-extra conditional requirements.
    metadata_lines = []
    for pkg in depends:
        metadata_lines.append("Requires-Dist: {}".format(pkg))
    for extra in extras.keys():
        metadata_lines.append("Provides-Extra: {}".format(extra))
    for extra in extras:
        for pkg in extras[extra]:
            metadata_lines.append(
                'Requires-Dist: {}; extra == "{}"'.format(pkg, extra))
    requires_dist = "\n".join(metadata_lines)

    # Render every template: format the path, then dedent/format/strip the
    # contents.
    rendered = {
        path.format(name=name, dist_info=dist_info): textwrap.dedent(
            contents).format(
                name=name, version=version, requires_dist=requires_dist
        ).strip()
        for path, contents in templates.items()
    }

    # Materialize the files in a scratch tree, zip it up as the wheel, then
    # reset the scratch tree for the next caller.
    for destination, payload in rendered.items():
        target = script.temp_path / destination
        target.folder.mkdir()
        target.write(payload)

    retval = script.scratch_path / archive_name
    generated = shutil.make_archive(retval, 'zip', script.temp_path)
    shutil.move(generated, retval)

    script.temp_path.rmtree()
    script.temp_path.mkdir()

    return retval
def test_handle_regenerate_bundle_request(
    mock_ur,
    mock_gwc,
    mock_capml,
    mock_srs,
    mock_pi,
    mock_bi,
    mock_aob,
    mock_cffi,
    mock_gia,
    mock_temp_dir,
    mock_pp,
    mock_gri,
    mock_cleanup,
    mock_gil,
    iib_index_image_output_registry,
    expected_bundle_image,
    pinned_by_iib_label,
    pinned_by_iib_bool,
    tmpdir,
):
    """
    Verify the happy path of ``handle_regenerate_bundle_request``.

    The mock_* arguments are presumably injected by ``mock.patch`` decorators
    declared above this function (outside this view) — TODO confirm; the
    remaining arguments (``iib_index_image_output_registry``,
    ``expected_bundle_image``, ``pinned_by_iib_label``, ``pinned_by_iib_bool``)
    look like ``pytest.mark.parametrize`` values — verify against the
    decorators. Asserts that the resolved bundle image is inspected, its
    manifests/metadata are copied out, a per-arch image is built and pushed,
    a manifest list is created, and the request state is updated twice.
    """
    # --- Test fixtures / canned return values for the patched helpers ---
    arches = ['amd64', 's390x']
    from_bundle_image = 'bundle-image:latest'
    from_bundle_image_resolved = 'bundle-image@sha256:abcdef'
    bundle_image = 'quay.io/iib:99'
    organization = 'acme'
    request_id = 99

    # Route all temp-dir usage into pytest's tmpdir so the generated
    # Dockerfile can be inspected at the end of the test.
    mock_temp_dir.return_value.__enter__.return_value = str(tmpdir)
    mock_gri.return_value = from_bundle_image_resolved
    mock_gia.return_value = list(arches)
    mock_aob.return_value = {
        'operators.operatorframework.io.bundle.package.v1': 'amqstreams-cmp'
    }
    mock_capml.return_value = bundle_image
    mock_gwc.return_value = {
        'iib_index_image_output_registry': iib_index_image_output_registry,
        'iib_registry': 'quay.io',
    }
    mock_gil.return_value = pinned_by_iib_label

    # --- Exercise the code under test ---
    build_regenerate_bundle.handle_regenerate_bundle_request(
        from_bundle_image, organization, request_id)

    # --- Verify the orchestration sequence ---
    mock_cleanup.assert_called_once()

    mock_gri.assert_called_once()
    mock_gri.assert_called_with('bundle-image:latest')

    mock_pp.assert_called_once_with(from_bundle_image_resolved)

    mock_gia.assert_called_once()
    mock_gia.assert_called_with('bundle-image@sha256:abcdef')

    # Both the /manifests and /metadata directories must be copied out of the
    # resolved bundle image.
    assert mock_cffi.call_count == 2
    mock_cffi.assert_has_calls((
        mock.call('bundle-image@sha256:abcdef', '/manifests', mock.ANY),
        mock.call('bundle-image@sha256:abcdef', '/metadata', mock.ANY),
    ))

    mock_aob.assert_called_once_with(
        str(tmpdir.join('manifests')),
        str(tmpdir.join('metadata')),
        request_id,
        'acme',
        pinned_by_iib_bool,
    )

    # One build + one push per architecture.
    assert mock_bi.call_count == len(arches)
    assert mock_pi.call_count == len(arches)
    for arch in arches:
        mock_bi.assert_any_call(mock.ANY, 'Dockerfile', request_id, arch)
        mock_pi.assert_any_call(request_id, arch)

    assert mock_srs.call_count == 2
    mock_srs.assert_has_calls([
        mock.call(request_id, 'in_progress', 'Resolving from_bundle_image'),
        mock.call(request_id, 'in_progress', 'Creating the manifest list'),
    ])

    mock_capml.assert_called_once_with(request_id, list(arches), [])

    # The request is updated twice: once with the resolved image, once with
    # the final completion payload.
    assert mock_ur.call_count == 2
    mock_ur.assert_has_calls([
        mock.call(
            request_id,
            {
                'from_bundle_image_resolved': from_bundle_image_resolved,
                'state': 'in_progress',
                'state_reason': (
                    'Regenerating the bundle image for the following arches: amd64, s390x'
                ),
            },
            exc_msg='Failed setting the resolved "from_bundle_image" on the request',
        ),
        mock.call(
            request_id,
            {
                'arches': list(arches),
                'bundle_image': expected_bundle_image,
                'state': 'complete',
                'state_reason': 'The request completed successfully',
            },
            exc_msg='Failed setting the bundle image on the request',
        ),
    ])

    # The generated Dockerfile must rebuild from the resolved digest and
    # re-apply the manifests, metadata, and package label.
    with open(tmpdir.join('Dockerfile'), 'r') as f:
        dockerfile = f.read()

    expected_dockerfile = textwrap.dedent('''\
        FROM bundle-image@sha256:abcdef
        COPY ./manifests /manifests
        COPY ./metadata /metadata
        LABEL operators.operatorframework.io.bundle.package.v1=amqstreams-cmp
        ''')
    assert dockerfile == expected_dockerfile
def test_adjust_operator_bundle_already_pinned_by_iib(
    mock_grbi, mock_gil, mock_aca, mock_gri, mock_apns, mock_gpa, tmpdir
):
    """
    Verify ``_adjust_operator_bundle`` when the bundle was already pinned by IIB.

    With ``pinned_by_iib=True`` the pull specs must NOT be re-resolved
    (``mock_gri`` is never called); only the organization-specific
    registry/repository/tag replacements are applied to the CSV files.
    The mock_* arguments are presumably injected by ``mock.patch`` decorators
    above this function (outside this view) — TODO confirm.
    """
    annotations = {
        'marketplace.company.io/remote-workflow': (
            'https://marketplace.company.com/en-us/operators/{package_name}/pricing'
        ),
        'marketplace.company.io/support-workflow': (
            'https://marketplace.company.com/en-us/operators/{package_name}/support'
        ),
    }
    mock_gpa.return_value = {
        'annotations': {'operators.operatorframework.io.bundle.package.v1': 'amqstreams'}
    }
    mock_apns.return_value = (
        'amqstreams',
        {'operators.operatorframework.io.bundle.package.v1': 'amqstreams-cmp'},
    )
    mock_gil.return_value = {'name': 'namespace/reponame', 'version': 'rhel-8'}
    manifests_dir = tmpdir.mkdir('manifests')
    metadata_dir = tmpdir.mkdir('metadata')
    csv1 = manifests_dir.join('2.clusterserviceversion.yaml')
    csv2 = manifests_dir.join('3.clusterserviceversion.yaml')

    # NOTE: The OperatorManifest class is capable of modifying pull specs found in
    # various locations within the CSV file. Since IIB relies on this class to do
    # such modifications, this test only verifies that at least one of the locations
    # is being handled properly. This is to ensure IIB is using OperatorManifest
    # correctly.
    csv_template = textwrap.dedent(
        """\
        apiVersion: operators.example.com/v1
        kind: ClusterServiceVersion
        metadata:
          name: amqstreams.v1.0.0
          namespace: placeholder
          annotations:
            containerImage: {registry}/{operator}{image}{ref}
        """
    )
    csv_related_images_template = csv_template + textwrap.dedent(
        """\
        spec:
          relatedImages:
          - name: {related_name}
            image: {registry}/{operator}{image}{related_ref}
        """
    )
    csv1.write(
        csv_related_images_template.format(
            registry='quay.io',
            ref='@sha256:654321',
            related_name='image-654321-annotation',
            related_ref='@sha256:654321',
            operator='operator',
            image='/image',
        )
    )
    csv2.write(
        csv_related_images_template.format(
            # This registry for the company-marketplace will be replaced based on
            # worker configuration.
            registry='registry.access.company.com',
            ref='@sha256:765432',
            related_name='operator/image-765432-annotation',
            related_ref='@sha256:765432',
            operator='operator',
            image='/image',
        )
    )

    labels = build_regenerate_bundle._adjust_operator_bundle(
        str(manifests_dir), str(metadata_dir), 1, 'company-marketplace', pinned_by_iib=True
    )

    # The com.redhat.iib.pinned label is not explicitly set, but inherited from the original image
    assert labels == {'operators.operatorframework.io.bundle.package.v1': 'amqstreams-cmp'}
    assert csv1.read_text('utf-8') == csv_related_images_template.format(
        registry='quay.io',
        ref='@sha256:654321',
        related_name='image-654321-annotation',
        related_ref='@sha256:654321',
        operator='namespace/reponame',
        image='-rhel-8-final',
    )
    assert csv2.read_text('utf-8') == csv_related_images_template.format(
        registry='registry.marketplace.company.com',
        ref='@sha256:765432',
        related_name='operator/image-765432-annotation',
        related_ref='@sha256:765432',
        operator='namespace/reponame',
        image='-rhel-8-final',
    )
    mock_aca.assert_called_once_with(mock.ANY, 'amqstreams', annotations)
    mock_gri.assert_not_called()