def test_r():
    r_script_path = os.path.join(THIS_DIR, 'jobs', 'R', 'dataframe.R')
    r_script_url = utils.upload_file(r_script_path)
    utils.run_tests(app_url=r_script_url,
                    app_args='',
                    expected_output="Justin",
                    app_name="/spark")
def main():
    print("Starting!")
    m = 2  # method to use

    # Database tests (disabled)
    '''
    data_path = "../../Dataset_4NSigComp2010/TrainingData/"
    pickle_path = "../../Data/pickle_"
    for user_dir in os.listdir(data_path):
        if user_dir[0] == ".":  # skip parent path
            continue
        # the two lines below are reconstructed around a redacted span,
        # following the call pattern of the loop further down
        print("User = " + user_dir)
        utils.run_tests(data_path + user_dir + "/",
                        pickle_path + user_dir + "_v" + str(m),
                        num_runs=20, method=m)

    data_path = "../../signDist1.0/images1/"
    for user_dir in os.listdir(data_path):
        if user_dir[0] == ".":  # skip parent path
            continue
        # reconstructed around a redacted span, as above
        print("User = " + user_dir)
        utils.run_tests(data_path + user_dir + "/",
                        pickle_path + user_dir + "_v" + str(m),
                        method=m, num_runs=1)
    '''

    # Test individual subject
    data_path = "../../signDist1.0/images2/subject_002/"
    pickle_path = "../../Data/test"
    utils.run_tests(data_path, pickle_path, num_runs=10, method=m)
    print("Done!")
def test_cni():
    SPARK_EXAMPLES = "http://downloads.mesosphere.com/spark/assets/spark-examples_2.11-2.0.1.jar"
    utils.run_tests(app_url=SPARK_EXAMPLES,
                    app_args="",
                    expected_output="Pi is roughly 3",
                    app_name="/spark",
                    args=["--conf", "spark.mesos.network.name=dcos",
                          "--class", "org.apache.spark.examples.SparkPi"])
def test_teragen():
    if utils.hdfs_enabled():
        jar_url = 'https://downloads.mesosphere.io/spark/examples/spark-terasort-1.0-jar-with-dependencies_2.11.jar'
        utils.run_tests(app_url=jar_url,
                        app_args="1g hdfs:///terasort_in",
                        expected_output="Number of records written",
                        app_name="/spark",
                        args=["--class", "com.github.ehiggs.spark.terasort.TeraGen"])
def _run_teravalidate():
    jar_url = TERASORT_JAR
    utils.run_tests(jar_url,
                    "hdfs:///terasort_out hdfs:///terasort_validate",
                    "partitions are properly sorted",
                    ["--class", "com.github.ehiggs.spark.terasort.TeraValidate",
                     "--conf", "spark.cores.max={}".format(TERASORT_MAX_CORES)])
def test_python():
    python_script_path = os.path.join(THIS_DIR, 'jobs', 'python', 'pi_with_include.py')
    python_script_url = _upload_file(python_script_path)
    py_file_path = os.path.join(THIS_DIR, 'jobs', 'python', 'PySparkTestInclude.py')
    py_file_url = _upload_file(py_file_path)
    utils.run_tests(python_script_url,
                    "30",
                    "Pi is roughly 3",
                    ["--py-files", py_file_url])
def _run_teragen():
    jar_url = TERASORT_JAR
    input_size = os.getenv('TERASORT_INPUT_SIZE', '1g')
    utils.run_tests(jar_url,
                    "{} hdfs:///terasort_in".format(input_size),
                    "Number of records written",
                    ["--class", "com.github.ehiggs.spark.terasort.TeraGen",
                     "--conf", "spark.cores.max={}".format(TERASORT_MAX_CORES)])
def test_cli_multiple_spaces():
    # the stray spaces inside the arg tokens are intentional: this test
    # checks that the CLI tolerates extra whitespace around flags
    utils.run_tests(app_url=SPARK_EXAMPLES,
                    app_args="30",
                    expected_output="Pi is roughly 3",
                    app_name="/spark",
                    args=["--conf ", "spark.cores.max=2",
                          " --class ", "org.apache.spark.examples.SparkPi"])
def test_jar():
    master_url = ("https" if utils.is_strict() else "http") + "://leader.mesos:5050"
    spark_job_runner_args = '{} dcos \\"*\\" spark:only 2 --auth-token={}'.format(
        master_url,
        shakedown.dcos_acs_token())
    jar_url = _upload_file(os.getenv('TEST_JAR_PATH'))
    utils.run_tests(jar_url,
                    spark_job_runner_args,
                    "All tests passed",
                    ["--class", 'com.typesafe.spark.test.mesos.framework.runners.SparkJobRunner'])
def test_python():
    python_script_path = os.path.join(THIS_DIR, 'jobs', 'python', 'pi_with_include.py')
    python_script_url = utils.upload_file(python_script_path)
    py_file_path = os.path.join(THIS_DIR, 'jobs', 'python', 'PySparkTestInclude.py')
    py_file_url = utils.upload_file(py_file_path)
    utils.run_tests(app_url=python_script_url,
                    app_args="30",
                    expected_output="Pi is roughly 3",
                    app_name="/spark",
                    args=["--py-files", py_file_url])
def test_secrets():
    properties_file_path = os.path.join(THIS_DIR, "resources", "secrets-opts.txt")
    secrets_handler = utils.SecretHandler(SECRET_NAME, SECRET_CONTENTS)
    r = secrets_handler.create_secret()
    assert r.ok, "Error creating secret, {}".format(r.content)

    secret_file_name = "secret_file"
    output = "Contents of file {}: {}".format(secret_file_name, SECRET_CONTENTS)
    args = ["--properties-file", properties_file_path,
            "--class", "SecretsJob"]
    utils.run_tests(app_url=_scala_test_jar_url(),
                    app_args=secret_file_name,
                    expected_output=output,
                    app_name="/spark",
                    args=args)

    r = secrets_handler.delete_secret()
    if not r.ok:
        LOGGER.warn("Error when deleting secret, {}".format(r.content))
def test_s3():
    linecount_path = os.path.join(THIS_DIR, 'resources', 'linecount.txt')
    s3.upload_file(linecount_path)
    app_args = "{} {}".format(s3.s3n_url('linecount.txt'),
                              s3.s3n_url("linecount-out"))
    args = ["--conf",
            "spark.mesos.driverEnv.AWS_ACCESS_KEY_ID={}".format(
                os.environ["AWS_ACCESS_KEY_ID"]),
            "--conf",
            "spark.mesos.driverEnv.AWS_SECRET_ACCESS_KEY={}".format(
                os.environ["AWS_SECRET_ACCESS_KEY"]),
            "--class", "S3Job"]
    utils.run_tests(_upload_file(os.environ["SCALA_TEST_JAR_PATH"]),
                    app_args,
                    "",
                    args)
    assert len(list(s3.list("linecount-out"))) > 0
def test_kerberos():
    '''This test must be run manually against a kerberized HDFS cluster.
    Instructions for setting one up are here:
    https://docs.google.com/document/d/1lqlEIs98j1VsAyoEYnhYoaNmYylcoaBAwHpD29yKjU4.
    You must set 'principal' and 'keytab' to the appropriate values, and
    change 'krb5.conf' to the name of some text file you've written to HDFS.
    '''
    principal = "nn/ip-10-0-2-134.us-west-2.compute.internal@LOCAL"
    keytab = "nn.ip-10-0-2-134.us-west-2.compute.internal.keytab"
    utils.run_tests(
        "http://infinity-artifacts.s3.amazonaws.com/spark/sparkjob-assembly-1.0.jar",
        "hdfs:///krb5.conf",
        "number of words in",
        ["--class", "HDFSWordCount",
         "--principal", principal,
         "--keytab", keytab,
         "--conf", "sun.security.krb5.debug=true"])
def test_s3():
    linecount_path = os.path.join(THIS_DIR, 'resources', 'linecount.txt')
    s3.upload_file(linecount_path)
    app_args = "{} {}".format(s3.s3n_url('linecount.txt'),
                              s3.s3n_url("linecount-out"))
    args = ["--conf",
            "spark.mesos.driverEnv.AWS_ACCESS_KEY_ID={}".format(
                os.environ["AWS_ACCESS_KEY_ID"]),
            "--conf",
            "spark.mesos.driverEnv.AWS_SECRET_ACCESS_KEY={}".format(
                os.environ["AWS_SECRET_ACCESS_KEY"]),
            "--class", "S3Job"]
    utils.run_tests(app_url=_scala_test_jar_url(),
                    app_args=app_args,
                    expected_output="",
                    app_name="/spark",
                    args=args)
    assert len(list(s3.list("linecount-out"))) > 0
def test_015_list_devices(plex, user=None):
    assert user, 'Must specify username, password & server to run this test.'
    for device in user.devices():
        log(2, device.name or device.product)


# def test_013_sync_items(plex, user=None):
#     user = MyPlexUser('user', 'pass')
#     device = user.getDevice('device-uuid')
#     # fetch the sync items via the device sync list
#     for item in device.sync_items():
#         # fetch the media object associated with the sync item
#         for video in item.get_media():
#             # fetch the media parts (actual video/audio streams) associated with the media
#             for part in video.iter_parts():
#                 print('Found media to download!')
#                 # mark the relevant sync id (media part) as downloaded;
#                 # this tells the server that this device has successfully
#                 # downloaded this media part of this sync item
#                 item.mark_as_done(part.sync_id)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run PlexAPI tests.')
    parser.add_argument('-s', '--server', help='Name of the Plex server (requires user/pass).')
    parser.add_argument('-u', '--username', help='Username for the Plex server.')
    parser.add_argument('-p', '--password', help='Password for the Plex server.')
    parser.add_argument('-n', '--name', help='Only run tests containing this string. Leave blank to run all tests.')
    args = parser.parse_args()
    run_tests(__name__, args)
def test_sparkPi():
    utils.run_tests(app_url=SPARK_EXAMPLES,
                    app_args="100",
                    expected_output="Pi is roughly 3",
                    app_name="/spark",
                    args=["--class", "org.apache.spark.examples.SparkPi"])
# device = user.getDevice('device-uuid')
# # fetch the sync items via the device sync list
# for item in device.sync_items():
#     # fetch the media object associated with the sync item
#     for video in item.get_media():
#         # fetch the media parts (actual video/audio streams) associated with the media
#         for part in video.iter_parts():
#             print('Found media to download!')
#             # mark the relevant sync id (media part) as downloaded;
#             # this tells the server that this device has successfully
#             # downloaded this media part of this sync item
#             item.mark_as_done(part.sync_id)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run PlexAPI tests.')
    parser.add_argument('-r', '--resource', help='Name of the Plex resource (requires user/pass).')
    parser.add_argument('-u', '--username', help='Username for the Plex server.')
    parser.add_argument('-p', '--password', help='Password for the Plex server.')
    parser.add_argument('-n', '--name', help='Only run tests containing this string. Leave blank to run all tests.')
    args = parser.parse_args()
    run_tests(__name__, args)
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.

import utils
import unittest
import glob
import os.path

if __name__ == '__main__':
    suite = unittest.TestSuite()
    for i in glob.glob('*.py'):
        m = __import__(os.path.splitext(i)[0])
        if hasattr(m, 'get_suite'):
            suite.addTest(m.get_suite())
    utils.run_tests(suite)
def main():
    test_dir = os.path.dirname(sys.argv[0])
    test_file_pattern = os.path.join(test_dir, 'test_*.py')
    test_files = [os.path.basename(f) for f in glob.glob(test_file_pattern)]
    test_modules = [f[:-3] for f in test_files]  # strip the '.py' extension
    run_tests(test_modules)
        self.assertEqual(ret, length, str(ret) + ' != ' + str(length))
        self.assertEqual(buff, data,
                         'Failed to read data: ' + str(data) +
                         ', in EP = ' + str(ep))
        if utils.is_windows():
            time.sleep(0.5)


def get_suite():
    suite = unittest.TestSuite()
    for m in (libusb1, libusb0, openusb):
        b = m.get_backend()
        if b is not None and utils.find_my_device(b):
            utils.logger.info('Adding %s(%s) to test suite...',
                              BackendTest.__name__, m.__name__)
            suite.addTest(BackendTest(b))
        else:
            utils.logger.warning('%s(%s) is not available',
                                 BackendTest.__name__, m.__name__)
    return suite


if __name__ == '__main__':
    utils.run_tests(get_suite())
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import utils
import unittest
import glob
import os.path
import sys

if __name__ == '__main__':
    suite = unittest.TestSuite()
    for i in glob.glob('*.py'):
        m = __import__(os.path.splitext(i)[0])
        if hasattr(m, 'get_suite'):
            suite.addTest(m.get_suite())
    ret = utils.run_tests(suite)
    if not ret.wasSuccessful():
        sys.exit(1)
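# The call site above treats utils.run_tests(suite) as returning an object
# exposing wasSuccessful(); a minimal sketch of a compatible helper, assuming
# a standard text runner (the real utils module may differ):
import unittest

def run_tests(suite):
    # run the suite and hand back the unittest.TestResult so callers
    # can inspect wasSuccessful() and exit non-zero on failure
    runner = unittest.TextTestRunner(verbosity=2)
    return runner.run(suite)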
def test_cni():
    SPARK_EXAMPLES = "http://downloads.mesosphere.com/spark/assets/spark-examples_2.11-2.0.1.jar"
    utils.run_tests(SPARK_EXAMPLES,
                    "",
                    "Pi is roughly 3",
                    ["--conf", "spark.mesos.network.name=dcos",
                     "--class", "org.apache.spark.examples.SparkPi"])
def test_r():
    r_script_path = os.path.join(THIS_DIR, 'jobs', 'R', 'dataframe.R')
    r_script_url = _upload_file(r_script_path)
    utils.run_tests(r_script_url, '', "Justin")
def main(argv):
    tests = utils.load_tests(argv[1])
    test_file_name = utils.create_test_file(argv[2], tests)
    utils.run_tests(test_file_name)
    os.remove(test_file_name)
def _run_terasort():
    jar_url = TERASORT_JAR
    utils.run_tests(jar_url,
                    "hdfs:///terasort_in hdfs:///terasort_out",
                    "",
                    ["--class", "com.github.ehiggs.spark.terasort.TeraSort",
                     "--conf", "spark.cores.max={}".format(TERASORT_MAX_CORES)])
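# The helpers _run_teragen, _run_terasort, and _run_teravalidate form a
# pipeline whose HDFS paths line up end to end; a sketch of a hypothetical
# driver that chains them, assuming all three live in the same module:
def test_terasort_suite():
    _run_teragen()       # writes hdfs:///terasort_in
    _run_terasort()      # sorts it into hdfs:///terasort_out
    _run_teravalidate()  # checks hdfs:///terasort_out is properly sorted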
        self.assertEqual(
            len(find(find_all=True,
                     backend=b,
                     custom_match=lambda d: d.idProduct == 1)),
            1)
        self.assertEqual(
            len(find(find_all=True,
                     backend=b,
                     custom_match=lambda d: d.idVendor == devinfo.ID_VENDOR,
                     idProduct=1)),
            1)


def get_suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(FindTest))
    return suite


if __name__ == '__main__':
    utils.run_tests(get_suite())
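# For reference, PyUSB's usb.core.find combines keyword matchers (idVendor,
# idProduct, ...) with an arbitrary custom_match predicate, as exercised in
# the assertions above; a standalone usage sketch (the 0x1234 vendor id is
# a made-up placeholder):
import usb.core

dev = usb.core.find(idVendor=0x1234,
                    custom_match=lambda d: d.bDeviceClass != 9)  # skip hubs
if dev is not None:
    print('found matching device')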