def test_copy_unicode(self):
     if not self.supports_unicode:
         raise unittest.SkipTest()
     self.copy(u"ಠ_ಠ")
# regression test for SAX 2.0
# $Id$

from xml.sax import make_parser, ContentHandler, \
                    SAXException, SAXReaderNotAvailable, SAXParseException
import unittest
try:
    # Probe once at import time: a working default parser is a hard
    # prerequisite for every test in this module.
    make_parser()
except SAXReaderNotAvailable:
    # don't try to test this module if we cannot create a parser
    raise unittest.SkipTest("no XML parsers available")
from xml.sax.saxutils import XMLGenerator, escape, unescape, quoteattr, \
                             XMLFilterBase, prepare_input_source
from xml.sax.expatreader import create_parser
from xml.sax.handler import feature_namespaces
from xml.sax.xmlreader import InputSource, AttributesImpl, AttributesNSImpl
from io import BytesIO, StringIO
import codecs
import os.path
import shutil
from test import support
from test.support import findfile, run_unittest, TESTFN

# Locate the XML test-data files shipped with the test suite.
TEST_XMLFILE = findfile("test.xml", subdir="xmltestdata")
TEST_XMLFILE_OUT = findfile("test.xml.out", subdir="xmltestdata")
try:
    # The paths are later handed to APIs that need UTF-8-encodable names;
    # bail out early when the filesystem encoding cannot represent them.
    TEST_XMLFILE.encode("utf-8")
    TEST_XMLFILE_OUT.encode("utf-8")
except UnicodeEncodeError:
    raise unittest.SkipTest("filename is not encodable to utf8")
# Example #3 (score: 0)
 def setUpClass(cls):
     """Run the base-class setup, then skip the suite unless it targets SQLite."""
     super(FileSQLiteTest, cls).setUpClass()
     backend = cls._db_type
     if backend != 'sqlite':
         raise unittest.SkipTest('SQLite tests are disabled.')
# Example #4 (score: 0)
 def wrapper(self, *args, **kwargs):
     """Invoke the wrapped implementation unless ``cond(self)`` says to skip."""
     if cond(self):
         raise unittest.SkipTest(reason)
     impl(self, *args, **kwargs)
 def setUpClass(cls):
     """Class setup gated on the TEST_NET_ACCESS environment variable.

     The suite reaches out to remote third-party services, so it is
     skipped entirely unless the caller opts in via the environment.
     """
     if os.environ.get("TEST_NET_ACCESS") is None:
         raise unittest.SkipTest(
             "Test tests remote 3rd party access (set 'TEST_NET_ACCESS' "
             "env to enable)")
     super(RemoteResourceTest, cls).setUpClass()
# Example #6 (score: 0)
    def test01_local(self):
        """End-to-end exercise of the local grid manager.

        Submits plain, parametric (array), dependent and repeated jobs
        through ``jman``, runs the local scheduler as a subprocess, and
        asserts job states, dependencies, log-file handling and clean-up
        along the way.  Skipped when '/bin/bash' is unavailable, and
        bails out (skips) mid-way on machines too slow to keep up with
        the scheduler timing assumptions.
        """

        # first test, if the '/bin/bash' exists
        bash = "/bin/bash"
        if not os.path.exists(bash):
            raise unittest.SkipTest(
                "Could not find '%s' which is required to run the test scripts"
                % bash
            )

        try:

            import nose

            # first, add some commands to the database
            script_1 = pkg_resources.resource_filename(
                __name__, "test_script.sh"
            )
            script_2 = pkg_resources.resource_filename(
                __name__, "test_array.sh"
            )
            rdir = pkg_resources.resource_filename("gridtk", "tests")
            from gridtk.script import jman

            # add a simple script that will write some information to the
            # log files (checked further below)
            jman.main(
                [
                    self.jman,
                    "--local",
                    "--database",
                    self.database,
                    "submit",
                    "--log-dir",
                    self.log_dir,
                    "--name",
                    "test_1",
                    bash,
                    script_1,
                ]
            )
            jman.main(
                [
                    self.jman,
                    "--local",
                    "--database",
                    self.database,
                    "submit",
                    "--log-dir",
                    self.log_dir,
                    "--name",
                    "test_2",
                    "--dependencies",
                    "1",
                    "--parametric",
                    "1-7:2",
                    bash,
                    script_2,
                ]
            )
            jman.main(
                [
                    self.jman,
                    "--local",
                    "--database",
                    self.database,
                    "submit",
                    "--log-dir",
                    self.log_dir,
                    "--name",
                    "test_3",
                    "--dependencies",
                    "1",
                    "2",
                    "--exec-dir",
                    rdir,
                    bash,
                    "test_array.sh",
                ]
            )
            jman.main(
                [
                    self.jman,
                    "--local",
                    "--database",
                    self.database,
                    "submit",
                    "--log-dir",
                    self.log_dir,
                    "--name",
                    "test_1",
                    "--repeat",
                    "2",
                    bash,
                    script_1,
                ]
            )

            # check that the database was created successfully
            self.assertTrue(os.path.exists(self.database))

            print()
            # test that the list command works (should also work with the
            # "default" grid manager)
            jman.main(
                [
                    self.jman,
                    "--database",
                    self.database,
                    "list",
                    "--job-ids",
                    "1",
                ]
            )
            jman.main(
                [
                    self.jman,
                    "--database",
                    self.database,
                    "list",
                    "--job-ids",
                    "2",
                    "--print-array-jobs",
                    "--print-dependencies",
                    "--print-times",
                ]
            )
            jman.main(
                [
                    self.jman,
                    "--database",
                    self.database,
                    "list",
                    "--job-ids",
                    "4-5",
                    "--print-array-jobs",
                    "--print-dependencies",
                    "--print-times",
                ]
            )

            # get insight into the database
            job_manager = gridtk.local.JobManagerLocal(database=self.database)
            session = job_manager.lock()
            jobs = list(session.query(Job))
            self.assertEqual(len(jobs), 5)
            self.assertEqual(jobs[0].id, 1)
            self.assertEqual(jobs[1].id, 2)
            self.assertEqual(jobs[2].id, 3)
            self.assertEqual(jobs[3].id, 4)
            self.assertEqual(jobs[4].id, 5)
            self.assertEqual(len(jobs[1].array), 4)
            self.assertEqual(jobs[0].status, "submitted")
            self.assertEqual(jobs[1].status, "submitted")
            self.assertEqual(jobs[2].status, "submitted")
            self.assertEqual(jobs[3].status, "submitted")
            self.assertEqual(jobs[4].status, "submitted")
            self.assertTrue(all(j.submit_time is not None for j in jobs))
            self.assertTrue(all(j.start_time is None for j in jobs))
            self.assertTrue(all(j.finish_time is None for j in jobs))
            self.assertTrue(
                all(j.submit_time is not None for j in jobs[1].array)
            )
            self.assertTrue(all(j.start_time is None for j in jobs[1].array))
            self.assertTrue(all(j.finish_time is None for j in jobs[1].array))

            # check that the job dependencies are correct
            waiting = jobs[0].get_jobs_waiting_for_us()
            self.assertEqual(len(waiting), 2)
            self.assertEqual(waiting[0].id, 2)
            self.assertEqual(waiting[1].id, 3)
            waited = jobs[2].get_jobs_we_wait_for()
            self.assertEqual(len(waited), 2)
            self.assertEqual(waited[0].id, 1)
            self.assertEqual(waited[1].id, 2)

            # check dependencies for --repeat
            waiting = jobs[3].get_jobs_waiting_for_us()
            self.assertEqual(len(waiting), 1)
            self.assertEqual(waiting[0].id, 5)
            waited = jobs[4].get_jobs_we_wait_for()
            self.assertEqual(len(waited), 1)
            self.assertEqual(waited[0].id, 4)

            job_manager.unlock()

            # now, start the local execution of the job in a parallel job
            self.scheduler_job = subprocess.Popen(
                [
                    self.jman,
                    "--local",
                    "--database",
                    self.database,
                    "run-scheduler",
                    "--sleep-time",
                    "5",
                    "--parallel",
                    "2",
                ]
            )

            # sleep some time to assure that the scheduler was able to start the first job
            time.sleep(5)
            # ... and kill the scheduler
            self.scheduler_job.kill()
            self.scheduler_job = None

            # now, the first job needs to have status failure, and the second needs to be queued
            session = job_manager.lock()
            jobs = list(session.query(Job))
            self.assertEqual(len(jobs), 5)
            if jobs[0].status in ("submitted", "queued", "executing"):
                # on slow machines, we don't want the tests to fail, so we just skip
                job_manager.unlock()
                raise nose.plugins.skip.SkipTest(
                    "This machine seems to be quite slow in processing parallel jobs."
                )
            self.assertEqual(jobs[0].status, "failure")
            self.assertEqual(jobs[1].status, "queued")
            self.assertEqual(jobs[2].status, "waiting")
            self.assertTrue(jobs[0].start_time is not None)
            self.assertTrue(jobs[0].finish_time is not None)
            self.assertTrue(jobs[1].start_time is None)
            self.assertTrue(jobs[1].finish_time is None)
            self.assertTrue(jobs[2].start_time is None)
            self.assertTrue(jobs[2].finish_time is None)

            # the result files should already be there
            self.assertTrue(os.path.exists(jobs[0].std_out_file()))
            self.assertTrue(os.path.exists(jobs[0].std_err_file()))
            job_manager.unlock()

            # reset the job 1
            jman.main(
                [
                    self.jman,
                    "--local",
                    "--database",
                    self.database,
                    "resubmit",
                    "--job-id",
                    "1",
                    "--running-jobs",
                    "--overwrite-command",
                    bash,
                    script_1,
                ]
            )

            # now, start the local execution of the job in a parallel job
            self.scheduler_job = subprocess.Popen(
                [
                    self.jman,
                    "--local",
                    "--database",
                    self.database,
                    "run-scheduler",
                    "--sleep-time",
                    "5",
                    "--parallel",
                    "2",
                ]
            )

            # sleep some time to assure that the scheduler was able to finish the first and start the second job
            time.sleep(10)
            # ... and kill the scheduler
            self.scheduler_job.kill()
            self.scheduler_job = None

            # Job 1 and two array jobs of job two should be finished now, the other two still need to be queued
            session = job_manager.lock()
            jobs = list(session.query(Job))
            self.assertEqual(len(jobs), 5)
            if (
                jobs[0].status in ("queued", "executing")
                or jobs[1].status == "queued"
            ):
                # on slow machines, we don't want the tests to fail, so we just skip
                job_manager.unlock()
                raise nose.plugins.skip.SkipTest(
                    "This machine seems to be quite slow in processing parallel jobs."
                )
            self.assertEqual(jobs[0].status, "failure")
            self.assertEqual(jobs[1].status, "executing")
            if (
                jobs[1].array[0].status == "executing"
                or jobs[1].array[1].status == "executing"
            ):
                # on slow machines, we don't want the tests to fail, so we just skip
                job_manager.unlock()
                raise nose.plugins.skip.SkipTest(
                    "This machine seems to be quite slow in processing parallel jobs."
                )
            self.assertEqual(jobs[1].array[0].status, "failure")
            self.assertEqual(jobs[1].array[0].result, 1)
            self.assertEqual(jobs[1].array[1].status, "success")
            self.assertEqual(jobs[1].array[1].result, 0)
            self.assertEqual(
                len([a for a in jobs[1].array if a.status == "queued"]), 2
            )
            out_file = jobs[0].std_out_file()
            err_file = jobs[0].std_err_file()
            job_manager.unlock()

            # the result files of the first job should now be there
            self.assertTrue(os.path.isfile(out_file))
            self.assertTrue(os.path.isfile(err_file))
            self.assertEqual(
                open(out_file).read().rstrip(),
                "This is a text message to std-out",
            )
            self.assertTrue(
                "This is a text message to std-err"
                in open(err_file).read().split("\n")
            )

            # resubmit all jobs
            jman.main(
                [
                    self.jman,
                    "--local",
                    "--database",
                    self.database,
                    "resubmit",
                    "--running-jobs",
                ]
            )
            # check that the log files have been cleaned
            self.assertFalse(os.path.exists(out_file))
            self.assertFalse(os.path.exists(err_file))
            # ... but the log dir still exists
            self.assertTrue(os.path.exists(self.log_dir))

            # now, let the scheduler run all jobs, but this time in verbose mode
            self.scheduler_job = subprocess.Popen(
                [
                    self.jman,
                    "--local",
                    "-vv",
                    "--database",
                    self.database,
                    "run-scheduler",
                    "--sleep-time",
                    "1",
                    "--parallel",
                    "2",
                    "--die-when-finished",
                ]
            )
            # and wait for the job to finish (the timeout argument to Popen only exists from python 3.3 onwards)
            self.scheduler_job.wait()
            self.scheduler_job = None

            # check that all output files are generated again
            self.assertTrue(os.path.isfile(out_file))
            self.assertTrue(os.path.isfile(err_file))
            self.assertEqual(
                open(out_file).read().rstrip(),
                "This is a text message to std-out",
            )
            self.assertTrue(
                "This is a text message to std-err"
                in open(err_file).read().split("\n")
            )

            # check that exactly four output and four error files have been created
            files = os.listdir(self.log_dir)
            self.assertEqual(len(files), 16)
            for i in range(1, 8, 2):
                self.assertTrue("test_2.o2.%d" % i in files)
                self.assertTrue("test_2.e2.%d" % i in files)

            # check that all array jobs are finished now
            session = job_manager.lock()
            jobs = list(session.query(Job))
            self.assertEqual(len(jobs), 5)
            self.assertEqual(jobs[1].status, "failure")
            self.assertEqual(jobs[1].array[0].status, "failure")
            self.assertEqual(jobs[1].array[0].result, 1)
            for i in range(1, 4):
                self.assertEqual(jobs[1].array[i].status, "success")
                self.assertEqual(jobs[1].array[i].result, 0)
            self.assertEqual(jobs[2].status, "success")
            self.assertEqual(jobs[2].result, 0)

            self.assertTrue(all(j.submit_time is not None for j in jobs))
            self.assertTrue(all(j.start_time is not None for j in jobs))
            self.assertTrue(all(j.finish_time is not None for j in jobs))
            self.assertTrue(
                all(j.submit_time is not None for j in jobs[1].array)
            )
            self.assertTrue(
                all(j.start_time is not None for j in jobs[1].array)
            )
            self.assertTrue(
                all(j.finish_time is not None for j in jobs[1].array)
            )

            job_manager.unlock()

            print()
            # test that the list command still works
            jman.main(
                [
                    self.jman,
                    "--database",
                    self.database,
                    "list",
                    "--print-array-jobs",
                ]
            )
            jman.main(
                [
                    self.jman,
                    "--database",
                    self.database,
                    "list",
                    "--long",
                    "--print-array-jobs",
                ]
            )

            print()
            # test that the report command works
            jman.main([self.jman, "--database", self.database, "report"])

            # clean-up
            jman.main(
                [
                    self.jman,
                    "--local",
                    "--database",
                    self.database,
                    "delete",
                    "--job-ids",
                    "1+4",
                ]
            )

            # check that the database and the log files are gone
            self.assertEqual(len(os.listdir(self.temp_dir)), 0)

            # add the scripts again, but this time with the --stop-on-failure option
            jman.main(
                [
                    self.jman,
                    "--local",
                    "--database",
                    self.database,
                    "submit",
                    "--log-dir",
                    self.log_dir,
                    "--name",
                    "test_1",
                    "--stop-on-failure",
                    bash,
                    script_1,
                ]
            )
            jman.main(
                [
                    self.jman,
                    "--local",
                    "--database",
                    self.database,
                    "submit",
                    "--log-dir",
                    self.log_dir,
                    "--name",
                    "test_2",
                    "--dependencies",
                    "1",
                    "--parametric",
                    "1-7:2",
                    "--stop-on-failure",
                    bash,
                    script_2,
                ]
            )
            jman.main(
                [
                    self.jman,
                    "--local",
                    "--database",
                    self.database,
                    "submit",
                    "--log-dir",
                    self.log_dir,
                    "--name",
                    "test_3",
                    "--dependencies",
                    "1",
                    "2",
                    "--exec-dir",
                    rdir,
                    "--stop-on-failure",
                    bash,
                    "test_array.sh",
                ]
            )

            # and execute them, but without writing the log files
            self.scheduler_job = subprocess.Popen(
                [
                    self.jman,
                    "--local",
                    "--database",
                    self.database,
                    "run-scheduler",
                    "--sleep-time",
                    "0.1",
                    "--parallel",
                    "2",
                    "--die-when-finished",
                    "--no-log-files",
                ]
            )
            # and wait for the job to finish (the timeout argument to Popen only exists from python 3.3 onwards)
            self.scheduler_job.wait()
            self.scheduler_job = None

            # assert that the log files are not there
            self.assertFalse(os.path.isfile(out_file))
            self.assertFalse(os.path.isfile(err_file))

            # check that all array jobs are finished now
            session = job_manager.lock()
            jobs = list(session.query(Job))
            self.assertEqual(len(jobs), 3)
            self.assertEqual(jobs[0].status, "failure")
            self.assertEqual(jobs[0].result, 255)
            self.assertEqual(jobs[1].status, "failure")
            self.assertTrue(jobs[1].result is None)
            self.assertEqual(jobs[2].status, "failure")
            self.assertTrue(jobs[2].result is None)
            job_manager.unlock()

            # and clean up again
            jman.main(
                [self.jman, "--local", "--database", self.database, "delete"]
            )
            self.assertEqual(len(os.listdir(self.temp_dir)), 0)

        except KeyboardInterrupt:
            # make sure that the keyboard interrupt is captured and the mess is cleaned up (i.e. by calling tearDown)
            pass
# Example #7 (score: 0)
 def checkLoadBefore(self):
     """Unconditionally skipped: the check needs to control timestamps.

     It usually passes, but intermittently trips an internal assertion,
     most commonly observed on AppVeyor:
     https://ci.appveyor.com/project/jamadden/relstorage/builds/26243441/job/p24ocr2ir6wpvg3v#L1087
     """
     raise unittest.SkipTest("Assumes it can control timestamps")
# Example #8 (score: 0)
 def test_error_message_includes_stage(self):
     raise unittest.SkipTest("BEAM-6019")
# Example #9 (score: 0)
 def test_error_traceback_includes_user_code(self):
     raise unittest.SkipTest("BEAM-6019")
# Example #10 (score: 0)
 def test_no_subtransform_composite(self):
     raise unittest.SkipTest("BEAM-4781")
# Example #11 (score: 0)
 def test_pardo_timers(self):
     # TODO Enable once BEAM-5999 is fixed.
     raise unittest.SkipTest(
         "BEAM-4681 - User timers not yet supported.")
    def do_solve(self, solver_name, solver_index):
        """Deviation-statistics check for a single solver type.

        Loads a known scene, reprojects four markers into pixel
        coordinates, runs ``mmSolver`` in statistics-only mode (no
        attributes are solved) and verifies both the reported
        parameter/error counts and the reprojected pixel positions.

        Skipped when the named solver plug-in is unavailable.
        """
        if self.haveSolverType(name=solver_name) is False:
            msg = '%r solver is not available!' % solver_name
            raise unittest.SkipTest(msg)

        # Open the Maya file
        file_name = 'solverDeviationCalculation.ma'
        path = self.get_data_path('scenes', file_name)
        maya.cmds.file(path, open=True, force=True, ignoreVersion=True)

        # Marker (MKR) / bundle (BND) node names baked into the scene file.
        mkr_topRight = 'topRight_01_MKR'
        bnd_topRight = 'topRight_01_BND'
        mkr_middleLeft = 'middleLeft_01_MKR'
        bnd_middleLeft = 'middleLeft_01_BND'
        mkr_middleTop = 'middleTop_01_MKR'
        bnd_middleTop = 'middleTop_01_BND'
        mkr_topLeft = 'topLeft_01_MKR'
        bnd_topLeft = 'topLeft_01_BND'
        cam_tfm = '|camera1'
        cam_shp = '|camera1|cameraShape1'
        image_width = 2048.0
        # ~16:9 plate: 2048 x 1152 pixels.
        image_height = 2048.0 / 1.777777777

        # NOTE(review): time=(1001.0) is a parenthesised float, not a
        # 1-tuple — confirm mmReprojection accepts a scalar here.
        mkr_topRight_values = maya.cmds.mmReprojection(
            mkr_topRight,
            camera=(cam_tfm, cam_shp),
            time=(1001.0),
            imageResolution=(image_width, image_height),
            asPixelCoordinate=True,
        )
        mkr_topLeft_values = maya.cmds.mmReprojection(
            mkr_topLeft,
            camera=(cam_tfm, cam_shp),
            time=(1001.0),
            imageResolution=(image_width, image_height),
            asPixelCoordinate=True,
        )
        mkr_middleTop_values = maya.cmds.mmReprojection(
            mkr_middleTop,
            camera=(cam_tfm, cam_shp),
            time=(1001.0),
            imageResolution=(image_width, image_height),
            asPixelCoordinate=True,
        )
        mkr_middleLeft_values = maya.cmds.mmReprojection(
            mkr_middleLeft,
            camera=(cam_tfm, cam_shp),
            time=(1001.0),
            imageResolution=(image_width, image_height),
            asPixelCoordinate=True,
        )

        cameras = (
            (cam_tfm, cam_shp),
        )
        markers = (
            (mkr_topRight, cam_shp, bnd_topRight),
            (mkr_middleLeft, cam_shp, bnd_middleLeft),
            (mkr_topLeft, cam_shp, bnd_topLeft),
            (mkr_middleTop, cam_shp, bnd_middleTop),
        )
        # Note: For this test, we do not need any attributes to be solved.
        node_attrs = []
        frames = [
            1,
        ]

        kwargs = {
            'camera': cameras,
            'marker': markers,
            'attr': node_attrs,
        }

        affects_mode = 'addAttrsToMarkers'
        self.runSolverAffects(affects_mode, **kwargs)

        # Print Statistics
        result = maya.cmds.mmSolver(
            frame=frames,
            solverType=solver_index,
            printStatistics=('deviation', 'inputs'),
            removeUnusedMarkers=False,
            **kwargs
        )
        # mmSolver reports the counts as formatted strings.
        num_params = result[0]
        num_errors = result[1]
        print('result:', result)
        self.assertEqual(num_params, 'numberOfParameters=0')
        self.assertEqual(num_errors, 'numberOfErrors=8')
        print('mkr_topRight_values:', mkr_topRight_values)
        print('mkr_topLeft_values:', mkr_topLeft_values)
        print('mkr_middleTop_values:', mkr_middleTop_values)
        print('mkr_middleLeft_values:', mkr_middleLeft_values)
        eps = 0.00001
        # Expected pixel positions follow from the 2048 x 1152-ish plate:
        # right edge = 2048, top edge ~= 1258.67, centre x = 1024.
        self.assertTrue(self.approx_equal(mkr_topRight_values[0], 2048.0, eps=eps))
        self.assertTrue(self.approx_equal(mkr_topRight_values[1], 1258.6666666, eps=eps))
        self.assertTrue(self.approx_equal(mkr_topLeft_values[0], 0.0, eps=eps))
        self.assertTrue(self.approx_equal(mkr_topLeft_values[1], 1258.6666666, eps=eps))
        self.assertTrue(self.approx_equal(mkr_middleTop_values[0], 1024.0, eps=eps))
        self.assertTrue(self.approx_equal(mkr_middleTop_values[1], 1258.6666666, eps=eps))
        self.assertTrue(self.approx_equal(mkr_middleLeft_values[0], 0.0, eps=eps))
        self.assertTrue(self.approx_equal(mkr_middleLeft_values[1], 576.0, eps=eps))
        return
# Example #13 (score: 0)
 def test_nested(self):
     """Always skipped: needs singularity's trusted-container feature."""
     raise unittest.SkipTest(
         "Needs custom trusted-container feature in singularity.")
     # NOTE(review): unreachable while the skip above is in place.
     self.assertTrue(nested())
 def test_copy_paste_unicode(self):
     if not self.supports_unicode:
         raise unittest.SkipTest()
     msg = u"ಠ_ಠ"
     self.copy(msg)
     self.assertEqual(self.paste(), msg)
# Example #15 (score: 0)
 def test_actual_expiry(self):
     """Run the base expiry test, skipping under the msgpack serializer.

     msgpack cannot serialize datetime objects, which the base test needs.
     """
     serializer = caches[DEFAULT_CACHE_ALIAS].client._serializer
     if isinstance(serializer, MSGPackSerializer):
         raise unittest.SkipTest(
             "msgpack serializer doesn't support datetime serialization")
     super(SessionTests, self).test_actual_expiry()
# Example #16 (score: 0)
 def test_distrib_voi(self):
     raise unittest.SkipTest("distrib vois no supported yet")
# Example #17 (score: 0)
 def setUpClass(cls):
     """Initialise SDL's timer subsystem, skipping the class when unavailable."""
     status = SDL_Init(SDL_INIT_TIMER)
     if status != 0:
         raise unittest.SkipTest('Timer subsystem not supported')
from kubernetes.client.rest import ApiException
from subprocess import check_call
import mock
import json
from airflow.contrib.kubernetes.pod_launcher import PodLauncher
from airflow.contrib.kubernetes.volume_mount import VolumeMount
from airflow.contrib.kubernetes.volume import Volume

try:
    # Probe for a reachable cluster by listing pods via kubectl.
    check_call(["/usr/local/bin/kubectl", "get", "pods"])
except Exception as e:
    if os.environ.get('KUBERNETES_VERSION'):
        # CI explicitly requested Kubernetes: surface the failure loudly.
        raise e
    else:
        # Local runs without a cluster just skip the integration tests.
        raise unittest.SkipTest(
            "Kubernetes integration tests require a minikube cluster;"
            "Skipping tests {}".format(e))


class KubernetesPodOperatorTest(unittest.TestCase):
    @staticmethod
    def test_config_path_move():
        new_config_path = '/tmp/kube_config'
        old_config_path = os.path.expanduser('~/.kube/config')
        shutil.copy(old_config_path, new_config_path)

        k = KubernetesPodOperator(namespace='default',
                                  image="ubuntu:16.04",
                                  cmds=["bash", "-cx"],
                                  arguments=["echo 10"],
                                  labels={"foo": "bar"},
import os
import signal
import socket
import sys
import time
import threading
import unittest
from unittest import mock

# The module exercises Windows-specific asyncio machinery (_overlapped,
# _winapi, windows_events), so it cannot run anywhere else.
if sys.platform != 'win32':
    raise unittest.SkipTest('Windows only')

import _overlapped
import _winapi

import asyncio
from asyncio import windows_events
from test.test_asyncio import utils as test_utils


def tearDownModule():
    """Reset the global asyncio event-loop policy after the module's tests."""
    asyncio.set_event_loop_policy(None)


class UpperProto(asyncio.Protocol):
    # Test protocol that keeps its transport and buffers received data.
    # NOTE(review): presumably the full class upper-cases incoming data
    # (per its name) in methods not visible in this chunk — confirm.
    def __init__(self):
        # Chunks of data received so far.
        self.buf = []

    def connection_made(self, trans):
        # Remember the transport so the protocol can write/close later.
        self.trans = trans
# Example #20 (score: 0)
 def wrapper(func, *args, **kwargs):
     """Call ``func`` only when running on the Spark backend; otherwise skip."""
     backend = hl.utils.java.Env.backend()
     if not isinstance(backend, SparkBackend):
         raise unittest.SkipTest('requires Spark')
     return func(*args, **kwargs)
# Example #21 (score: 0)
 def checkUndoMultipleConflictResolution(self, *_args, **_kwargs):  # pylint:disable=signature-differs
     # pylint:disable=arguments-differ
     """Always skipped: conflict-resolving undo (added in ZODB 4.2.3; an
     exotic feature according to jimfulton) is not supported here."""
     raise unittest.SkipTest("conflict-resolving undo not supported")
# Example #22 (score: 0)
 def wrapper(func, *args, **kwargs):
     """Skip when running against the service backend; otherwise delegate."""
     backend = hl.utils.java.Env.backend()
     if isinstance(backend, ServiceBackend):
         raise unittest.SkipTest(message)
     return func(*args, **kwargs)
# Example #23 (score: 0)
 def setUp(self) -> None:
     """Abstract base test: initialise ``ist`` and skip unconditionally.

     Subclasses provide a real fixture; the base class itself must never
     run its test methods.
     """
     # NOTE(review): presumably subclasses re-assign self.ist — confirm.
     self.ist = None
     raise unittest.SkipTest('Base test')
# Example #24 (score: 0)
 def testSparse(self):
     raise unittest.SkipTest("no sparse support")
# Example #25 (score: 0)
 def func():
     """Stand-in test body that always skips with the captured message."""
     raise unittest.SkipTest(skipmsg)
# Example #26 (score: 0)
 def wrapper(self, *args, **kwargs):
     """Skip under the named browser; otherwise run the wrapped test."""
     if browser == self.browser.name:
         raise unittest.SkipTest(
             'Skipping as this test will not work with {}'.format(
                 browser))
     test_function(self, *args, **kwargs)
# Example #27 (score: 0)
import unittest
from zerolib.storage import RWLock
try:
    from timeout_decorator import timeout
    from timeout_decorator.timeout_decorator import TimeoutError as ExpectedTimeout
except ImportError:
    # Every test in this module relies on timeout_decorator; without it
    # there is nothing meaningful to run.
    raise unittest.SkipTest('Cannot import timeout_decorator')


def assert_timeout(seconds):
    """Decorator factory: assert that the decorated test method times out.

    The returned decorator wraps the test with ``timeout(seconds)`` and
    expects the call to raise the timeout error.
    """
    def decorator(func):
        def f(self, *args, **kwargs):
            limited = timeout(seconds)(func)
            with self.assertRaises(ExpectedTimeout):
                limited(self, *args, **kwargs)

        return f

    return decorator


class TestRWLock(unittest.TestCase):
    @timeout(3)
    def test_readers(self):
        rwlock = RWLock()
        for i in range(10):
            rwlock.acquire('r')
        for i in range(10):
            rwlock.release('r')

        with self.assertRaises(ValueError):
# Example #28 (score: 0)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        """Run the wrapped callable only against PostgreSQL; skip otherwise."""
        if connection.vendor != 'postgresql':
            raise unittest.SkipTest('Database is not using postgresql')
        return original(*args, **kwargs)
# Example #29 (score: 0)
 def setUp(self):
     """Skip unless the SDK ships a python3-core package under either name."""
     has_python3 = (self.tc.hasHostPackage("nativesdk-python3-core")
                    or self.tc.hasHostPackage("python3-core-native"))
     if not has_python3:
         raise unittest.SkipTest("No python3 package in the SDK")
# Example #30 (score: 0)
 def _get_backups(self):
     for dir in self.BACKUP_DIRS:
         if os.path.exists(dir):
             _, dirnames, _ = next(os.walk(dir))
             return dirnames
     raise unittest.SkipTest('Cannot find backups dir')