def test_connecting_with_compression(self):
     try:
         import snappy
     except ImportError:
         if hasattr(unittest, 'skipTest'):
             unittest.skipTest('Snappy compression not available')
         else:
             return
     conn = cql.connect(TEST_HOST, TEST_NATIVE_PORT, native=True, compression=True)
     self.assertEqual(conn.compressor, snappy.compress)
     self.try_basic_stuff(conn)
 def test_connecting_with_compression(self):
     try:
         import snappy
     except ImportError:
         if hasattr(unittest, 'skipTest'):
             unittest.skipTest('Snappy compression not available')
         else:
             return
     conn = cql.connect(TEST_HOST, TEST_NATIVE_PORT, native=True, compression=True)
     self.assertEqual(conn.compressor, snappy.compress)
     self.try_basic_stuff(conn)
Exemplo n.º 3
0
 def test_ccscheck_numerrors(self):
     if self.ref_fasta is None:
         raise unittest.SkipTest("Comparison FASTA file not specified")
     elif self.ref_csv is None:
         raise unittest.skipTest("No CSV file defined")
     elif not op.isfile(op.realpath(self.ref_fasta)):
         self.fail("{f} is not a file".format(f=self.ref_fasta))
     ref_lines = {}
     with open(self.ref_csv) as csv_ref:
         c = csv.reader(csv_ref, delimiter=',')
         for row in c:
             if row[0] != "Movie":
                 ref_lines[(row[0], row[1])] = row
     for i_file, file_name in enumerate(self.ccs_ds.toExternalFiles()):
         out_dir = op.join(self.run_dir, "out_{i}".format(i=i_file))
         args = ["ccscheck", file_name, out_dir, self.ref_fasta]
         assert subprocess.call(args) == 0, \
             "cmd '{a}' failed".format(a=" ".join(args))
         csv_file = op.join(out_dir, "zmws.csv")
         with open(csv_file) as f:
             c = csv.reader(f, delimiter=',')
             for row in c:
                 if row[0] != "Movie":
                     csv_ref = ref_lines.get((row[0], row[1]), None)
                     if csv_ref is None:
                         log.warn("{m}/{z} not found".format(m=row[0],
                                                             z=row[1]))
                     else:
                         self.assertEqual(row, csv_ref)
Exemplo n.º 4
0
 def test_ccscheck_numerrors(self):
     if self.ref_fasta is None:
         raise unittest.SkipTest("Comparison FASTA file not specified")
     elif self.ref_csv is None:
         raise unittest.skipTest("No CSV file defined")
     elif not op.isfile(op.realpath(self.ref_fasta)):
         self.fail("{f} is not a file".format(f=self.ref_fasta))
     ref_lines = {}
     with open(self.ref_csv) as csv_ref:
         c = csv.reader(csv_ref, delimiter=',')
         for row in c:
             if row[0] != "Movie":
                 ref_lines[(row[0],row[1])] = row
     for i_file, file_name in enumerate(self.ccs_ds.toExternalFiles()):
         out_dir = op.join(self.run_dir, "out_{i}".format(i=i_file))
         args = ["ccscheck", file_name, out_dir, self.ref_fasta]
         assert subprocess.call(args) == 0, \
             "cmd '{a}' failed".format(a=" ".join(args))
         csv_file = op.join(out_dir, "zmws.csv")
         with open(csv_file) as f:
             c = csv.reader(f, delimiter=',')
             for row in c:
                 if row[0] != "Movie":
                     csv_ref = ref_lines.get((row[0], row[1]), None)
                     if csv_ref is None:
                         log.warn("{m}/{z} not found".format(
                                  m=row[0], z=row[1]))
                     else:
                         self.assertEqual(row, csv_ref)
Exemplo n.º 5
0
    def test_combine_dimensional_operators_0(self):
        if self.F.operators[0].solve_banded_offsets[1] != 2:
            unittest.skipTest(
                "Using first order boundary approximation. Top is tridiag.")
        oldL1 = self.L1_.copy()
        oldL1 = scipy.sparse.dia_matrix(oldL1.todense())
        oldL1.data = oldL1.data[::-1]
        oldL1.offsets = oldL1.offsets[::-1]
        # high, low = 2, -2
        # m = tuple(oldL1.offsets).index(0)
        # oldL1.data = oldL1.data[m-high:m-low+1]
        # oldL1.offsets = oldL1.offsets[m-high:m-low+1]

        oldR1 = self.R1_.T.flatten()

        L1 = self.F.operators[0]

        # oldL1.data = oldL1.data[:-1]
        # oldL1.offsets = oldL1.offsets[:-1]

        # print "offsets"
        # print oldL1.offsets, L1.D.offsets
        # print "old"
        # fp(oldL1.data)
        # print
        # print "new"
        # fp(L1.D.data)
        # print
        # print "diff"
        # fp(L1.D.data - oldL1.data)
        # print
        # print "old"
        # fp(oldL1.todense())
        # print
        # print "new"
        # fp(L1.D.todense())
        # print
        # print "diff"
        # fp(oldL1.todense() - L1.D.todense())
        # npt.assert_allclose(L1.D.todense(), oldL1.todense())
        # npt.assert_allclose(L1.D.data, oldL1.data)
        # print "old"
        # print oldR1
        # print
        # print "new"
        # print L1.R
        npt.assert_allclose(L1.R, oldR1)
    def test_combine_dimensional_operators_0(self):
        if self.F.operators[0].solve_banded_offsets[1] != 2:
            unittest.skipTest("Using first order boundary approximation. Top is tridiag.")
        oldL1 = self.L1_.copy()
        oldL1 = scipy.sparse.dia_matrix(oldL1.todense())
        oldL1.data = oldL1.data[::-1]
        oldL1.offsets = oldL1.offsets[::-1]
        # high, low = 2, -2
        # m = tuple(oldL1.offsets).index(0)
        # oldL1.data = oldL1.data[m-high:m-low+1]
        # oldL1.offsets = oldL1.offsets[m-high:m-low+1]

        oldR1 = self.R1_.T.flatten()

        L1 = self.F.operators[0]

        # oldL1.data = oldL1.data[:-1]
        # oldL1.offsets = oldL1.offsets[:-1]

        # print "offsets"
        # print oldL1.offsets, L1.D.offsets
        # print "old"
        # fp(oldL1.data)
        # print
        # print "new"
        # fp(L1.D.data)
        # print
        # print "diff"
        # fp(L1.D.data - oldL1.data)
        # print
        # print "old"
        # fp(oldL1.todense())
        # print
        # print "new"
        # fp(L1.D.todense())
        # print
        # print "diff"
        # fp(oldL1.todense() - L1.D.todense())
        # npt.assert_allclose(L1.D.todense(), oldL1.todense())
        # npt.assert_allclose(L1.D.data, oldL1.data)
        # print "old"
        # print oldR1
        # print
        # print "new"
        # print L1.R
        npt.assert_allclose(L1.R, oldR1)
Exemplo n.º 7
0
 def get_real_credentials(self):
     """Load real Project Euler credentials from test.conf.json next to this file.

     Returns the parsed JSON dict; raises unittest.SkipTest when the file is
     missing so credential-dependent tests are skipped rather than failing.
     """
     dir_name = os.path.dirname(__file__)
     file_name = "test.conf.json"
     try:
         with open(os.path.join(dir_name, file_name), "r") as handler:
             return json.load(handler)
     except IOError:
         # Two fixes: unittest.skipTest does not exist (SkipTest is the
         # exception class), and the message's {} placeholders were never
         # filled in.  The literal JSON braces must be doubled for .format.
         raise unittest.SkipTest(
             "For all tests implying a real connection to the Project Euler "
             "website, please create a file named {0} in {1} which is a json "
             "file containing something like : "
             '{{"username": "******", "password": "******"}}'.format(
                 file_name, dir_name)
         )
Exemplo n.º 8
0
 def setUpClass(cls):
     """Stage the reference FASTA in a temp dir and locate the final CCS file.

     Skips the whole test case when no ccscheck reference CSV is configured.
     """
     super(TestAccuracy, cls).setUpClass()
     ref_fasta = cls.test_values["ccs"].get("reference", None)
     cls.ref_csv = cls.test_values["ccs"].get("ccscheck_out", None)
     if cls.ref_csv is None:
         # was `raise unittest.skipTest(...)` -- the module has no lowercase
         # skipTest attribute; SkipTest is the exception unittest recognizes.
         raise unittest.SkipTest("No CSV file defined")
     cls.run_dir = tempfile.mkdtemp()
     # Copy the reference into the temp run dir so faidx writes its index
     # there instead of next to the original file.
     tmp_ref_fasta = op.join(cls.run_dir, op.basename(ref_fasta))
     shutil.copyfile(ref_fasta, tmp_ref_fasta)
     cls.ref_fasta = tmp_ref_fasta
     pysam.faidx(tmp_ref_fasta)
     cls.final_ccs_file = None
     # Pick the first non-chunked CCS dataset from the datastore.
     # (.items() replaces Python-2-only .iteritems(); works on both.)
     for file_id, file_info in cls.datastore.get_file_dict().items():
         if file_info.is_chunked:
             continue
         if file_info.file_type_id == FileTypes.DS_CCS.file_type_id:
             cls.final_ccs_file = file_info.path
             break
     cls.ccs_ds = ConsensusReadSet(cls.final_ccs_file)
Exemplo n.º 9
0
 def setUpClass(cls):
     """Stage the reference FASTA in a temp dir and locate the final CCS file.

     Skips the whole test case when no ccscheck reference CSV is configured.
     """
     super(TestAccuracy, cls).setUpClass()
     ref_fasta = cls.test_values["ccs"].get("reference", None)
     cls.ref_csv = cls.test_values["ccs"].get("ccscheck_out", None)
     if cls.ref_csv is None:
         # SkipTest (capital S) is the exception class; unittest.skipTest
         # does not exist and raised AttributeError here.
         raise unittest.SkipTest("No CSV file defined")
     cls.run_dir = tempfile.mkdtemp()
     # Copy the reference into the temp run dir so the faidx index lands
     # there rather than beside the original file.
     tmp_ref_fasta = op.join(cls.run_dir, op.basename(ref_fasta))
     shutil.copyfile(ref_fasta, tmp_ref_fasta)
     cls.ref_fasta = tmp_ref_fasta
     pysam.faidx(tmp_ref_fasta)
     cls.final_ccs_file = None
     # First non-chunked CCS dataset wins.
     # (.items() replaces Python-2-only .iteritems(); works on both.)
     for file_id, file_info in cls.datastore.get_file_dict().items():
         if file_info.is_chunked:
             continue
         if file_info.file_type_id == FileTypes.DS_CCS.file_type_id:
             cls.final_ccs_file = file_info.path
             break
     cls.ccs_ds = ConsensusReadSet(cls.final_ccs_file)
Exemplo n.º 10
0
import unittest
import os
import requests
import datetime
import time

# Factory for a "skip this test" decorator, set only when selenium is absent.
skipSeleniumTests = None
try:
    from selenium import webdriver
    from selenium.webdriver.common.keys import Keys
except ImportError:
    def skipSeleniumTests():
        """Return a decorator that marks selenium-dependent tests as skipped."""
        # unittest.skipTest does not exist; unittest.skip(reason) is the
        # supported API for building a skip decorator.
        return unittest.skip("Selenium not installed.")

import utils
import sdk

class Link(unittest.TestCase):

    def setUp(self):
        """Create a fresh test file and a link pointing at it for each test."""
        uploaded = utils.create_test_file(self.account)
        self.test_file = uploaded
        payload = {'file_id': uploaded.id}
        self.link = self.account.links.create(data=payload)

    def tearDown(self):
        # Remove the link created in setUp so each test starts clean.
        self.link.delete()

    def test_create_link(self):
        self.assertEqual(self.link.file_id, self.test_file.id)

    def test_create_direct_link(self):
        self.link2 = self.account.links.create(data={
Exemplo n.º 11
0
import unittest
import os
import requests
import datetime
import time

# Factory for a "skip this test" decorator, set only when selenium is absent.
skipSeleniumTests = None
try:
    from selenium import webdriver
    from selenium.webdriver.common.keys import Keys
except ImportError:
    def skipSeleniumTests():
        """Return a decorator that marks selenium-dependent tests as skipped."""
        # unittest.skipTest does not exist; unittest.skip(reason) builds the
        # skip decorator that the test runner understands.
        return unittest.skip("Selenium not installed.")

import utils
import sdk


class Link(unittest.TestCase):
    def setUp(self):
        """Create a fresh test file and a link pointing at it for each test."""
        new_file = utils.create_test_file(self.account)
        self.test_file = new_file
        self.link = self.account.links.create(data={'file_id': new_file.id})

    def tearDown(self):
        # Remove the link created in setUp so each test starts clean.
        self.link.delete()

    def test_create_link(self):
        self.assertEqual(self.link.file_id, self.test_file.id)

    def test_create_direct_link(self):
        self.link2 = self.account.links.create(data={
Exemplo n.º 12
0
 def setUp(self):
     """Set up logging; skip the test on CircleCI (no COCO data / model zoo there)."""
     setup_logger()
     if os.environ.get("CIRCLECI"):
         # `unittest.skipTest(...)` does not exist as a module function and
         # its result was discarded anyway -- the test ran regardless.
         # Raising SkipTest actually skips it.
         raise unittest.SkipTest("Require COCO data and model zoo.")
Exemplo n.º 13
0
 def setUp(self):
     """Skip the whole test when the `mpirun` launcher is not on PATH."""
     try:
         subprocess.call(['mpirun', '--version'])
     except FileNotFoundError:
         # `unittest.skipTest(...)` does not exist (SkipTest is the
         # exception class), so the old code raised AttributeError here.
         raise unittest.SkipTest('mpirun not found, skipping tests')