Example #1
    def test_subprocess(self):
        """Instead of the ``argv`` shortcut, subclasses can also use the
        ``subprocess`` helper manually.
        """

        class Filter(ExternalTool): pass

        # Without stdin data
        self.popen.return_value.returncode = 0
        self.popen.return_value.communicate.return_value = ['stdout', 'stderr']
        out = StringIO()
        Filter.subprocess(['test'], out)
        assert out.getvalue() == 'stdout'
        self.popen.return_value.communicate.assert_called_with(None)

        # With stdin data
        self.popen.reset_mock()
        self.popen.return_value.returncode = 0
        self.popen.return_value.communicate.return_value = ['stdout', 'stderr']
        out = StringIO()
        Filter.subprocess(['test'], out, data='data')
        assert out.getvalue() == 'stdout'
        self.popen.return_value.communicate.assert_called_with('data')

        # With error
        self.popen.return_value.returncode = 1
        self.popen.return_value.communicate.return_value = ['stdout', 'stderr']
        assert_raises(FilterError, Filter.subprocess, ['test'], StringIO())
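
For context, a hedged sketch (not part of the test above) of how a subclass might call the ``subprocess`` helper itself rather than relying on the ``argv`` shortcut; the filter name and command are illustrative only and assume the same ``ExternalTool`` API the test exercises:

class UppercaseFilter(ExternalTool):
    # Hypothetical filter: pipe the input through ``tr`` manually.
    def input(self, _in, out, **kw):
        self.subprocess(['tr', 'a-z', 'A-Z'], out, data=_in.read())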
Example #2
    def toXml(self, filename=u''):
        """
        Converts the document to valid XML.
        @param filename unicode string: the name of a file, defaults to
        an empty string.
        @return if filename is not empty, the XML is written to that file
        (UTF-8 encoded) and the method returns None; otherwise the method
        returns a unicode string containing the XML.
        """
        assert type(filename) == type(u"")

        result=None
        xml=StringIO()
        xml.write(_XMLPROLOGUE)
        self.body.toXml(0, xml)
        if not filename:
            result=xml.getvalue()
        else:
            f=codecs.open(filename,'w', encoding='utf-8')
            f.write(xml.getvalue())
            f.close()
        return result
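
A minimal usage sketch of the behaviour described in the docstring (the ``doc`` object and file name below are hypothetical):

doc.toXml(u'out.xml')    # writes UTF-8 encoded XML to out.xml and returns None
xml_text = doc.toXml()   # returns a unicode string containing the XML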
Example #3
    def test_low_mapq(self):
        """ We no longer fail reads because of low mapq.

        When we use more than one reference, reads can receive low mapq if they
        are in a conserved region that matches more than one reference.
        """
        remap_file = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
Example_read_1,99,V3LOOP,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Example_read_1,147,V3LOOP,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Example_read_2,99,INT,1,8,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Example_read_2,147,INT,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
""")
        expected_aligned_csv = """\
refname,qcut,rank,count,offset,seq
INT,15,0,1,0,TGTACAAGACCCAACAACAATACAAGAAAAAG
V3LOOP,15,0,1,0,TGTACAAGACCCAACAACAATACAAGAAAAAG
"""
        expected_failed_csv = """\
qname,cause
"""
        actual_aligned_csv = StringIO()
        actual_failed_csv = StringIO()
        sam2aln(remap_file,
                actual_aligned_csv,
                failed_csv=actual_failed_csv)

        self.assertMultiLineEqual(expected_aligned_csv,
                                  actual_aligned_csv.getvalue())
        self.assertMultiLineEqual(expected_failed_csv,
                                  actual_failed_csv.getvalue())
Example #4
    def test_info_max_cols(self):
        df = DataFrame(np.random.randn(10, 5))
        for len_, verbose in [(5, None), (5, False), (10, True)]:
            # For verbose always      ^ setting  ^ summarize ^ full output
            with option_context('max_info_columns', 4):
                buf = StringIO()
                df.info(buf=buf, verbose=verbose)
                res = buf.getvalue()
                assert len(res.strip().split('\n')) == len_

        for len_, verbose in [(10, None), (5, False), (10, True)]:

            # max_cols not exceeded
            with option_context('max_info_columns', 5):
                buf = StringIO()
                df.info(buf=buf, verbose=verbose)
                res = buf.getvalue()
                assert len(res.strip().split('\n')) == len_

        for len_, max_cols in [(10, 5), (5, 4)]:
            # setting truncates
            with option_context('max_info_columns', 4):
                buf = StringIO()
                df.info(buf=buf, max_cols=max_cols)
                res = buf.getvalue()
                assert len(res.strip().split('\n')) == len_

            # setting wouldn't truncate
            with option_context('max_info_columns', 5):
                buf = StringIO()
                df.info(buf=buf, max_cols=max_cols)
                res = buf.getvalue()
                assert len(res.strip().split('\n')) == len_
Example #5
    def test_low_read_quality(self):
        remap_file = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
Example_read_1,99,V3LOOP,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,000000000000000000AAAAAAAAAAAAAA
Example_read_1,147,V3LOOP,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,000000000000000000AAAAAAAAAAAAAA
Example_read_2,99,INT,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Example_read_2,147,INT,1,44,32M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
""")
        expected_aligned_csv = """\
refname,qcut,rank,count,offset,seq
INT,15,0,1,0,TGTACAAGACCCAACAACAATACAAGAAAAAG
"""
        expected_failed_csv = """\
qname,cause
Example_read_1,manyNs
"""
        actual_aligned_csv = StringIO()
        actual_failed_csv = StringIO()
        sam2aln(remap_file,
                actual_aligned_csv,
                failed_csv=actual_failed_csv)

        self.assertMultiLineEqual(expected_aligned_csv,
                                  actual_aligned_csv.getvalue())
        self.assertMultiLineEqual(expected_failed_csv,
                                  actual_failed_csv.getvalue())
Example #6
    def assertBlock(self, python, java):
        self.maxDiff = None
        dump = False

        py_block = PyBlock(parent=PyModule('test', 'test.py'))
        if python:
            python = adjust(python)
            code = compile(python, '<test>', 'exec')
            py_block.extract(code, debug=dump)

        java_code = py_block.transpile()

        out = BytesIO()
        constant_pool = ConstantPool()
        java_code.resolve(constant_pool)

        constant_pool.add(Utf8('test'))
        constant_pool.add(Utf8('Code'))
        constant_pool.add(Utf8('LineNumberTable'))

        writer = ClassFileWriter(out, constant_pool)
        java_code.write(writer)

        debug = StringIO()
        reader = ClassFileReader(BytesIO(out.getbuffer()), constant_pool, debug=debug)
        JavaCode.read(reader, dump=0)

        if dump:
            print(debug.getvalue())

        java = adjust(java)
        self.assertEqual(debug.getvalue(), java[1:])
Example #7
    def test_insertion(self):
        remap_file = StringIO("""\
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq,qual
Example_read_1,99,V3LOOP,1,44,12M6I14M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Example_read_1,147,V3LOOP,1,44,12M6I14M,=,1,-32,TGTACAAGACCCAACAACAATACAAGAAAAAG,AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
""")
        expected_aligned_csv = """\
refname,qcut,rank,count,offset,seq
V3LOOP,15,0,1,0,TGTACAAGACCCAATACAAGAAAAAG
"""
        expected_insert_csv = """\
qname,fwd_rev,refname,pos,insert,qual
Example_read_1,F,V3LOOP,12,AACAAC,AAAAAA
Example_read_1,R,V3LOOP,12,AACAAC,AAAAAA
"""
        actual_aligned_csv = StringIO()
        actual_insert_csv = StringIO()
        sam2aln(remap_file,
                actual_aligned_csv,
                actual_insert_csv)

        self.assertMultiLineEqual(expected_aligned_csv,
                                  actual_aligned_csv.getvalue())
        self.assertMultiLineEqual(expected_insert_csv,
                                  actual_insert_csv.getvalue())
Example #8
class TFramedTransport(TTransportBase, CReadableTransport):
  """Class that wraps another transport and frames its I/O when writing."""

  def __init__(self, trans,):
    self.__trans = trans
    self.__rbuf = StringIO()
    self.__wbuf = StringIO()

  def isOpen(self):
    return self.__trans.isOpen()

  def open(self):
    return self.__trans.open()

  def close(self):
    return self.__trans.close()

  def read(self, sz):
    ret = self.__rbuf.read(sz)
    if len(ret) != 0:
      return ret

    self.readFrame()
    return self.__rbuf.read(sz)

  def readFrame(self):
    buff = self.__trans.readAll(4)
    sz, = unpack('!i', buff)
    self.__rbuf = StringIO(self.__trans.readAll(sz))

  def write(self, buf):
    self.__wbuf.write(buf)

  def flush(self):
    wout = self.__wbuf.getvalue()
    wsz = len(wout)
    # reset wbuf before write/flush to preserve state on underlying failure
    self.__wbuf = StringIO()
    # N.B.: Doing this string concatenation is WAY cheaper than making
    # two separate calls to the underlying socket object. Socket writes in
    # Python turn out to be REALLY expensive, but it seems to do a pretty
    # good job of managing string buffer operations without excessive copies
    buf = pack("!i", wsz) + wout
    self.__trans.write(buf)
    self.__trans.flush()

  # Implement the CReadableTransport interface.
  @property
  def cstringio_buf(self):
    return self.__rbuf

  def cstringio_refill(self, prefix, reqlen):
    # self.__rbuf will already be empty here because fastbinary doesn't
    # ask for a refill until the previous buffer is empty.  Therefore,
    # we can start reading new frames immediately.
    while len(prefix) < reqlen:
      self.readFrame()
      prefix += self.__rbuf.getvalue()
    self.__rbuf = StringIO(prefix)
    return self.__rbuf
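
As an aside, the frame format this transport reads and writes is simply a 4-byte big-endian length prefix followed by the payload. A small self-contained sketch of that wire format (independent of Thrift itself):

from io import BytesIO
from struct import pack, unpack

payload = b'hello'
framed = pack('!i', len(payload)) + payload   # length header + payload, as in flush()

buf = BytesIO(framed)
size, = unpack('!i', buf.read(4))             # read the 4-byte header, as in readFrame()
assert buf.read(size) == payload              # then exactly ``size`` bytes of payload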
Example #9
 def cmd(self, *args, **kw):
     exit_code = kw.get('exit_code', 0)
     fork = kw.get('fork', False)
     if fork:
         try:
             output = subprocess.check_output((sys.executable, '-m', 'borg.archiver') + args)
             ret = 0
         except subprocess.CalledProcessError as e:
             output = e.output
             ret = e.returncode
         output = os.fsdecode(output)
         if ret != exit_code:
             print(output)
         self.assert_equal(exit_code, ret)
         return output
     args = list(args)
     stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
     try:
         sys.stdin = StringIO()
         output = StringIO()
         sys.stdout = sys.stderr = output
         ret = self.archiver.run(args)
         sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr
         if ret != exit_code:
             print(output.getvalue())
         self.assert_equal(exit_code, ret)
         return output.getvalue()
     finally:
         sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr
Example #10
def test_ecsv_mixins_as_one(table_cls):
    """Test write/read all cols at once and validate intermediate column names"""
    names = sorted(mixin_cols)

    serialized_names = ['ang',
                        'dt',
                        'el.x', 'el.y', 'el.z',
                        'lat',
                        'lon',
                        'q',
                        'sc.ra', 'sc.dec',
                        'scc.x', 'scc.y', 'scc.z',
                        'scd.ra', 'scd.dec', 'scd.distance',
                        'scd.obstime',
                        'tm',  # serialize_method is formatted_value
                        'tm2',  # serialize_method is formatted_value
                        'tm3.jd1', 'tm3.jd2',    # serialize is jd1_jd2
                        'tm3.location.x', 'tm3.location.y', 'tm3.location.z']

    t = table_cls([mixin_cols[name] for name in names], names=names)

    out = StringIO()
    t.write(out, format="ascii.ecsv")
    t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')

    assert t.colnames == t2.colnames

    # Read as an ascii.basic table (skip all the ECSV junk)
    t3 = table_cls.read(out.getvalue(), format='ascii.basic')
    assert t3.colnames == serialized_names
Example #11
 def scenario_parse_args_exits(self, argv):
     with self.assertRaises(SystemExit) as cm:
         stdout = StringIO()
         stderr = StringIO()
         with Redirect(stdout=stdout, stderr=stderr):
             parsed_args = args.parse(argv)
     return (stdout.getvalue(), stderr.getvalue(), cm.exception.code)
Example #12
 def test_include_partitions(self):
     """inspectdb --include-partitions creates models for partitions."""
     with connection.cursor() as cursor:
         cursor.execute('''\
             CREATE TABLE inspectdb_partition_parent (name text not null)
             PARTITION BY LIST (left(upper(name), 1))
         ''')
         cursor.execute('''\
             CREATE TABLE inspectdb_partition_child
             PARTITION OF inspectdb_partition_parent
             FOR VALUES IN ('A', 'B', 'C')
         ''')
     out = StringIO()
     partition_model_parent = 'class InspectdbPartitionParent(models.Model):'
     partition_model_child = 'class InspectdbPartitionChild(models.Model):'
     partition_managed = 'managed = False  # Created from a partition.'
     try:
         call_command('inspectdb', table_name_filter=inspectdb_tables_only, stdout=out)
         no_partitions_output = out.getvalue()
         self.assertIn(partition_model_parent, no_partitions_output)
         self.assertNotIn(partition_model_child, no_partitions_output)
         self.assertNotIn(partition_managed, no_partitions_output)
         call_command('inspectdb', table_name_filter=inspectdb_tables_only, include_partitions=True, stdout=out)
         with_partitions_output = out.getvalue()
         self.assertIn(partition_model_parent, with_partitions_output)
         self.assertIn(partition_model_child, with_partitions_output)
         self.assertIn(partition_managed, with_partitions_output)
     finally:
         with connection.cursor() as cursor:
             cursor.execute('DROP TABLE IF EXISTS inspectdb_partition_child')
             cursor.execute('DROP TABLE IF EXISTS inspectdb_partition_parent')
Example #13
    def runTest(self): # noqa
        try:
            out = StringIO()
            sys.stdout = out

            logger = Mann(console=True)
            logger.log('foo')

            output = out.getvalue().strip()

            self.assertEqual(output, 'foo')
        finally:
            sys.stdout = sys.__stdout__

        try:
            out = StringIO()
            sys.stdout = out

            logger = Mann()
            logger.log('foo')

            output = out.getvalue().strip()

            self.assertEqual(output, '')
        finally:
            sys.stdout = sys.__stdout__
Example #14
	def test_handler_args_kwargs(self):
		out = StringIO()
		err = StringIO()
		stream = StringIO()
		file_name = 'test_logging-test_handler_args_kwargs'

		with replace_attr(sys, 'stdout', out, 'stderr', err):
			common_config = finish_common_config('utf-8', {'log_file': [
				['RotatingFileHandler', [[file_name], {'maxBytes': 1, 'backupCount': 1}]]
			]})
			try:
				try:
					logger, pl, get_module_attr = create_logger(common_config, stream=stream)
					pl.error('Foo')
					pl.error('Bar')
					close_handlers(logger)
					with codecs.open(file_name, encoding='utf-8') as fp:
						self.assertMatches(fp.read(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Bar\n$')
					with codecs.open(file_name + '.1', encoding='utf-8') as fp:
						self.assertMatches(fp.read(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Foo\n$')
				finally:
					os.unlink(file_name + '.1')
			finally:
				os.unlink(file_name)
			self.assertEqual(stream.getvalue(), '')
			self.assertEqual(err.getvalue(), '')
			self.assertEqual(out.getvalue(), '')
Example #15
    def test_merge_two_csv(self):
        # This test is not fully implemented yet.
        print("\n")

        _tmpstdout = StringIO()
        _tmpstderr = StringIO()

        _call = (sys.executable + " " +
                 os.path.abspath(os.path.join(Test_Script_Dir, "..", "optimal_sync.py")) +
                 " -d " +
                 os.path.join(Test_Resource_Dir, "test_merge_two_files.json"))
        print("Call: " + _call)
        try:
            _res = check_output(_call, shell=True, cwd=Test_Script_Dir)
            print("Result : " + str(_res))
        except UnsupportedOperation as e:
            print("Error: " + str(e.args))
        except Exception as e:
            print("Error: " + str(e))
        with open(os.path.join(Test_Resource_Dir, "csv_out.csv")) as _f_out:
            _output = _f_out.read()
        with open(os.path.join(Test_Resource_Dir, "csv_cmp.csv")) as _f_cmp:
            _cmp = _f_cmp.read()
        self.assertEqual(_output, _cmp, diff_strings(_output, _cmp))
        print("out : " + str(_tmpstdout.getvalue()))
        print("err : " + str(_tmpstderr.getvalue()))
        print("Done")
        self.assertEqual(True, True)
Example #16
def test_fail_gracefully_on_bogus__qualname__and__name__():
    # Test that we correctly repr types that have non-string values for both
    # __qualname__ and __name__

    class Meta(type):
        __name__ = 5

    class Type(object):
        __metaclass__ = Meta
        __qualname__ = 5

    stream = StringIO()
    printer = pretty.RepresentationPrinter(stream)

    printer.pretty(Type)
    printer.flush()
    output = stream.getvalue()

    # If we can't find __name__ or __qualname__ just use a sentinel string.
    expected = '.'.join([__name__, '<unknown type>'])
    assert_equal(output, expected)

    # Clear stream buffer.
    stream.buf = ''

    # Test repring of an instance of the type.
    instance = Type()
    printer.pretty(instance)
    printer.flush()
    output = stream.getvalue()

    # Should look like:
    # <IPython.lib.tests.test_pretty.<unknown type> at 0x7f7658ae07d0>
    prefix = '<' + '.'.join([__name__, '<unknown type>']) + ' at 0x'
    assert_true(output.startswith(prefix))
Example #17
 def test_write_multiple_dict_rows(self):
     fileobj = StringIO()
     writer = csv.DictWriter(fileobj, fieldnames=["f1", "f2", "f3"])
     writer.writeheader()
     self.assertEqual(fileobj.getvalue(), "f1,f2,f3\r\n")
     writer.writerows([{"f1": 1, "f2": "abc", "f3": "f"}, {"f1": 2, "f2": 5, "f3": "xyz"}])
     self.assertEqual(fileobj.getvalue(), "f1,f2,f3\r\n1,abc,f\r\n2,5,xyz\r\n")
Example #18
def multiple_alignment(fasta_dict, alignment_type=SeqTypeData().TYPE_DEFAULT):
    in_handle = StringIO()
    fasta_tools.write_fasta_handle(in_handle, fasta_dict)

    muscle_cmd = SeqTypeData().type2cmd[alignment_type]
    child = subprocess.Popen(str(muscle_cmd), stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             shell=(sys.platform != "win32"))
    if not child:
        print("Process was not created!")
        return

    if sys.version_info[0] == 3:
        child.stdin.write(bytes(in_handle.getvalue(), 'utf-8'))
        child.stdin.close()
        align = AlignIO.read(StringIO("".join(line.decode() for line in child.stdout)), "clustal")
    else:
        child.stdin.write(in_handle.getvalue())
        child.stdin.close()
        align = AlignIO.read(child.stdout, "clustal")
    fd = copy.deepcopy(fasta_dict)
    for a in align:
        fd.set(a.id, str(a.seq))

    return fd
Example #19
def test_fallback_to__name__on_type():
    # Test that we correctly repr types that have non-string values for
    # __qualname__ by falling back to __name__

    class Type(object):
        __qualname__ = 5

    # Test repring of the type.
    stream = StringIO()
    printer = pretty.RepresentationPrinter(stream)

    printer.pretty(Type)
    printer.flush()
    output = stream.getvalue()

    # If __qualname__ is malformed, we should fall back to __name__.
    expected = '.'.join([__name__, Type.__name__])
    assert_equal(output, expected)

    # Clear stream buffer.
    stream.buf = ''

    # Test repring of an instance of the type.
    instance = Type()
    printer.pretty(instance)
    printer.flush()
    output = stream.getvalue()

    # Should look like:
    # <IPython.lib.tests.test_pretty.Type at 0x7f7658ae07d0>
    prefix = '<' + '.'.join([__name__, Type.__name__]) + ' at 0x'
    assert_true(output.startswith(prefix))
Example #20
    def test_serialization(self):
        "m2m-through models aren't serialized as m2m fields. Refs #8134"
        pks = {"p_pk": self.bob.pk, "g_pk": self.roll.pk, "m_pk": self.bob_roll.pk}

        out = StringIO()
        management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
        self.assertJSONEqual(
            out.getvalue().strip(),
            '[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": '
            '100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": '
            '"Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]'
            % pks
        )

        out = StringIO()
        management.call_command("dumpdata", "m2m_through_regress", format="xml", indent=2, stdout=out)
        self.assertXMLEqual(out.getvalue().strip(), """
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
  <object pk="%(m_pk)s" model="m2m_through_regress.membership">
    <field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field>
    <field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field>
    <field type="IntegerField" name="price">100</field>
  </object>
  <object pk="%(p_pk)s" model="m2m_through_regress.person">
    <field type="CharField" name="name">Bob</field>
  </object>
  <object pk="%(g_pk)s" model="m2m_through_regress.group">
    <field type="CharField" name="name">Roll</field>
  </object>
</django-objects>
        """.strip() % pks)
Example #21
 def test_dict(self):
     d = SqliteDict()
     self.run_dict_tests( d)
     
     d = SqliteDict(index=False)
     self.run_dict_tests(d)
     d["extra"] = "item"
     
     d2 = SqliteDict({1:"a", 2:"b", 3:"c", "1":5})
     d.update(d2)
     self.assertEqual(7, len(d))
     self.assertIn(1, d)
     self.assertIn(2, d)
     self.assertEqual(5, d["1"])
     
     d = SqliteDict({1:"a", 2:"b", 3:"c", "1":5}, filename="mydict.sqlite")
     d2 = SqliteDict(filename="mydict.sqlite")
     
     self.assertEqual(set([x for x in d.items()]), set([x for x in d2.items()]))
     
     d = SqliteDict({1:"a", 2:"b", 3:"c", "1":5})
     self.assertEqual(4, len(d))
     d.clear()
     self.assertEqual(0, len(d))
     
     d = SqliteDict({1:"a", 2:"b", 3:"c"})
     io = StringIO()
     d.write(io)
     self.assertEqual({"1":"a", "2":"b", "3":"c"}, json.loads(io.getvalue()))
     
     d = SqliteDict({1:"a", 2:"b", 3:"c"})
     io = StringIO()
     d.write_lines(io)
     values = sorted([line.split("\t") for line in io.getvalue().strip().split("\n")])
     self.assertEqual([["1", '"a"'], ["2", '"b"'], ['3', '"c"']], values)
Example #22
    def test_printer(self):
        # Test the printer class
        m1 = dsl_mock('m1')
        m2 = dsl_mock('m2')
        m3 = dsl_mock('m3')
        o1 = other_mock('o1')
        f  = StringIO()
        p = printer(endl=';',output_file=f,tab=2)
        p.out( m1 )
        self.assertEqual( f.getvalue(), 'm1;\n' )
        p.indent()
        p.out( 199 )
        self.assertEqual( f.getvalue(), 'm1;\n  199;\n' )
        p.indent()
        p.out( m2 )
        self.assertEqual( f.getvalue(), 'm1;\n  199;\n    m2;\n')
        p.outdent()
        p.outdent()
        p.out( m3 )
        self.assertEqual( f.getvalue(), 'm1;\n  199;\n    m2;\nm3;\n')
        p.out( o1 )
        self.assertEqual( f.getvalue(), 'm1;\n  199;\n    m2;\nm3;\n__o1__;\n')

        # Negative indent not allowed
        self.assertRaises(AssertionError, p.outdent)
Example #23
def cbFun(sendRequesthandle, errorIndication, errorStatus, errorIndex,
          varBindTable, cbCtx):
    global oid_list
    global maxRepetitions
    if errorIndication:
        print(errorIndication)
        return  # stop on error
    if errorStatus:
        print('%s at %s' % (
            errorStatus.prettyPrint(),
            errorIndex and varBindTable[-1][int(errorIndex)-1] or '?'
            )
        )
        return  # stop on error
    #print(varBindTable)
    for varBindRow in varBindTable:
        if maxRepetitions == 0:  # if the remaining count is zero
            return  # stop and return
        else:
            for oid, val in varBindTable[0]:
                o = StringIO()
                print(oid, file=o)
                oid_get = o.getvalue().strip()  # convert by printing into a StringIO, then read the text back
                o.close()
                v = StringIO()
                print(val, file=v)
                val_get = v.getvalue().strip()  # convert by printing into a StringIO, then read the text back
                v.close()
                oid_list.append((oid_get, val_get))  # append the (oid, val) pair to the global oid_list
        maxRepetitions -= 1  # decrement the remaining count
    return True  # signal the dispatcher to continue walking
Example #24
 def test_detect_wig_pos(self):
     si.replicate_comparison = self.mock.mock_replicate_comparison
     nums = {'pro': 3, 'tss': 3, 'uni': 0, 'cds': 3, 'ta': 3}
     out_table = StringIO()
     output = StringIO()
     args = self.mock_args.mock()
     args.texs = "texs"
     args.replicates = "rep"
     args.max_len = 300
     args.min_len = 30
     args.decrease_inter = 50
     args.fuzzy_inter = 5
     args.tex_notex = "tex_notex"
     args.pros = copy.deepcopy(self.example.pros)
     tas = copy.deepcopy(self.example.tas)
     args.table_best = True
     args.nums = nums
     args.out_table = out_table
     args.output = output
     args.tolerance = 5
     si.detect_wig_pos(self.example.wigs, tas[0], 20, 70, "TSS_160+",
                       10, 20, args)
     self.assertEqual(output.getvalue(),
                      ("aaa\tANNOgesic\tncRNA\t20\t190\t.\t+\t.\t"
                       "ID=aaa_srna0;Name=sRNA_00000;sRNA_type=intergenic;"
                       "with_TSS=TSS_160+;end_cleavage=Cleavage:190_+;"
                       "best_avg_coverage=40;best_high_coverage=50;"
                       "best_low_coverage=10\n"))
     self.assertEqual(out_table.getvalue(),
                      ("aaa\t00000\t20\t190\t+\tcond1\t"
                       "test1\t40\t50\t10\t\n"))
Example #25
class BaseReporter(object):

    """Reporter Object to collate the failed tests"""

    def __init__(self):

        self.report_logger = logging.getLogger('servercheck')

        self.failed_tests = StringIO()

        report_handler = logging.StreamHandler(self.failed_tests)
        report_handler.addFilter(FailFilter())
        report_handler.setLevel(logging.WARNING)

        self.report_logger.addHandler(report_handler)

    def report(self):
        failed_tests = self.failed_tests.getvalue()

        msg = '----- Report: -----\n\n'
        if failed_tests:
            msg += self.failed_tests.getvalue()
            log_level = 'warn'
        else:
            msg += '\033[1;32mAll tests passed, YAY!\033[0m\n'
            log_level = 'info'

        msg += '\n-------------------'

        getattr(self.report_logger, log_level)(msg)
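
A hedged usage sketch (assuming ``FailFilter`` admits failure records logged at WARNING or above): failures routed through the ``servercheck`` logger accumulate in the in-memory buffer until ``report()`` is called.

import logging

reporter = BaseReporter()
logging.getLogger('servercheck').warning('disk check failed')  # captured by the report handler
reporter.report()  # emits the collated '----- Report: -----' block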
Example #26
    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Renders `data` into serialized XML.
        """
        if data is None:
            return ''

        stream = StringIO()

        xml = SimplerXMLGenerator(stream, self.charset)
        xml.startDocument()
        # If we do not have users or request_user then we have errors
        if not data.get('users', False) and not data.get('request_user', False):
            self._to_errors(data, xml)
            xml.endDocument()
            return stream.getvalue()

        # If users are a list, deal with that
        if type(data['users']) is QuerySet or type(data['users']) is list:
            xml.startElement('people', {'type': 'array'})

            self._to_xml(data['users'], data['request_user'], xml)

            xml.endElement('people')
        # Otherwise just render a person
        else:
            self.render_person(data['users'], data['request_user'], xml)
        xml.endDocument()
        return stream.getvalue()
Example #27
 def test_print_file(self):
     string = "aaa\tintergenic\tsRNA\t10\t15\t.\t+\t."
     nums = {'pro': 3, 'tss': 3, 'uni': 0, 'cds': 3, 'ta': 3}
     out_table = StringIO()
     output = StringIO()
     srna_datas = {"high": 20, "low": 5, "best": 13,
                   "conds": {"cond1": "test1"},
                   "detail": [{"track": "test1", "high": 30,
                               "low": 10, "avg": 15},
                              {"track": "test2", "high": 25,
                               "low": 13, "avg": 20}]}
     args = self.mock_args.mock()
     args.nums = nums
     args.out_table = out_table
     args.output = output
     args.table_best = False
     si.print_file(string, "TSS_160+", srna_datas, "intergenic",
                   args, "aaa")
     self.assertEqual(out_table.getvalue(),
                      ("aaa\t00000\t10\t15\t+\tcond1\ttest1\t13\t20\t5\t"
                       "TSS_160+\ttest1(avg=15;high=30;low=10);"
                       "test2(avg=20;high=25;low=13)\n"))
     self.assertEqual(output.getvalue(),
                      ("aaa\tintergenic\tsRNA\t10\t15\t.\t+\t.\t"
                       "ID=aaa_srna0;Name=sRNA_00000;sRNA_type=intergenic;"
                       "with_TSS=TSS_160+;best_avg_coverage=13;"
                       "best_high_coverage=20;best_low_coverage=5\n"))
Example #28
File: unit.py Project: dtjm/py-yajl
class DumpOptionsTests(unittest.TestCase):
    stream = None
    def setUp(self):
        self.stream = StringIO()

    def test_indent_four(self):
        rc = yajl.dump({'foo' : 'bar'}, self.stream, indent=4)
        expected = '{\n    "foo": "bar"\n}\n'
        self.assertEquals(self.stream.getvalue(), expected)

    def test_indent_zero(self):
        rc = yajl.dump({'foo' : 'bar'}, self.stream, indent=0)
        expected = '{\n"foo": "bar"\n}\n'
        self.assertEquals(self.stream.getvalue(), expected)

    def test_indent_str(self):
        self.failUnlessRaises(TypeError, yajl.dump, {'foo' : 'bar'}, self.stream, indent='4')

    def test_negative_indent(self):
        ''' Negative `indent` should not result in pretty printing '''
        rc = yajl.dump({'foo' : 'bar'}, self.stream, indent=-1)
        self.assertEquals(self.stream.getvalue(), '{"foo":"bar"}')

    def test_none_indent(self):
        ''' None `indent` should not result in pretty printing '''
        rc = yajl.dump({'foo' : 'bar'}, self.stream, indent=None)
        self.assertEquals(self.stream.getvalue(), '{"foo":"bar"}')
Example #29
    def test_info_memory_usage_qualified(self):

        buf = StringIO()
        df = DataFrame(1, columns=list('ab'),
                       index=[1, 2, 3])
        df.info(buf=buf)
        assert '+' not in buf.getvalue()

        buf = StringIO()
        df = DataFrame(1, columns=list('ab'),
                       index=list('ABC'))
        df.info(buf=buf)
        assert '+' in buf.getvalue()

        buf = StringIO()
        df = DataFrame(1, columns=list('ab'),
                       index=pd.MultiIndex.from_product(
                           [range(3), range(3)]))
        df.info(buf=buf)
        assert '+' not in buf.getvalue()

        buf = StringIO()
        df = DataFrame(1, columns=list('ab'),
                       index=pd.MultiIndex.from_product(
                           [range(3), ['foo', 'bar']]))
        df.info(buf=buf)
        assert '+' in buf.getvalue()
Example #30
def run_level36():
    f = """\
4
    2 1 1 2
   3 3 3 . .
  2 3 3 . 4 .
 . 2 . 2 4 3 2
  2 2 . . . 2
   4 3 4 . .
    3 2 3 3
"""
    order = DESCENDING
    strategy = Done.FIRST_STRATEGY
    output = StringIO()
    solve_file(f, strategy, order, output)
    expected = """\
   3 4 3 2 
  3 4 4 . 3 
 2 . . 3 4 3 
2 . 1 . 3 . 2 
 3 3 . 2 . 2 
  3 . 2 . 2 
   2 2 . 1 
"""
    if output.getvalue() != expected:
        raise AssertionError("got a wrong answer:\n%s" % output.getvalue())
Example #31
def test_split_file_writer_with_json(s3, folder, filename, compress,
                                     filewriter_type):
    """Test jsonline string and bytes writer"""

    bucket_name = "test"
    ext = "jsonl.gz" if compress else "jsonl"

    s3.meta.client.create_bucket(
        Bucket=bucket_name,
        CreateBucketConfiguration={"LocationConstraint": "eu-west-1"},
    )

    jsonl_data = [
        {
            "i": 1,
            "x1": "a",
            "x2": "b"
        },
        {
            "i": 2,
            "x1": "a",
            "x2": "b"
        },
        {
            "i": 3,
            "x1": "a",
            "x2": "b"
        },
        {
            "i": 4,
            "x1": "a",
            "x2": "b"
        },
        {
            "i": 5,
            "x1": "a",
            "x2": "b"
        },
    ]

    if filewriter_type == "string":
        f = StringSplitFileWriter(
            f"s3://{bucket_name}/{folder}",
            filename,
            max_bytes=60,
            compress_on_upload=compress,
            file_extension=ext,
        )

    elif filewriter_type == "bytes":
        f = BytesSplitFileWriter(
            f"s3://{bucket_name}/{folder}",
            filename,
            max_bytes=60,
            compress_on_upload=compress,
            file_extension=ext,
        )

    else:
        raise ValueError(
            "Input filewriter_type must be either 'string' or 'bytes'")

    # Write data
    j_writer = jsonlines.Writer(f)

    expected_file = StringIO()
    e_j_writer = jsonlines.Writer(expected_file)

    for row in jsonl_data:
        j_writer.write(row)
        e_j_writer.write(row)
    f.close()

    actual_s3_objects = sorted(
        [o.key for o in s3.Bucket(bucket_name).objects.all()])

    # Test files written to s3
    expected_s3_objects = [
        f"{folder}{filename}-0.{ext}",
        f"{folder}{filename}-1.{ext}",
        f"{folder}{filename}-2.{ext}",
    ]
    assert expected_s3_objects == actual_s3_objects

    # Test file contents
    expected = expected_file.getvalue()
    actual = ""
    for expected_object in expected_s3_objects:
        file_object = BytesIO()
        s3.Object(bucket_name, expected_object).download_fileobj(file_object)
        if compress:
            actual += gzip.decompress(file_object.getvalue()).decode("utf-8")
        else:
            actual += file_object.getvalue().decode("utf-8")
        file_object.close()

    assert actual == expected
Example #32
wrongmovefrom = 0
while not b.is_game_over():
    print("Referee Board:")
    b.prettyPrint()
    print("Before move", nbmoves)
    legals = b.legal_moves()
    print("Legal Moves: ", legals)
    nbmoves += 1
    otherplayer = (nextplayer + 1) % 2
    othercolor = Goban.Board.flip(nextplayercolor)

    currentTime = time.time()
    sys.stdout = stringio
    move = players[nextplayer].getPlayerMove()
    sys.stdout = sysstdout
    playeroutput = "\r" + stringio.getvalue()
    stringio.truncate(0)
    print(("[Player "+str(nextplayer) + "] ").join(playeroutput.splitlines(True)))
    outputs[nextplayer] += playeroutput
    totalTime[nextplayer] += time.time() - currentTime
    print("Player ", nextplayercolor, players[nextplayer].getPlayerName(), "plays" + str(move))
    if not move in legals:
        print(otherplayer, nextplayer, nextplayercolor)
        print("Problem: illegal move")
        wrongmovefrom = nextplayercolor
        break
    b.push(move)
    players[otherplayer].playOpponentMove(move)

    nextplayer = otherplayer
    nextplayercolor = othercolor
Example #33
def put_df_to_S3(df, bucket, path):
    from io import StringIO
    csv_buffer = StringIO()
    df.to_csv(csv_buffer)
    s3_resource = boto3.resource('s3')
    s3_resource.Object(bucket, path).put(Body=csv_buffer.getvalue())
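
A minimal usage sketch (bucket and key names are hypothetical; boto3 credentials are assumed to come from the usual environment or config):

import pandas as pd

df = pd.DataFrame({'id': [1, 2], 'name': ['a', 'b']})
put_df_to_S3(df, 'my-temp-bucket', 'exports/users.csv')  # uploads the CSV text to s3://my-temp-bucket/exports/users.csv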
Example #34
class OutputEventFilter(object):
    '''
    File-like object that looks for encoded job events in stdout data.
    '''

    EVENT_DATA_RE = re.compile(
        r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K')

    def __init__(self, event_callback):
        self._event_callback = event_callback
        self._counter = 0
        self._start_line = 0
        self._buffer = StringIO()
        self._last_chunk = ''
        self._current_event_data = None

    def flush(self):
        # pexpect wants to flush the file it writes to, but we're not
        # actually capturing stdout to a raw file; we're just
        # implementing a custom `write` method to discover and emit events from
        # the stdout stream
        pass

    def write(self, data):
        data = smart_str(data)
        self._buffer.write(data)

        # keep a sliding window of the last chunk written so we can detect
        # event tokens and determine if we need to perform a search of the full
        # buffer
        should_search = '\x1b[K' in (self._last_chunk + data)
        self._last_chunk = data

        # Only bother searching the buffer if we recently saw a start/end
        # token (\x1b[K)
        while should_search:
            value = self._buffer.getvalue()
            match = self.EVENT_DATA_RE.search(value)
            if not match:
                break
            try:
                base64_data = re.sub(r'\x1b\[\d+D', '', match.group(1))
                event_data = json.loads(base64.b64decode(base64_data))
            except ValueError:
                event_data = {}
            self._emit_event(value[:match.start()], event_data)
            remainder = value[match.end():]
            self._buffer = StringIO()
            self._buffer.write(remainder)
            self._last_chunk = remainder

    def close(self):
        value = self._buffer.getvalue()
        if value:
            self._emit_event(value)
            self._buffer = StringIO()
        self._event_callback(dict(event='EOF', final_counter=self._counter))

    def _emit_event(self, buffered_stdout, next_event_data=None):
        next_event_data = next_event_data or {}
        if self._current_event_data:
            event_data = self._current_event_data
            stdout_chunks = [buffered_stdout]
        elif buffered_stdout:
            event_data = dict(event='verbose')
            stdout_chunks = buffered_stdout.splitlines(True)
        else:
            stdout_chunks = []

        for stdout_chunk in stdout_chunks:
            self._counter += 1
            event_data['counter'] = self._counter
            event_data['stdout'] = stdout_chunk[:-2] if len(
                stdout_chunk) > 2 else ""
            n_lines = stdout_chunk.count('\n')
            event_data['start_line'] = self._start_line
            event_data['end_line'] = self._start_line + n_lines
            self._start_line += n_lines
            if self._event_callback:
                self._event_callback(event_data)

        if next_event_data.get('uuid', None):
            self._current_event_data = next_event_data
        else:
            self._current_event_data = None
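
To illustrate what ``EVENT_DATA_RE`` is looking for, here is a hedged sketch of the token format: base64-encoded JSON, followed by a cursor-left escape, bracketed by ``\x1b[K`` markers (the event payload below is made up):

import base64
import json
import re

event = {'uuid': 'abc123', 'event': 'runner_on_ok'}
encoded = base64.b64encode(json.dumps(event).encode('utf-8')).decode('ascii')
token = '\x1b[K' + encoded + '\x1b[%dD' % len(encoded) + '\x1b[K'

match = OutputEventFilter.EVENT_DATA_RE.search('task output\n' + token)
decoded = json.loads(base64.b64decode(re.sub(r'\x1b\[\d+D', '', match.group(1))))
assert decoded == event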
Example #35
 def test_command_start(self):
     out = StringIO()
     call_command('directord', daemon_action='start', stdout=out)
     self.assertIn('Expected output', out.getvalue())
Example #36
def generate_c_docstrings():
    from astropy.wcs import docstrings
    docstrings = docstrings.__dict__
    keys = [
        key for key in docstrings.keys()
        if not key.startswith('__') and type(docstrings[key]) in string_types
    ]
    keys.sort()
    docs = {}
    for key in keys:
        docs[key] = docstrings[key].encode('utf8').lstrip() + b'\0'

    h_file = StringIO()
    h_file.write("""/*
DO NOT EDIT!

This file is autogenerated by astropy/wcs/setup_package.py.  To edit
its contents, edit astropy/wcs/docstrings.py
*/

#ifndef __DOCSTRINGS_H__
#define __DOCSTRINGS_H__

#if defined(_MSC_VER)
void fill_docstrings(void);
#endif

""")
    for key in keys:
        val = docs[key]
        h_file.write('extern char doc_{0}[{1}];\n'.format(key, len(val)))
    h_file.write("\n#endif\n\n")

    setup_helpers.write_if_different(join(WCSROOT, 'include', 'docstrings.h'),
                                     h_file.getvalue().encode('utf-8'))

    c_file = StringIO()
    c_file.write("""/*
DO NOT EDIT!

This file is autogenerated by astropy/wcs/setup_package.py.  To edit
its contents, edit astropy/wcs/docstrings.py

The weirdness here with strncpy is because some C compilers, notably
MSVC, do not support string literals greater than 256 characters.
*/

#include <string.h>
#include "docstrings.h"

#if defined(_MSC_VER)
""")
    for key in keys:
        val = docs[key]
        c_file.write('char doc_{0}[{1}];\n'.format(key, len(val)))

    c_file.write("\nvoid fill_docstrings(void)\n{\n")
    for key in keys:
        val = docs[key]
        # For portability across various compilers, we need to fill the
        # docstrings in 256-character chunks
        for i in range(0, len(val), 256):
            chunk = string_escape(val[i:i + 256]).replace('"', '\\"')
            c_file.write('   strncpy(doc_{0} + {1}, "{2}", {3});\n'.format(
                key, i, chunk, min(len(val) - i, 256)))
        c_file.write("\n")
    c_file.write("\n}\n\n")

    c_file.write("#else /* UNIX */\n")

    for key in keys:
        val = docs[key]
        c_file.write('char doc_{0}[{1}] = "{2}";\n\n'.format(
            key, len(val),
            string_escape(val).replace('"', '\\"')))

    c_file.write("#endif\n")

    setup_helpers.write_if_different(join(WCSROOT, 'src', 'docstrings.c'),
                                     c_file.getvalue().encode('utf-8'))
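
As a side note on the 256-character chunking above: each docstring is copied with multiple ``strncpy`` calls so no single string literal exceeds MSVC's limit. A tiny self-contained sketch of the same slicing (values are illustrative):

val = b'x' * 600
chunks = [val[i:i + 256] for i in range(0, len(val), 256)]
assert [len(c) for c in chunks] == [256, 256, 88]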
Example #37
File: main.py Project: gleke/queue
    def runjob(self, db, job_uuid, **kw):
        http.request.session.db = db
        env = http.request.env(user=odoo.SUPERUSER_ID)

        def retry_postpone(job, message, seconds=None):
            job.env.clear()
            with odoo.api.Environment.manage():
                with odoo.registry(job.env.cr.dbname).cursor() as new_cr:
                    job.env = job.env(cr=new_cr)
                    job.postpone(result=message, seconds=seconds)
                    job.set_pending(reset_retry=False)
                    job.store()
                    new_cr.commit()

        # ensure the job to run is in the correct state and lock the record
        env.cr.execute(
            "SELECT state FROM queue_job WHERE uuid=%s AND state=%s FOR UPDATE",
            (job_uuid, ENQUEUED),
        )
        if not env.cr.fetchone():
            _logger.warn(
                "was requested to run job %s, but it does not exist, "
                "or is not in state %s",
                job_uuid,
                ENQUEUED,
            )
            return ""

        job = Job.load(env, job_uuid)
        assert job and job.state == ENQUEUED

        try:
            try:
                self._try_perform_job(env, job)
            except OperationalError as err:
                # Automatically retry the typical transaction serialization
                # errors
                if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
                    raise

                retry_postpone(
                    job, tools.ustr(err.pgerror, errors="replace"), seconds=PG_RETRY
                )
                _logger.debug("%s OperationalError, postponed", job)

        except NothingToDoJob as err:
            if str(err):
                msg = str(err)
            else:
                msg = _("Job interrupted and set to Done: nothing to do.")
            job.set_done(msg)
            job.store()
            env.cr.commit()

        except RetryableJobError as err:
            # delay the job later, requeue
            retry_postpone(job, str(err), seconds=err.seconds)
            _logger.debug("%s postponed", job)

        except (FailedJobError, Exception):
            buff = StringIO()
            traceback.print_exc(file=buff)
            _logger.error(buff.getvalue())
            job.env.clear()
            with odoo.api.Environment.manage():
                with odoo.registry(job.env.cr.dbname).cursor() as new_cr:
                    job.env = job.env(cr=new_cr)
                    job.set_failed(exc_info=buff.getvalue())
                    job.store()
                    new_cr.commit()
            raise

        return ""
Example #38
def test_check_estimator():
    # tests that the estimator actually fails on "bad" estimators.
    # not a complete test of all checks, which are very extensive.

    # check that we have a set_params and can clone
    msg = "it does not implement a 'get_params' methods"
    assert_raises_regex(TypeError, msg, check_estimator, object)
    assert_raises_regex(TypeError, msg, check_estimator, object())
    # check that values returned by get_params match set_params
    msg = "get_params result does not match what was passed to set_params"
    assert_raises_regex(AssertionError, msg, check_estimator,
                        ModifiesValueInsteadOfRaisingError())
    assert_warns(UserWarning, check_estimator, RaisesErrorInSetParams())
    assert_raises_regex(AssertionError, msg, check_estimator,
                        ModifiesAnotherValue())
    # check that we have a fit method
    msg = "object has no attribute 'fit'"
    assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
    assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator())
    # check that fit does input validation
    msg = "ValueError not raised"
    assert_raises_regex(AssertionError, msg, check_estimator,
                        BaseBadClassifier)
    assert_raises_regex(AssertionError, msg, check_estimator,
                        BaseBadClassifier())
    # check that sample_weights in fit accepts pandas.Series type
    try:
        from pandas import Series  # noqa
        msg = ("Estimator NoSampleWeightPandasSeriesType raises error if "
               "'sample_weight' parameter is of type pandas.Series")
        assert_raises_regex(ValueError, msg, check_estimator,
                            NoSampleWeightPandasSeriesType)
    except ImportError:
        pass
    # check that predict does input validation (doesn't accept dicts in input)
    msg = "Estimator doesn't check for NaN and inf in predict"
    assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
    assert_raises_regex(AssertionError, msg, check_estimator,
                        NoCheckinPredict())
    # check that estimator state does not change
    # at transform/predict/predict_proba time
    msg = 'Estimator changes __dict__ during predict'
    assert_raises_regex(AssertionError, msg, check_estimator, ChangesDict)
    # check that `fit` only changes attributes that
    # are private (start with an _ or end with a _).
    msg = ('Estimator ChangesWrongAttribute should not change or mutate  '
           'the parameter wrong_attribute from 0 to 1 during fit.')
    assert_raises_regex(AssertionError, msg, check_estimator,
                        ChangesWrongAttribute)
    check_estimator(ChangesUnderscoreAttribute)
    # check that `fit` doesn't add any public attribute
    msg = (r'Estimator adds public attribute\(s\) during the fit method.'
           ' Estimators are only allowed to add private attributes'
           ' either started with _ or ended'
           ' with _ but wrong_attribute added')
    assert_raises_regex(AssertionError, msg, check_estimator,
                        SetsWrongAttribute)
    # check for invariant method
    name = NotInvariantPredict.__name__
    method = 'predict'
    msg = ("{method} of {name} is not invariant when applied "
           "to a subset.").format(method=method, name=name)
    assert_raises_regex(AssertionError, msg, check_estimator,
                        NotInvariantPredict)
    # check for sparse matrix input handling
    name = NoSparseClassifier.__name__
    msg = "Estimator %s doesn't seem to fail gracefully on sparse data" % name
    # the check for sparse input handling prints to the stdout,
    # instead of raising an error, so as not to remove the original traceback.
    # that means we need to jump through some hoops to catch it.
    old_stdout = sys.stdout
    string_buffer = StringIO()
    sys.stdout = string_buffer
    try:
        check_estimator(NoSparseClassifier)
    except:
        pass
    finally:
        sys.stdout = old_stdout
    assert msg in string_buffer.getvalue()

    # Large indices test on bad estimator
    msg = ('Estimator LargeSparseNotSupportedClassifier doesn\'t seem to '
           r'support \S{3}_64 matrix, and is not failing gracefully.*')
    assert_raises_regex(AssertionError, msg, check_estimator,
                        LargeSparseNotSupportedClassifier)

    # non-regression test for estimators transforming to sparse data
    check_estimator(SparseTransformer())

    # doesn't error on actual estimator
    check_estimator(AdaBoostClassifier)
    check_estimator(AdaBoostClassifier())
    check_estimator(MultiTaskElasticNet)
    check_estimator(MultiTaskElasticNet())

    # doesn't error on binary_only tagged estimator
    check_estimator(TaggedBinaryClassifier)

    # does error on binary_only untagged estimator
    msg = 'Only 2 classes are supported'
    assert_raises_regex(ValueError, msg, check_estimator,
                        UntaggedBinaryClassifier)
Example #39
def distribute_task(table,
                    func_to_run,
                    bucket=None,
                    func_kwargs=None,
                    func_class=None,
                    func_class_kwargs=None,
                    catch=False,
                    group_count=100,
                    storage='s3'):
    """
    Distribute processing rows in a table across multiple AWS Lambda invocations.

    If you are running the processing of a table inside AWS Lambda, then you
    are limited by how many rows can be processed within the Lambda's time limit
    (at time-of-writing, maximum 15min).

    Based on experience and some napkin math, with
    the same data that would allow 1000 rows to be processed inside a single
    AWS Lambda instance, this method allows 10 MILLION rows to be processed.

    Rather than converting the table to SQS
    or other options, the fastest way is to upload the table to S3, and then
    invoke multiple Lambda sub-invocations, each of which can be sent a
    byte-range of the data in the S3 CSV file for which to process.

    Using this method requires some setup. You have three tasks:

    1. Define the function to process rows, the first argument, must take
       your table's data (though only a subset of rows will be passed)
       (e.g. `def task_for_distribution(table, **kwargs):`)
    2. Where you would have run `task_for_distribution(my_table, **kwargs)`
       instead call `distribute_task(my_table, task_for_distribution, func_kwargs=kwargs)`
       (either setting env var S3_TEMP_BUCKET or passing a bucket= parameter)
    3. Setup your Lambda handler to include :py:meth:`parsons.aws.event_command`
       (or run and deploy your lambda with `Zappa <https://github.com/Miserlou/Zappa>`_)

    To test locally, include the argument `storage="local"` which will test
    the distribute_task function, but run the task sequentially and in local memory.

    A minimalistic example Lambda handler might look something like this:

    .. code-block:: python
       :emphasize-lines: 5,6

       from parsons.aws import event_command, distribute_task

       def process_table(table, foo, bar=None):
           for row in table:
               do_sloooooow_thing(row, foo, bar)

       def handler(event, context):
           ## ADD THESE TWO LINES TO TOP OF HANDLER:
           if event_command(event, context):
               return
           table = FakeDatasource.load_to_table(username='******', password='******')
           # table is so big that running
           #   process_table(table, foo=789, bar='baz') would timeout
           # so instead we:
           distribute_task(table, process_table,
                           bucket='my-temp-s3-bucket',
                           func_kwargs={'foo': 789, 'bar': 'baz'})

    `Args:`
        table: Parsons Table
           Table of data you wish to distribute processing across Lambda invocations
           of `func_to_run` argument.
        func_to_run: function
           The function you want to run whose
           first argument will be a subset of table
        bucket: str
           The bucket name to use for s3 upload to process the whole table
           Not required if you set environment variable ``S3_TEMP_BUCKET``
        func_kwargs: dict
           If the function has other arguments to pass along with `table`
           then provide them as a dict here. They must all be JSON-able.
        func_class: class
           If the function is a classmethod or function on a class,
           then pass the pure class here.
           E.g. If you passed `ActionKit.bulk_upload_table`,
           then you would pass `ActionKit` here.
        func_class_kwargs: dict
           If it is a class function, and the class must be instantiated,
           then pass the kwargs to instantiate the class here.
           E.g. If you passed `ActionKit.bulk_upload_table` as the function,
           then you would pass {'domain': ..., 'username': ... etc} here.
           This must all be JSON-able data.
        catch: bool
           Lambda will retry running an event three times if there's an
           exception -- if you want to prevent this, set `catch=True`
           and then it will catch any errors and stop retries.
           The error will be in CloudWatch logs with string "Distribute Error"
           This might be important if row-actions are not idempotent and your
           own function might fail causing repeats.
        group_count: int
           Set this to how many rows to process with each Lambda invocation (Default: 100)
        storage: str
           Debugging option: Defaults to "s3". To test distribution locally without s3,
           set to "local".
    `Returns:`
        Debug information -- do not rely on the output, as it will change
        depending on how this method is invoked.
    """
    if storage not in ('s3', 'local'):
        raise DistributeTaskException('storage argument must be s3 or local')
    bucket = check('S3_TEMP_BUCKET', bucket)
    csvdata = StringIO()
    outcsv = csv.writer(csvdata)
    outcsv.writerows(table.table.data())
    return distribute_task_csv(csvdata.getvalue().encode('utf-8-sig'),
                               func_to_run,
                               bucket,
                               header=table.columns,
                               func_kwargs=func_kwargs,
                               func_class=func_class,
                               func_class_kwargs=func_class_kwargs,
                               catch=catch,
                               group_count=group_count,
                               storage=storage)
Example #40
    entry_points={
        'console_scripts': [
            'cocotb-config=cocotb.config:main',
        ]
    },
    platforms='any',
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: BSD License",
        "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
    ],

    # these appear in the sidebar on PyPI
    project_urls={
        "Bug Tracker": "https://github.com/cocotb/cocotb/issues",
        "Source Code": "https://github.com/cocotb/cocotb",
        "Documentation": "https://docs.cocotb.org",
    },

    extras_require={
        "bus": ["cocotb_bus"]
    }
)

print(log_stream.getvalue())
Example #41
 def test_print3(self):
     w = StringIO()
     collatz_print(w, 23832, 23234, 284)
     self.assertEqual(w.getvalue(), "23832 23234 284\n")
Example #42
finally:
    if f:
        f.close()


with open('d:/dev-test', 'r') as f:
    print(f.read())

with open('d:/dev-test', 'r') as f:
    for line in f.readlines():
        print(line.strip())

with open('d:/dev-test', 'r', encoding='gbk', errors='ignore') as f:
    for line in f.readlines():
        print(line.strip())

with open('d:/1.txt', 'w', encoding='gbk') as f:
    f.write('abc')

from io import StringIO, BytesIO

f = StringIO('Hello!\nHi!\nGoodbye!')
while True:
    s = f.readline()
    if s == '':
        break
    print(s.strip())

f = BytesIO()
f.write('中文'.encode('utf-8'))
print(f.getvalue())

 def test_print1(self):
     w = StringIO()
     collatz_print(w, 1, 10, 20)
     self.assertEqual(w.getvalue(), "1 10 20\n")
Example #44
0
class TestConditionalVariants(unittest.TestCase):

    def setUp(self):
        self.lgr = LGR()
        # Configure log system to redirect validation logs to local attribute
        self.log_output = StringIO()
        ch = logging.StreamHandler(self.log_output)
        ch.setLevel(logging.DEBUG)
        logging.getLogger('lgr.validate').addHandler(ch)

    def test_empty_lgr(self):
        success, result = check_conditional_variants(self.lgr, {})
        log_content = self.log_output.getvalue()
        self.assertEqual(len(log_content), 0)
        self.assertTrue(success)
        self.assertDictEqual(result, {'description': 'Testing conditional variants',
                                      'repertoire': []})

    def test_no_variants(self):
        self.lgr.add_cp([0x0061])
        self.lgr.add_cp([0x0062])
        success, result = check_conditional_variants(self.lgr, {})
        log_content = self.log_output.getvalue()
        self.assertEqual(len(log_content), 0)
        self.assertTrue(success)
        self.assertDictEqual(result, {'description': 'Testing conditional variants',
                                      'repertoire': []})

    def test_no_rule_when(self):
        self.lgr.add_cp([0x0061])
        self.lgr.add_variant([0x0061], [0x0062], when="when-rule")
        success, result = check_conditional_variants(self.lgr, {})
        log_content = self.log_output.getvalue()
        self.assertGreater(len(log_content), 0)
        self.assertEqual(log_content,
                         "CP U+0061: Variant 'U+0062' \"when\" attribute "
                         "'when-rule' is not an existing rule name.\n")
        self.assertFalse(success)
        var = self.lgr.get_variant([0x0061], (0x0062, ))[0]
        self.assertDictEqual(result, {'description': 'Testing conditional variants',
                                      'repertoire': [{'char': self.lgr.get_char([0x0061]),
                                                      'variant': var,
                                                      'rule_type': 'when',
                                                      'rule': var.when}]})

    def test_no_rule_not_when(self):
        self.lgr.add_cp([0x0061])
        self.lgr.add_variant([0x0061], [0x0062], not_when="not-when-rule")
        success, result = check_conditional_variants(self.lgr, {})
        log_content = self.log_output.getvalue()
        self.assertGreater(len(log_content), 0)
        self.assertEqual(log_content,
                         "CP U+0061: Variant 'U+0062' \"not-when\" attribute "
                         "'not-when-rule' is not an existing rule name.\n")
        self.assertFalse(success)
        var = self.lgr.get_variant([0x0061], (0x0062, ))[0]
        self.assertDictEqual(result, {'description': 'Testing conditional variants',
                                      'repertoire': [{'char': self.lgr.get_char([0x0061]),
                                                      'variant': var,
                                                      'rule_type': 'not-when',
                                                      'rule': var.not_when}]})

    def test_no_rule_when_not_when(self):
        self.lgr.add_cp([0x0061])
        self.lgr.add_variant([0x0061], [0x0062], when="when-rule", not_when="not-when-rule", force=True)
        success, result = check_conditional_variants(self.lgr, {})
        log_content = self.log_output.getvalue()
        self.assertGreater(len(log_content), 0)
        self.assertEqual(log_content,
                         "CP U+0061: Variant 'U+0062' \"when\" attribute "
                         "'when-rule' is not an existing rule name.\n"
                         "CP U+0061: Variant 'U+0062' \"not-when\" attribute "
                         "'not-when-rule' is not an existing rule name.\n")
        self.assertFalse(success)
        var = self.lgr.get_variant([0x0061], (0x0062, ))[0]
        self.assertDictEqual(result, {'description': 'Testing conditional variants',
                                      'repertoire': [{'char': self.lgr.get_char([0x0061]),
                                                      'variant': var,
                                                      'rule_type': 'when',
                                                      'rule': var.when},
                                                     {'char': self.lgr.get_char([0x0061]),
                                                      'variant': var,
                                                      'rule_type': 'not-when',
                                                      'rule': var.not_when}
                                                     ]})

    def test_conditional_when_ok(self):
        self.lgr.add_cp([0x0061])
        self.lgr.add_variant([0x0061], [0x0062], when="when-rule")
        self.lgr.rules.append("when-rule")
        success, result = check_conditional_variants(self.lgr, {})
        log_content = self.log_output.getvalue()
        self.assertEqual(len(log_content), 0)
        self.assertTrue(success)
        self.assertDictEqual(result, {'description': 'Testing conditional variants',
                                      'repertoire': []})

    def test_conditional_not_when_ok(self):
        self.lgr.add_cp([0x0061])
        self.lgr.add_variant([0x0061], [0x0062], not_when="not-when-rule")
        self.lgr.rules.append("not-when-rule")
        success, result = check_conditional_variants(self.lgr, {})
        log_content = self.log_output.getvalue()
        self.assertEqual(len(log_content), 0)
        self.assertTrue(success)
        self.assertDictEqual(result, {'description': 'Testing conditional variants',
                                      'repertoire': []})
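The setUp above routes the `lgr.validate` logger into a StringIO so each test can inspect the validation messages. A minimal, self-contained sketch of that capture pattern follows; the logger name matches the tests above, but the message and assertion are illustrative.

# Minimal sketch of capturing a logger's output in a StringIO, mirroring the
# setUp above; the log message and assertion are illustrative.
import logging
from io import StringIO

log_output = StringIO()
handler = logging.StreamHandler(log_output)
handler.setLevel(logging.DEBUG)

logger = logging.getLogger('lgr.validate')
logger.setLevel(logging.DEBUG)   # make sure DEBUG-level records reach the handler
logger.addHandler(handler)

logger.warning("U+0061: variant rule 'when-rule' not found")
assert "when-rule" in log_output.getvalue()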
 def test_solve2(self):
     read = StringIO("491 2460\n950 2853\n325 1296\n")
     write = StringIO()
     collatz_solve(read, write)
     self.assertEqual(
         write.getvalue(), "491 2460 183\n950 2853 209\n325 1296 182\n")
 def test_print2(self):
     w = StringIO()
     collatz_print(w, 1, 100, 119)
     self.assertEqual(w.getvalue(), "1 100 119\n")
Example #47
0
 def test_command_success(self, ctx, config):
     result = StringIO()
     ctx.cluster.run(args=["python3", "-c", "print('hi')"], stdout=result)
     assert result.getvalue().strip() == "hi"
 def test_solve3(self):
     reader = StringIO("1 1\n8493 358846\n8473 27\n11234 1378\n")
     writer = StringIO()
     collatz_solve(reader, writer)
     self.assertEqual(writer.getvalue(), \
         "1 1 1\n8493 358846 443\n8473 27 262\n11234 1378 268\n")
Example #49
0
def save_ihelp_to_file(function,save_help=False,save_code=True, 
                        as_md=False,as_txt=True,
                        folder='readme_resources/ihelp_outputs/',
                        filename=None,file_mode='w'):
    """Saves the string representation of the ihelp source code as markdown. 
    Filename should NOT have an extension. .txt or .md will be added based on
    as_md/as_txt.
    If filename is None, function name is used."""

    if as_md and as_txt:
        raise Exception('Only one of as_md / as_txt may be true.')

    import sys
    from io import StringIO
    ## save original output to restore
    orig_output = sys.stdout
    ## instantiate io stream to capture output
    io_out = StringIO()
    ## Redirect output to output stream
    sys.stdout = io_out
    
    if save_code:
        print('### SOURCE:')
        help_md = get_source_code_markdown(function)
        ## print output to io_stream
        print(help_md)
        
    if save_help:
        print('### HELP:')
        help(function)
        
    ## Get printed text from io stream
    text_to_save = io_out.getvalue()
    

    ## MAKE FULL FILENAME
    if filename is None:

        ## Find the name of the function
        import re
        func_names_exp = re.compile(r'def (\w*)\(')
        func_name = func_names_exp.findall(text_to_save)[0]    
        print(f'Found code for {func_name}')

        save_filename = folder+func_name#+'.txt'
    else:
        save_filename = folder+filename

    if as_md:
        ext = '.md'
    elif as_txt:
        ext = '.txt'

    full_filename = save_filename + ext
    
    with open(full_filename,file_mode) as f:
        f.write(text_to_save)
        
    print(f'Output saved as {full_filename}')
    
    sys.stdout = orig_output
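Below is a brief, hypothetical call of `save_ihelp_to_file` as defined above. `my_function` is a toy placeholder, the default output folder `readme_resources/ihelp_outputs/` is assumed to already exist, and `get_source_code_markdown` (used internally) is assumed to be importable in the same module.

# Hypothetical usage of save_ihelp_to_file defined above; the function and
# folder are placeholders, and the folder must already exist on disk.
def my_function(x):
    """Toy function used only to demonstrate the call."""
    return x * 2

# With the defaults (as_txt=True), this writes
# readme_resources/ihelp_outputs/my_function.txt containing the captured
# markdown-formatted source (and, here, the help text as well).
save_ihelp_to_file(my_function, save_help=True, save_code=True)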
 def test_solve11(self):
     r = StringIO("1 10\n100 200\n201 210\n900 1000\n")
     w = StringIO()
     collatz_solve(r, w)
     self.assertEqual(w.getvalue(), "1 10 20\n100 200 125\n201 210 89\n900 1000 174\n")
Example #51
0
# Train Decision Tree Classifier
clf = clf.fit(X_train, y_train)

#Predict the response for test dataset
y_pred = clf.predict(X_test)

# Model Accuracy, how often is the classifier correct?
print("Unpruned Accuracy:", metrics.accuracy_score(y_test, y_pred))

dot_data = StringIO()
export_graphviz(clf,
                out_file=dot_data,
                filled=True,
                rounded=True,
                special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_pdf("DT.pdf")

# Create Decision Tree classifer object
clfl2 = DecisionTreeClassifier(max_depth=2, criterion="entropy")

# Train Decision Tree Classifier
clfl2 = clfl2.fit(X_train, y_train)

#Predict the response for test dataset
y_pred = clfl2.predict(X_test)

# Model Accuracy, how often is the classifier correct?
print("Accuracy (2 layer):", metrics.accuracy_score(y_test, y_pred))

dot_data = StringIO()
Example #52
0
    def make_job(self, scene):
        imp.reload(export_cycles)
        imp.reload(crowdprocess)

        fp = StringIO()

        export_cycles.export_cycles(fp=fp, scene=scene, inline_textures=True)
        scene_xml = fp.getvalue()

        emcycles_core = open(
            path.join(path.dirname(__file__),
                      'emcycles/cloudrender_core.js')).read()

        pako = open(path.join(path.dirname(__file__),
                              'pako_deflate.js')).read()

        crowdprocess_func = '''
            function Run(data) {
                var console = {
                    log: function(){},
                    error: function(){},
                    warn: function(){},
                    time: function(){},
                    timeEnd: function(){},
                    assert: function(){}
                };
                var Module = {
                    print: function () {},
                    tileX: data.x,
                    tileY: data.y,
                    tileH: data.h,
                    tileW: data.w
                }

                var SCENE = %s;  // scene_xml
                var INCLUDES = [];

                ;%s;  // emcycles

                data.tile = Module.imageData

                ;%s;  // pako

                ;%s;  // base64

                var deflated = pako.deflate(data.tile)

                if (deflated.length < data.tile.length) {
                    data.tile = deflated
                    data.deflated = true
                }
                data.tile = base64(data.tile)

                return data
            }
        ''' % (json.dumps(scene_xml), emcycles_core, pako, base_64_enc)

        # open('/tmp/wow.js', 'w').write(crowdprocess_func)

        crp = crowdprocess.CrowdProcess(scene.ore_render.username,
                                        scene.ore_render.password)

        self.job = crp.job(crowdprocess_func)
Example #53
0
 def __str__(self):
     buf = StringIO()
     self.save(buf)
     return buf.getvalue()
Example #54
0
def ihelp_menu(function_list,box_style='warning', to_embed=False):#, to_file=False):#, json_file='ihelp_output.txt' ):
    """
    Creates a widget menu of the source code and help documentation of the functions in function_list.
    
    Args:
        function_list (list): list of function objects or string names of loaded functions.
        to_embed (bool, optional): Returns interface (layout,output) if True. Defaults to False.
        to_file (bool, optional): Save . Defaults to False.
        json_file (str, optional): [description]. Defaults to 'ihelp_output.txt'.
        
    Returns:
        full_layout (ipywidgets GridBox): Layout of interface.
        output ()
    """
    
    # Accepts a list of string names for loaded modules/functions to save the `help` output and 
    # inspect.getsource() outputs to dictionary for later reference and display
    ## One way using sys to write txt file
    import pandas as pd
    import sys
    import inspect
    from io import StringIO
    from IPython.display import display,Markdown
    notebook_output = sys.stdout
    result = StringIO()
    sys.stdout=result
    
    ## Turn single input into a list
    if not isinstance(function_list, list):
        function_list = [function_list]
    
    ## Make a dictionary of{function_name : function_object}
    functions_dict = dict()
    for fun in function_list:
        
        ## if input is a string, save string as key, and eval(function) as value
        if isinstance(fun, str):
            functions_dict[fun] = eval(fun)

        ## if input is a function, get the name of function using inspect and make key, function as value
        elif inspect.isfunction(fun):

            members= inspect.getmembers(fun)
            member_df = pd.DataFrame(members,columns=['param','values']).set_index('param')

            fun_name = member_df.loc['__name__'].values[0]
            functions_dict[fun_name] = fun
            
            
    ## Create an output dict to store results for functions
    output_dict = {}

    for fun_name, real_func in functions_dict.items():
        
        output_dict[fun_name] = {}
                
        ## First save help
        help(real_func)
        output_dict[fun_name]['help'] = result.getvalue()
        
        ## Clear contents of io stream (seek first so later writes start at position 0)
        result.seek(0)
        result.truncate(0)
                
        try:
            ## Next save source
            source_DF = inspect.getsource(real_func)
            # # if markdown == True:
                
            #     output = "```python" +'\n'+source_DF+'\n'+"```"
            #     display(Markdown(output))
            # else:
            #     output=source_DF
            print(source_DF)
            # output_dict[fun_name]['source'] = source_DF

            # print(inspect.getsource(real_func)) #eval(fun)))###f"{eval(fun)}"))
        except:
            # print("Source code for object was not found")
            print("Source code for object was not found")


        # finally:
        output_dict[fun_name]['source'] = result.getvalue()
        ## clear contents of io stream (seek first so later writes start at position 0)
        result.seek(0)
        result.truncate(0)
    
        
        ## Get file location
        try:
            file_loc = inspect.getfile(real_func)
            print(file_loc)
        except:
            print("File location not found")
            
        output_dict[fun_name]['file_location'] =result.getvalue()
        
        
        ## clear contents of io stream (seek first so later writes start at position 0)
        result.seek(0)
        result.truncate(0)
        
    ## Reset display back to notebook
    sys.stdout = notebook_output    

    # if to_file==True:    
    #     with open(json_file,'w') as f:
    #         import json
    #         json.dump(output_dict,f)

    ## CREATE INTERACTIVE MENU
    from ipywidgets import interact, interactive, interactive_output
    import ipywidgets as widgets
    from IPython.display import display
    # from functions_combined_BEST import ihelp
    # import functions_combined_BEST as ji

    ## Check boxes
    check_help = widgets.Checkbox(description="Show 'help(func)'",value=True)
    check_source = widgets.Checkbox(description="Show source code",value=True)
    check_fileloc=widgets.Checkbox(description="Show source filepath",value=False)
    check_boxes = widgets.HBox(children=[check_help,check_source,check_fileloc])

    ## dropdown menu (dropdown, label, button)
    dropdown = widgets.Dropdown(options=list(output_dict.keys()))
    label = widgets.Label('Function Menu')
    button = widgets.ToggleButton(description='Show/hide',value=False)
    
    ## Putting it all together
    title = widgets.Label('iHelp Menu: View Help and/or Source Code')
    menu = widgets.HBox(children=[label,dropdown,button])
    titled_menu = widgets.VBox(children=[title,menu])
    full_layout = widgets.GridBox(children=[titled_menu,check_boxes],box_style=box_style)
    

    ## Define output manager
    # show_output = widgets.Output()

    def dropdown_event(change): 
        new_key = change.new
        output_display = output_dict[new_key]
    dropdown.observe(dropdown_event, names='value')

    
    def show_ihelp(display_help=button.value,function=dropdown.value,
                   show_help=check_help.value,show_code=check_source.value, 
                   show_file=check_fileloc.value, output_dict=output_dict):

        from IPython.display import Markdown
        # import functions_combined_BEST as ji
        from IPython.display import display        
        page_header = '---'*28
        # import json
        # with open(json_file,'r') as f:
        #     output_dict = json.load(f)
        func_dict = output_dict[function]
        source_code=None

        if display_help:
            if show_help:
#                 display(print(func_dict['help']))
                print(page_header)
                banner = ''.join(["---"*2,' HELP ',"---"*24,'\n'])
                print(banner)
                print(func_dict['help'])

            if show_code:
                print(page_header)

                banner = ''.join(["---"*2,' SOURCE -',"---"*23])
                print(banner)

                source_code = func_dict['source']#.encode('utf-8')
                if source_code.startswith('`'):
                    source_code = source_code.replace('`',"").encode('utf-8')

                if 'google.colab' in sys.modules:
                    print(source_code)
                else:
                    md_source = "```python\n"+source_code
                    md_source += "```"
                    display(Markdown(md_source))
            
            
            if show_file:
                print(page_header)
                banner = ''.join(["---"*2,' FILE LOCATION ',"---"*21])
                print(banner)
                
                file_loc = func_dict['file_location']
                print(file_loc)
                
            if not (show_help or show_code or show_file):
                display('Check at least one "Show" checkbox for output.')
                
        else:
            display('Press "Show/hide" for display')
            
    ## Fully integrated output
    output = widgets.interactive_output(show_ihelp,{'display_help':button,
                                                   'function':dropdown,
                                                   'show_help':check_help,
                                                   'show_code':check_source,
                                                   'show_file':check_fileloc})
    if to_embed:
        return full_layout, output
    else:
        display(full_layout, output)
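As a quick illustration, a hedged sketch of calling `ihelp_menu` from a Jupyter notebook follows; `pd.read_csv` stands in for any loaded function and the widgets only render in a notebook front end.

# Hypothetical notebook usage of ihelp_menu defined above; pd.read_csv is just
# an example of a loaded function whose help/source can be browsed.
import pandas as pd

# Render the menu directly in the notebook output area...
ihelp_menu([pd.read_csv])

# ...or capture the layout and output widgets to arrange them yourself.
layout, output = ihelp_menu([pd.read_csv], box_style='info', to_embed=True)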
Example #55
0
 def test_main(self):
     capturedOutput = StringIO()
     sys.stdout = capturedOutput
     QP_main()
     sys.stdout = sys.__stdout__
     self.assertIn("-59231", capturedOutput.getvalue())
Example #56
0
class UMCmdTest(unittest.TestCase):
    def setUp(self):
        self.output = StringIO()
        sys.stdout = self.output

        v = um_view.UMView()
        d = data_util.DataUtil()
        a = analysis_util.AnalysisUtil()
        c = um_controller.UMController(v, d, a)
        self.cmd = um_cmd.UMCmd(c)

    # @unittest.skip("Skipping test to speed up execution while writing tests")
    def test_search_1(self):
        self.cmd.do_search('blueprints')
        expected = 'Displaying the first 10 assets (max 10):'
        actual = self.output.getvalue().strip()[:len(expected)]
        self.assertEqual(actual, expected)

    # @unittest.skip("Skipping test to speed up execution while writing tests")
    def test_search_2(self):
        self.cmd.do_search('fire effects')
        expected = 'Displaying the first 6 assets (max 10):'
        actual = self.output.getvalue().strip()[:len(expected)]
        self.assertEqual(actual, expected)

    # @unittest.skip("Skipping test to speed up execution while writing tests")
    def test_category_1(self):
        self.cmd.do_category('blueprints')
        expected = 'Displaying the first 10 assets (max 10):'
        actual = self.output.getvalue().strip()[:len(expected)]
        self.assertEqual(actual, expected)

    # @unittest.skip("Skipping test to speed up execution while writing tests")
    def test_category_2(self):
        self.cmd.do_category('fire effects')
        expected = 'The category "fire effects" was not found'
        actual = self.output.getvalue().strip()[:len(expected)]
        self.assertEqual(actual, expected)

    # @unittest.skip("Skipping test to speed up execution while writing tests")
    def test_results_1(self):
        self.cmd.do_results(1)
        self.cmd.do_search('blueprints')
        expected = 'Displaying the first 1 assets (max 1):'
        actual = self.output.getvalue().strip()[:len(expected)]
        self.assertEqual(actual, expected)

    # @unittest.skip("Skipping test to speed up execution while writing tests")
    def test_view_asset_image_1(self):
        self.cmd.do_search('fire effects')
        self.reset_output()
        self.cmd.do_view_asset_image(1)
        expected = ''
        actual = self.output.getvalue().strip()[:len(expected)]
        self.assertEqual(actual, expected)

    # @unittest.skip("Skipping test to speed up execution while writing tests")
    def test_view_asset_image_2(self):
        self.cmd.do_search('fire effects')
        self.reset_output()
        self.cmd.do_view_asset_image(99)
        expected = 'Value must be within the previous search results'
        actual = self.output.getvalue().strip()[:len(expected)]
        self.assertEqual(actual, expected)

    # @unittest.skip("Skipping test to speed up execution while writing tests")
    def test_view_asset_image_3(self):
        self.cmd.do_view_asset_image(1)
        expected = 'You must run a query first'
        actual = self.output.getvalue().strip()[:len(expected)]
        self.assertEqual(actual, expected)

    # @unittest.skip("Skipping test to speed up execution while writing tests")
    def test_analyse_results_1(self):
        self.cmd.do_search('fire effects')
        self.cmd.do_analyse_results('')
        expected = ''
        actual = self.output.getvalue().strip()[:len(expected)]
        self.assertEqual(actual, expected)

    # @unittest.skip("Skipping test to speed up execution while writing tests")
    def test_analyse_results_2(self):
        self.cmd.do_analyse_results('')
        expected = 'You must run a query first'
        actual = self.output.getvalue().strip()[:len(expected)]
        self.assertEqual(actual, expected)

    # @unittest.skip("Skipping test to speed up execution while writing tests")
    def test_save_1(self):
        self.cmd.do_search('fire effects')
        self.cmd.do_save('')
        expected = ''
        actual = self.output.getvalue().strip()[:len(expected)]
        self.assertEqual(actual, expected)

    # @unittest.skip("Skipping test to speed up execution while writing tests")
    def test_save_2(self):
        self.cmd.do_save('')
        expected = 'No results to save'
        actual = self.output.getvalue().strip()[:len(expected)]
        self.assertEqual(actual, expected)

    # @unittest.skip("Skipping test to speed up execution while writing tests")
    def test_load_1(self):
        self.cmd.do_search('fire effects')
        self.cmd.do_save('')
        self.reset_output()
        self.cmd.do_load('')
        expected = 'Displaying the first 6 assets (max 6):'
        actual = self.output.getvalue().strip()[:len(expected)]
        self.assertEqual(actual, expected)

    def reset_output(self):
        """Clears the output buffer"""
        self.output.seek(0)
        self.output.truncate(0)
Example #57
0
class RepoSyncTest(unittest.TestCase):

    def setUp(self):
        self.reposync = spacewalk.satellite_tools.reposync

        # kill logging
        self.reposync.rhnLog.initLOG = Mock()

        # catching stdout
        # this could be assertRaisesRegexp in python>=2.7. just sayin'
        self.saved_stdout = sys.stdout
        self.stdout = StringIO()
        sys.stdout = self.stdout

        # catching stderr
        self.saved_stderr = sys.stderr
        self.stderr = StringIO()
        sys.stderr = self.stderr

        self.reposync.os = Mock()
        self.reposync.rhnSQL.initDB = Mock()
        self.reposync.rhnSQL.commit = Mock()

        _mock_rhnsql(
            self.reposync,
            [
              [{'id': 'id1', 'repo_label': 'label1', 'source_url': 'http://url.one', 'metadata_signed': 'Y', 'repo_type': 'yum'}],
              [{'id': 'id2', 'repo_label': 'label2', 'source_url': 'http://url.two', 'metadata_signed': 'N', 'repo_type': 'yum'}],
            ]
        )

    def tearDown(self):
        self.stdout.close()
        sys.stdout = self.saved_stdout

        self.stderr.close()
        sys.stderr = self.saved_stderr

        imp.reload(spacewalk.satellite_tools.reposync)

    def test_init_succeeds_with_correct_attributes(self):
        rs = _init_reposync(self.reposync, 'Label', RTYPE)

        self.assertEqual(rs.channel_label, 'Label')

        # these should have been set automatically
        self.assertEqual(rs.fail, False)
        self.assertEqual(rs.interactive, True)

    def test_init_with_custom_url(self):
        rs = _init_reposync(self.reposync, 'Label', RTYPE, url='http://example.com')

        self.assertEqual(rs.urls, [{'source_url': 'http://example.com',
                                    'repo_label': None,
                                    'id': None,
                                    'metadata_signed': 'N',
                                    'repo_type': 'yum'
                                  }])

    def test_init_with_custom_flags(self):
        rs = _init_reposync(self.reposync, 'Label', RTYPE, fail=True, noninteractive=True)

        self.assertEqual(rs.fail, True)
        self.assertEqual(rs.interactive, False)

    def test_init_wrong_url(self):
        """Test generates empty metadata via taskomatic and quits"""
        # the channel shouldn't be found in the database
        _mock_rhnsql(self.reposync, False)
        self.reposync.taskomatic.add_to_repodata_queue_for_channel_package_subscription = Mock()

        channel = {'org_id':1, 'id':1, 'arch': 'arch1'}
        self.reposync.RepoSync.load_channel = Mock(return_value=channel)

        self.assertRaises(SystemExit, self.reposync.RepoSync, 'WrongLabel', RTYPE)

        self.assertTrue(self.reposync.taskomatic.
                        add_to_repodata_queue_for_channel_package_subscription.
                        called)

    def test_init_rhnlog(self):
        """Init rhnLog successfully"""
        rs = _init_reposync(self.reposync, 'Label', RTYPE)

        self.assertTrue(self.reposync.rhnLog.initLOG.called)

    def test_init_channel(self):
        self.reposync.rhnChannel.channel_info = Mock(return_value=
                                                {'name': 'mocked Channel',
                                                 'id': 1,
                                                 'org_id': 1})
        self.reposync.RepoSync.get_compatible_arches = Mock(return_value=['arch1', 'arch2'])
        self.reposync.RepoSync.get_channel_arch = Mock(return_value='arch1')

        rs = self.reposync.RepoSync('Label', RTYPE)

        self.assertEqual(rs.channel, {'name': 'mocked Channel', 'id': 1, 'org_id': 1})

    def test_init_bad_channel(self):
        self.reposync.rhnChannel.channel_info = Mock(return_value=None)

        self.assertRaises(SystemExit, self.reposync.RepoSync, 'Label', RTYPE)

    def test_bad_repo_type(self):
        rs = _init_reposync(self.reposync, 'Label', RTYPE)
        self.assertRaises(SystemExit, rs.load_plugin, 'bad-repo-type')
        self.assertIn("Repository type bad-repo-type is not supported. "
                      "Could not import "
                      "spacewalk.satellite_tools."
                      "repo_plugins.bad-repo-type_src.\n",
                      self.stderr.getvalue())

    def test_sync_success_no_regen(self):
        rs = _init_reposync(self.reposync)

        rs.urls = [
          {"source_url": ["http://none.host/bogus-url"], "id": 42, "metadata_signed": "N", "repo_label": None, 'repo_type': 'yum'}]

        _mock_rhnsql(self.reposync, None)
        rs = _mock_sync(self.reposync, rs)
        rs.sync()

        self.assertEqual(rs.repo_plugin.call_args[0],
                (('http://none.host/bogus-url', 'bogus-url', True, True)))

        self.assertEqual(rs.import_packages.call_args,
                ((rs.mocked_plugin, 42, "http://none.host/bogus-url", 1), {}))
        self.assertEqual(rs.import_updates.call_args,
                ((rs.mocked_plugin,), {}))
        self.assertEqual(rs.import_products.call_args,
                ((rs.mocked_plugin,), {}))

        # for the rest just check if they were called or not
        self.assertTrue(rs.update_date.called)
        # these aren't supposed to be called unless self.regen is True
        self.assertFalse(self.reposync.taskomatic.add_to_repodata_queue_for_channel_package_subscription.called)
        self.assertFalse(self.reposync.taskomatic.add_to_erratacache_queue.called)

    def test_sync_success_regen(self):
        rs = _init_reposync(self.reposync)

        rs.urls = [{"source_url": ["http://none.host/bogus-url"], "id": 42, "metadata_signed": "N", "repo_label": None, 'repo_type': 'yum'}]

        _mock_rhnsql(self.reposync, {})
        rs = _mock_sync(self.reposync, rs)
        rs.regen = True
        rs.sync()

        self.assertEqual(self.reposync.taskomatic.add_to_repodata_queue_for_channel_package_subscription.call_args,
                         ((["Label"], [], "server.app.yumreposync"), {}))
        self.assertEqual(self.reposync.taskomatic.add_to_erratacache_queue.call_args,
                         (("Label", ), {}))

    def test_sync_raises_channel_timeout(self):
        rs = self._create_mocked_reposync()

        exception = self.reposync.ChannelTimeoutException("anony-error")
        rs.load_plugin = Mock(return_value=Mock(side_effect=exception))
        rs.sendErrorMail = Mock()

        etime, ret = rs.sync()
        self.assertEqual(-1, ret)
        self.assertEqual(rs.sendErrorMail.call_args,
                         (("anony-error", ), {}))
        self.assertEqual(self.reposync.log.call_args[0][1], exception)

    def test_sync_raises_unexpected_error(self):
        rs = self._create_mocked_reposync()

        rs.load_plugin = Mock(return_value=Mock(side_effect=TypeError))
        rs.sendErrorMail = Mock()
        etime, ret = rs.sync()
        self.assertEqual(-1, ret)

        error_string = self.reposync.log.call_args[0][1]
        assert (error_string.startswith('Traceback') and
                'TypeError' in error_string), (
            "The error string does not contain the keywords "
            "'Traceback' and 'TypeError':\n %s\n---end of assert" % error_string)

    def test_update_bugs(self):
        notice = {'references': [{'type': 'bugzilla',
                                  'id': '12345',
                                  'title': 'title1',
                                  'href': 'href1'},
                                 {'type': 'bugzilla',
                                  'id': 'string_id',
                                  'title': 'title2',
                                  'href': 'href2',
                                  'this': 'non-integer bz ids should be skipped'},
                                 {'type': 'bugzilla',
                                  'id': 'string_id',
                                  'title': 'title3',
                                  'href': 'http://dummyhost/show_bug.cgi?id=11111',
                                  'this': 'bz id parsed from href'},
                                 {'type': 'bugzilla',
                                  'id': '54321',
                                  'title': 'title2',
                                  'href': 'href2'},
                                 {'type': 'bugzilla',
                                  'id': '54321',
                                  'title': 'duplicate_id',
                                  'href': 'duplicate_id'},
                                 {'type': 'godzilla',
                                  'this': 'should be skipped'}]}
        bugs = self.reposync.RepoSync._update_bugs(notice)

        bug_values = [set(['12345', 'title1', 'href1']),
                      set(['54321', 'title2', 'href2']),
                      set(['11111', 'title3', 'http://dummyhost/show_bug.cgi?id=11111'])]

        self.assertEqual(len(bugs), 3)
        for bug in bugs:
            self.assertCountEqual(list(bug.keys()), ['bug_id', 'href', 'summary'])
            assert set(bug.values()) in bug_values, (
                "Bug set(%s) not in %s" % (list(bug.values()), bug_values))

    def test_update_cves(self):
        notice = {'references': [{'type': 'cve',
                                  'id': "CVE-1234-5678"},
                                 {'type': 'cve',
                                  'id': "CVE-1234-123456"},
                                 {'type': 'cve',
                                  'id': "CVE-1234-5678"},
                                 {'type': 'this should be skipped'}],
                  'description': None}
        cves = self.reposync.RepoSync._update_cve(notice)

        self.assertCountEqual(cves, ["CVE-1234-5678", "CVE-1234-123456"])

    def test_update_cves_with_description(self):
        notice = {'references': [{'type': 'cve',
                                  'id': "CVE-1234-5678"},
                                 {'type': 'cve',
                                  'id': "CVE-1234-1234"},
                                 {'type': 'cve',
                                  'id': "CVE-1234-5678"},
                                 {'type': 'this should be skipped'}],
                  'description': 'This is a text with two CVE numbers CVE-1234-5678, CVE-1234-567901'}
        cves = self.reposync.RepoSync._update_cve(notice)

        self.assertCountEqual(cves, ["CVE-1234-567901", "CVE-1234-5678", "CVE-1234-1234"])


    def test_update_keywords_reboot(self):
        notice = {'reboot_suggested': True,
                  'restart_suggested': False}

        keyword = self.reposync.importLib.Keyword()
        keyword.populate({'keyword': 'reboot_suggested'})
        self.assertEqual(self.reposync.RepoSync._update_keywords(notice),
                         [keyword])

    def test_update_keywords_restart(self):
        notice = {'reboot_suggested': False,
                  'restart_suggested': True}

        keyword = self.reposync.importLib.Keyword()
        keyword.populate({'keyword': 'restart_suggested'})
        self.assertEqual(self.reposync.RepoSync._update_keywords(notice),
                         [keyword])

    def test_update_keywords_restart_and_reboot(self):
        notice = {'reboot_suggested': True,
                  'restart_suggested': True}

        keyword_restart = self.reposync.importLib.Keyword()
        keyword_restart.populate({'keyword': 'restart_suggested'})
        keyword_reboot = self.reposync.importLib.Keyword()
        keyword_reboot.populate({'keyword': 'reboot_suggested'})
        self.assertEqual(self.reposync.RepoSync._update_keywords(notice),
                         [keyword_reboot, keyword_restart])

    def test_update_keywords_both_false(self):
        notice = {'reboot_suggested': False,
                  'restart_suggested': False}

        self.assertEqual(self.reposync.RepoSync._update_keywords(notice),
                         [])

    @patch("uyuni.common.context_managers.initCFG", Mock())
    def test_send_error_mail(self):
        rs = self._create_mocked_reposync()
        self.reposync.rhnMail.send = Mock()
        self.reposync.hostname = 'testhost'
        CFG = Mock()
        CFG.TRACEBACK_MAIL = 'recipient'

        with patch("uyuni.common.context_managers.CFG", CFG):
            rs.sendErrorMail('email body')

        self.assertEqual(self.reposync.rhnMail.send.call_args, (
                ({'To': 'recipient',
                  'From': 'testhost <recipient>',
                  'Subject': "SUSE Manager repository sync failed (testhost)"},
                 "Syncing Channel 'Label' failed:\n\nemail body"), {}))

    def test_updates_process_packages_simple(self):
        rs = self._create_mocked_reposync()

        packages = [{'name': 'n1',
                     'version': 'v1',
                     'release': 'r1',
                     'arch': 'arch1',
                     'channel_label': 'l1',
                     'epoch': []},
                    {'name': 'n2',
                     'version': 'v2',
                     'release': 'r2',
                     'arch': 'arch2',
                     'channel_label': 'l2',
                     'epoch': 'e2'}]
        checksum = {'epoch': None,
                    'checksum_type': None,
                    'checksum': None,
                    'id': None}

        _mock_rhnsql(self.reposync, checksum)
        processed = rs._updates_process_packages(packages, 'a name', [])
        for p in processed:
            self.assertTrue(isinstance(p, self.reposync.importLib.IncompletePackage))

    def test_updates_process_packages_returns_the_right_values(self):
        rs = self._create_mocked_reposync()

        packages = [{'name': 'n1',
                     'version': 'v1',
                     'release': 'r1',
                     'arch': 'arch1',
                     'epoch': []},
                    {'name': 'n2',
                     'version': 'v2',
                     'release': 'r2',
                     'arch': 'arch2',
                     'epoch': 'e2'}]

        checksum = {'epoch': 'cs_epoch',
                    'checksum_type': 'md5',
                    'checksum': '12345',
                    'id': 'cs_package_id'}

        _mock_rhnsql(self.reposync, checksum)
        processed = rs._updates_process_packages(packages, 'patchy', [])

        p1 = self.reposync.importLib.IncompletePackage()
        p1.populate({'package_size': None,
                     'name': 'n1',
                     'checksum_list': None,
                     'md5sum': None,
                     'org_id': 1,
                     'epoch': 'cs_epoch',
                     'channels': None,
                     'package_id': 'cs_package_id',
                     'last_modified': None,
                     'version': 'v1',
                     'checksum_type': 'md5',
                     'release': 'r1',
                     'checksums': {'md5': '12345'},
                     'checksum': '12345',
                     'arch': 'arch1'})
        p2 = self.reposync.importLib.IncompletePackage()
        p2.populate({'package_size': None,
                     'name': 'n2',
                     'checksum_list': None,
                     'md5sum': None,
                     'org_id': 1,
                     'epoch': 'cs_epoch',
                     'channels': None,
                     'package_id': 'cs_package_id',
                     'last_modified': None,
                     'version': 'v2',
                     'checksum_type': 'md5',
                     'release': 'r2',
                     'checksums': {'md5': '12345'},
                     'checksum': '12345',
                     'arch': 'arch2'})
        fixtures = [p1, p2]
        for pkg, fix in zip(processed, fixtures):
            self.assertEqual(pkg, fix)

    def test_updates_process_packages_checksum_not_found(self):
        rs = self._create_mocked_reposync()

        packages = [{'name': 'n2',
                     'version': 'v2',
                     'release': 'r2',
                     'arch': 'arch2',
                     'channel_label': 'l2',
                     'epoch': 'e2'}]
        ident = "%(name)s-%(epoch)s:%(version)s-%(release)s.%(arch)s" % packages[0]
        rs.available_packages[ident] = 1

        _mock_rhnsql(self.reposync, [])
        self.assertEqual(rs._updates_process_packages(packages, 'patchy', []),
                         [])
        self.assertEqual(self.reposync.log.call_args[0][1],
                "The package n2-e2:v2-r2.arch2 "
                "which is referenced by patch patchy was not found "
                "in the database. This patch has been skipped.")

    def test_updates_process_packages_checksum_not_found_no_epoch(self):
        rs = self._create_mocked_reposync()

        packages = [{'name': 'n1',
                     'version': 'v1',
                     'release': 'r1',
                     'arch': 'arch1',
                     'channel_label': 'l1',
                     'epoch': '' }]
        ident = "%(name)s-%(epoch)s%(version)s-%(release)s.%(arch)s" % packages[0]
        rs.available_packages[ident] = 1

        _mock_rhnsql(self.reposync, [])
        self.assertEqual(rs._updates_process_packages(packages, 'patchy', []),
                         [])
        self.assertEqual(self.reposync.log.call_args[0][1],
                "The package n1-v1-r1.arch1 "
                "which is referenced by patch patchy was not found "
                "in the database. This patch has been skipped.")

    def test_updates_process_packages_checksum_not_found_but_not_available(self):
        rs = self._create_mocked_reposync()

        packages = [{'name': 'n1',
                     'version': 'v1',
                     'release': 'r1',
                     'arch': 'arch1',
                     'channel_label': 'l1',
                     'epoch': '' }]

        _mock_rhnsql(self.reposync, [])
        self.assertEqual(rs._updates_process_packages(packages, 'patchy', []),
                         [])
        self.assertEqual(self.reposync.log.call_args, None)

    # RedHat has errata with empty package list
    # they removed the check - therefore this is disabled too
    #def test_upload_updates_referenced_package_not_found(self):
    #    timestamp1 = datetime.now().isoformat(' ')
    #    notices = [{'from': 'from1',
    #                'update_id': 'update_id1',
    #                'version': 'version1',
    #                'type': 'security',
    #                'severity': 'Low',
    #                'release': 'release1',
    #                'description': 'description1',
    #                'title': 'title1',
    #                'issued': timestamp1, # we mock _to_db_date anyway
    #                'updated': timestamp1,
    #                'pkglist': [{'packages': []}],
    #                'reboot_suggested': False,
    #                'restart_suggested': False,
    #                'references': None,
    #                }]
    #    self.reposync._to_db_date = Mock(return_value=timestamp1)

    #    # no packages related to this errata makes the ErrataImport be called
    #    # with an empty list
    #    self.reposync.RepoSync._updates_process_packages = Mock(return_value=[])
    #    self.reposync.get_errata = Mock(return_value=None)

    #    mocked_backend = Mock()
    #    self.reposync.SQLBackend = Mock(return_value=mocked_backend)
    #    self.reposync.ErrataImport = Mock()

    #    rs = self._create_mocked_reposync()
    #    rs._patch_naming = Mock(return_value='package-name')

    #    rs.upload_updates(notices)

    #    self.assertEqual(self.reposync.ErrataImport.call_args, None)

    def test_associate_package(self):
        pack = ContentPackage()
        pack.setNVREA('name1', 'version1', 'release1', 'epoch1', 'arch1')
        pack.unique_id = 1
        pack.a_pkg = rhn_rpm.RPM_Package(None)
        pack.a_pkg.checksum = 'checksum1'
        pack.a_pkg.checksum_type = 'c_type1'
        pack.a_pkg.header = {'epoch': 'epoch1'}
        pack.checksums[1] = 'checksum1'

        mocked_backend = Mock()
        self.reposync.SQLBackend = Mock(return_value=mocked_backend)
        rs = self._create_mocked_reposync()
        rs._importer_run = Mock()
        rs.channel_label = 'Label1'
        rs.channel = {'id': 'channel1', 'org_id': 1}

        package = {'name': 'name1',
                   'version': 'version1',
                   'release': 'release1',
                   'epoch': 'epoch1',
                   'arch': 'arch1',
                   'checksum': 'checksum1',
                   'checksum_type': 'c_type1',
                   'org_id': 1,
                   'channels': [{'label': 'Label1', 'id': 'channel1'}]}
        refpack = importLib.IncompletePackage().populate(package)
        ipack = rs.associate_package(pack)
        self.assertEqual(ipack, refpack)

    def test_get_errata_no_advisories_found(self):
        rs = self._create_mocked_reposync()
        _mock_rhnsql(self.reposync, None)
        self.assertEqual(rs.get_errata('bogus'), None)

    def test_get_errata_advisories_but_no_channels(self):
        rs = self._create_mocked_reposync()
        _mock_rhnsql(self.reposync, [{'id': 42}, []])
        self.assertEqual(rs.get_errata('bogus'),
                         {'channels': [], 'id': 42, 'packages': []})

    def test_get_errata_success(self):
        rs = self._create_mocked_reposync()
        _mock_rhnsql(self.reposync, [{'id': 42}, ['channel1', 'channel2']])
        self.assertEqual(rs.get_errata('bogus'),
                         {'id': 42, 'channels': ['channel1', 'channel2'],
                          'packages': []})

    def test_get_compat_arches(self):
        _mock_rhnsql(self.reposync, ({'label': 'a1'}, {'label':'a2'}))
        self.assertEqual(self.reposync.RepoSync.get_compatible_arches(None),
                         ['a1', 'a2'])

    def test_get_channel_arch(self):
        _mock_rhnsql(self.reposync, {'label': 'channel-amd64-deb'})
        self.assertEqual(self.reposync.RepoSync.get_channel_arch(None), 'amd64')

    def test_set_repo_credentials_no_credentials(self):
        url = {'source_url': "http://example.com"}
        rs = self._create_mocked_reposync()

        rs.set_repo_credentials(url)
        self.assertEqual(url['source_url'], "http://example.com")

    def test_set_repo_credentials_old_default_credentials_bad(self):
        url = {
            "source_url": [
                "http://example.com/?credentials=testcreds"
            ]
        }
        rs = self._create_mocked_reposync()
        self.assertRaises(SystemExit, rs.set_repo_credentials, url)

    def test_set_repo_credentials_bad_credentials(self):
        rs = _init_reposync(self.reposync)
        rs.error_msg = Mock()
        url = {
            "source_url": [
                "http://example.com/?credentials=bad_creds_with_underscore"
            ]
        }
        self.assertRaises(SystemExit, rs.set_repo_credentials, url)

    def test_set_repo_credentials_number_credentials(self):
        rs = self._create_mocked_reposync()
        url = {
            "source_url": [
                "http://example.com/?credentials=testcreds_42"
            ]
        }
        _mock_rhnsql(self.reposync, [{ 'username' : 'foo', 'password': '******' , 'extra_auth': memoryview(b'{\"my_header\":  \"my_value\"}')}])
        self.assertEqual(
            rs.set_repo_credentials(url), [{"url":"http://*****:*****@example.com/", "http_headers": {"my_header": "my_value"}}])

    def test_is_old_style(self):
        """
        Test for _is_old_suse_style
        """
        notice = {'from': '*****@*****.**',
                  'version': '1111',
                  'update_id': 'sles-kernel-default'}
        self.assertTrue(self.reposync.RepoSync._is_old_suse_style(notice))

        notice = {'from': '*****@*****.**',
                  'version': '7',
                  'update_id': 'res5ct-kernel-default'}
        self.assertTrue(self.reposync.RepoSync._is_old_suse_style(notice))

        notice = {'from': '*****@*****.**',
                  'version': '1',
                  'update_id': 'sles-kernel-default'}
        self.assertFalse(self.reposync.RepoSync._is_old_suse_style(notice))

        notice = {'from': '*****@*****.**',
                  'version': '6',
                  'update_id': 'res5ct-kernel-default'}
        self.assertFalse(self.reposync.RepoSync._is_old_suse_style(notice))

    def test_to_db_date(self):
        """
        Test for _to_db_date
        """
        # Make sure datetime.fromtimestamp always returns UTC times
        class DateTimeMock(datetime):
            @classmethod
            def fromtimestamp(cls, timestamp):
                return cls.utcfromtimestamp(timestamp)

        os.environ['TZ'] = 'UTC'
        time.tzset()
        with patch("spacewalk.satellite_tools.reposync.datetime", DateTimeMock):
            self.assertEqual(self.reposync.RepoSync._to_db_date('2015-01-02 01:02:03'), '2015-01-02 01:02:03')
            self.assertEqual(self.reposync.RepoSync._to_db_date('1420160523'), '2015-01-02 01:02:03')
            self.assertEqual(self.reposync.RepoSync._to_db_date('2015-01-02'), '2015-01-02 00:00:00')
            self.assertEqual(self.reposync.RepoSync._to_db_date('2015-09-02 13:39:49 UTC'), '2015-09-02 13:39:49')
            self.assertEqual(self.reposync.RepoSync._to_db_date('2015-01-02T02:02:03+0100'), '2015-01-02 01:02:03')
            self.assertRaises(ValueError, self.reposync.RepoSync._to_db_date, '2015-01-02T01:02:03+nonsense')

    def _create_mocked_reposync(self):
        """Create a fully mocked RepoSync"""
        rs = _init_reposync(self.reposync)
        rs.urls = [{'id': None, "source_url": ["http://none.host/bogus-url"], "metadata_signed": "N", "repo_label": None, 'repo_type': 'yum'}]
        rs = _mock_sync(self.reposync, rs)

        return rs
Example #58
0
 def __str__(self, print_doc=True):
     sio = StringIO()
     self.config_print(buf=sio, print_doc=print_doc)
     return sio.getvalue()
Example #59
0
from io import StringIO
sIO = StringIO('Hello\nWorld\nMartian')
print(sIO.getvalue())
while True:
    s = sIO.readline()
    if s == '':
        break
    print(s.strip())
Example #60
0
        df_train = dd.read_csv(pathToFile)
        df_train = df_train.compute()
        tfs = pd.read_csv('src/tissue_tfs.csv', header=None)
        genes = pd.read_csv('src/tissue_genes.csv', header=None)
        tfs.columns = ['', '']
        genes.columns = ['', '']
        for sampleid in range(1, nSamples[k] + 1):
            sampleNet = pd.DataFrame(
                index=tfs.iloc[1:, 1],
                columns=genes.iloc[:, 1],
                data=df_train.iloc[:, sampleid].values.reshape((644, 30243)))
            fileName = tissuesCase[k] + '_sample' + '_' + df_train.columns[
                sampleid] + '.csv'
            bucket = 'granddb'  # already created on S3
            csv_buffer = StringIO()
            sampleNet.to_csv(csv_buffer)
            s3_resource = boto3.resource('s3')
            res = s3_resource.Object(
                bucket, 'tissues/networks/lioness/singleSample/' +
                fileName).put(Body=csv_buffer.getvalue())
            if res['ResponseMetadata']['HTTPStatusCode'] == 200:
                os.system(
                    'aws s3api put-object-acl --bucket granddb --key tissues/networks/lioness/singleSample/'
                    + fileName + ' --acl public-read')
                resURL = 'https://granddb.s3.amazonaws.com/tissues/networks/lioness/singleSample/' + fileName
                print(resURL)
            else:
                print('error')
        os.system('rm ' + pathToFile)
    k = k + 1