Example #1
    def test_api_key_should_be_revoked(self):
        user = user_factory(id=67890)
        # The test csv contains an entry with this user and the "right" secret.
        right_secret = (
            'ab2228544a061cb2af21af97f637cc58e1f8340196f1ddc3de329b5974694b26')
        apikey = APIKey.objects.create(
            key='user:{}:{}'.format(user.pk, '333'), secret=right_secret,
            user=user, is_active=True)
        stdout = StringIO()
        call_command('revoke_api_keys', self.csv_path, stdout=stdout)
        stdout.seek(0)
        output = stdout.readlines()
        assert output[0] == (
            'Ignoring APIKey user:12345:666, it does not exist.\n')
        assert output[1] == (
            'Revoked APIKey user:67890:333.\n')
        assert output[2] == (
            'Ignoring APIKey garbage, it does not exist.\n')
        assert output[3] == (
            'Done. Revoked 1 keys out of 3 entries.\n')

        # API key is now inactive, secret hasn't changed, the other user api
        # key is still there, there are no additional APIKeys.
        apikey.reload()
        assert apikey.secret == right_secret
        assert apikey.is_active is None
        assert APIKey.objects.filter(user=user).count() == 2
        assert APIKey.objects.filter(user=user, is_active=True).count() == 1
Example #2
File: misc.py  Project: pylada/pylada-light
def write_f90namelist(f90namelist, stream=None):
    """ Writes namelist to file or string, or stream

        - if stream is None (default), then returns a string containing namelist in fortran
            format
        - if stream is a string, then it should a path to a file
        - otherwise, stream is assumed to be a stream of some sort, with a `write` method

        Keywords are passed on to :py:method:`Namelist.namelist`
    """
    from f90nml import Namelist as F90Namelist
    from six import StringIO
    from ..misc import local_path
    if stream is None:
        result = StringIO()
        write_f90namelist(f90namelist, result)
        result.seek(0)
        return result.read()

    if isinstance(stream, str):
        path = local_path(stream)
        logger.log(10, "Writing fortran namelist to %s" % path)
        with open(path, 'w') as file:
            write_f90namelist(f90namelist, file)
        return
    f90namelist.write(stream)
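A minimal usage sketch for the three stream modes the docstring describes; the Namelist contents and the output path here are hypothetical, not from the pylada docs:

from f90nml import Namelist

nml = Namelist({'control': {'calculation': 'scf'}})   # hypothetical contents

text = write_f90namelist(nml)          # stream=None: returns the namelist as a string
write_f90namelist(nml, 'pwscf.in')     # a string is treated as a path to a file
with open('pwscf.in', 'w') as fh:
    write_f90namelist(nml, fh)         # anything else is used as a writable stream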
Example #3
    def test__read_write_credentials_file(self):
        credentials = _create_test_credentials()
        contents = StringIO()

        multiprocess_file_storage._write_credentials_file(
            contents, {'key': credentials})

        contents.seek(0)
        data = json.load(contents)
        self.assertEqual(data['file_version'], 2)
        self.assertTrue(data['credentials']['key'])

        # Read it back.
        contents.seek(0)
        results = multiprocess_file_storage._load_credentials_file(contents)
        self.assertEqual(
            results['key'].access_token, credentials.access_token)

        # Add an invalid credential and try reading it back. It should ignore
        # the invalid one but still load the valid one.
        data['credentials']['invalid'] = '123'
        results = multiprocess_file_storage._load_credentials_file(
            StringIO(json.dumps(data)))
        self.assertNotIn('invalid', results)
        self.assertEqual(
            results['key'].access_token, credentials.access_token)
Example #4
    def fetch_data(self):
        # create a data frame directly from the full text of
        # the response from the returned file-descriptor.
        data = self.fetch_url(self.url)
        fd = StringIO()

        if isinstance(data, str):
            fd.write(data)
        else:
            for chunk in data:
                fd.write(chunk)

        self.fetch_size = fd.tell()

        fd.seek(0)

        try:
            # see if pandas can parse csv data
            frames = read_csv(fd, **self.pandas_kwargs)

            frames_hash = hashlib.md5(str(fd.getvalue()).encode('utf-8'))
            self.fetch_hash = frames_hash.hexdigest()
        except pd.parser.CParserError:
            # could not parse the data, raise exception
            raise Exception('Error parsing remote CSV data.')
        finally:
            fd.close()

        return frames
Example #5
    def _run_doit(self, sel_tasks, reporter=None, doit_vars=None):
        """load this file as dodo file to collect tasks"""
        inc = IncrementalTasks(self.py_files, test_files=list(self.test_files))
        output = StringIO()
        config = {
            'dep_file': self.DB_FILE,
            'continue': True,
            'outfile': output,
        }
        if reporter:
            config['reporter'] = reporter

        ctx = {
            'tasks_generator': inc,
            'DOIT_CONFIG': config,
        }
        doit_cmd.reset_vars()
        if doit_vars:
            for key, value in doit_vars.items():
                doit_cmd.set_var(key, value)
        loader = ModuleTaskLoader(ctx)
        cmd = Run(task_loader=loader)
        cmd.parse_execute(sel_tasks)
        output.seek(0)
        return inc.graph, output.read()
Example #6
 def test_discover(self):
     org = 'quay.io/cogniteev'
     truncated_config = StringIO()
     config = load_config(self.CONFIG_FILE)
     config['organizations'][org]['repositories'] = None
     save_config(config, truncated_config)
     truncated_config.seek(0, 0)
     discover_command(['cogniteev'], truncated_config, interactive=False)
     config = load_config(self.CONFIG_FILE)
     tags = config.get('organizations', {})\
         .get('quay.io/cogniteev', {})\
         .get('repositories', {})\
         .get('docido-contrib-crawlers', {})\
         .get('on_build', {})\
         .get('tags', {})
     self.assertEqual(len(tags), 1)
     latest_trigger = tags.get('latest')
     self.assertEqual(len(latest_trigger), 1)
     github_repo = 'quay.io/cogniteev/docido-pull-crawler-github'
     assertCountEqual(
         self,
         latest_trigger.get(github_repo),
         [
             dict(trigger_uuid="dcb1e958-9fdb-4e9b-9856-4d52771b3df9",
                  ref="refs/heads/develop"),
             dict(trigger_uuid="567da7a3-0373-4cf2-8480-58a18b8dbe47",
                  ref="refs/tags/v1.1"),
         ]
     )
Example #7
    def test_dump_and_load(self):
        in_resource = Book(
            title="Consider Phlebas",
            isbn="0-333-45430-8",
            num_pages=471,
            rrp=19.50,
            fiction=True,
            genre="sci-fi",
            authors=[Author(name="Iain M. Banks")],
            publisher=Publisher(name="Macmillan"),
            published=[datetime.datetime(1987, 1, 1)],
        )

        fp = StringIO()
        yaml_codec.dump(in_resource, fp)

        fp.seek(0)
        out_resource = yaml_codec.load(fp)

        assert out_resource.title == in_resource.title
        assert out_resource.isbn == in_resource.isbn
        assert out_resource.num_pages == in_resource.num_pages
        assert out_resource.rrp == in_resource.rrp
        assert out_resource.fiction == in_resource.fiction
        assert out_resource.genre == in_resource.genre
        assert out_resource.authors[0].name == in_resource.authors[0].name
        assert out_resource.publisher.name == in_resource.publisher.name
        assert out_resource.published[0] == in_resource.published[0]
Example #8
    def test_dump_and_load(self):
        in_resource = Book(
            title='Consider Phlebas',
            isbn='0-333-45430-8',
            num_pages=471,
            rrp=19.50,
            fiction=True,
            genre="sci-fi",
            authors=[Author(name="Iain M. Banks")],
            publisher=Publisher(name="Macmillan"),
            published=[datetime.datetime(1987, 1, 1)]
        )

        fp = StringIO()
        yaml_codec.dump(in_resource, fp)

        fp.seek(0)
        out_resource = yaml_codec.load(fp)

        self.assertEqual(out_resource.title, in_resource.title)
        self.assertEqual(out_resource.isbn, in_resource.isbn)
        self.assertEqual(out_resource.num_pages, in_resource.num_pages)
        self.assertEqual(out_resource.rrp, in_resource.rrp)
        self.assertEqual(out_resource.fiction, in_resource.fiction)
        self.assertEqual(out_resource.genre, in_resource.genre)
        self.assertEqual(out_resource.authors[0].name, in_resource.authors[0].name)
        self.assertEqual(out_resource.publisher.name, in_resource.publisher.name)
        self.assertEqual(out_resource.published[0], in_resource.published[0])
Example #9
def test_to_string3():
    # Test printing
    outcomes = ['00', '01', '10', '11']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    s_ = """Class:          Distribution
Alphabet:       ('0', '1') for all rvs
Base:           linear
Outcome Class:  str
Outcome Length: 2
RV Names:       None

x    p(x)
00   0.25
01   0.25
10   0.25
11   0.25"""

    # context manager?
    import sys
    from six import StringIO
    sio = StringIO()
    try:
        old = sys.stdout
        sys.stdout = sio
        print(d, end='')
    finally:
        sys.stdout = old
    sio.seek(0)
    s = sio.read()
    assert_equal(s, s_)
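The "# context manager?" note above can be answered on Python 3 with contextlib.redirect_stdout; a standalone sketch, not part of the original test:

import contextlib
from six import StringIO

sio = StringIO()
with contextlib.redirect_stdout(sio):
    print('00   0.25', end='')
assert sio.getvalue() == '00   0.25'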
Example #10
def runquery_csv():
	global out

	q = frappe.form_dict.get('query')

	rep_name = frappe.form_dict.get('report_name')
	if not frappe.form_dict.get('simple_query'):

		# Report Name
		if not rep_name:
			rep_name = get_sql_tables(q)[0]

	if not rep_name: rep_name = 'DataExport'

	rows = [[rep_name], out['colnames']] + out['values']

	from six import StringIO
	import csv

	f = StringIO()
	writer = csv.writer(f)
	for r in rows:
		# encode only unicode type strings and not int, floats etc.
		writer.writerow(map(lambda v: isinstance(v, text_type) and v.encode('utf-8') or v, r))

	f.seek(0)
	out['result'] = text_type(f.read(), 'utf-8')
	out['type'] = 'csv'
	out['doctype'] = rep_name
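The encoding lambda from the writerow call above, shown in isolation (a sketch; text_type is unicode on Python 2 and str on Python 3):

from six import text_type

encode = lambda v: isinstance(v, text_type) and v.encode('utf-8') or v
assert encode(u'caf\xe9') == b'caf\xc3\xa9'   # unicode strings are utf-8 encoded
assert encode(42) == 42                       # ints, floats etc. pass through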
Example #11
    def test_roundtrip_sequence_collections_and_alignments(self):
        fps = list(map(lambda e: list(map(get_data_path, e)),
                       [('empty', 'empty'),
                        ('fasta_sequence_collection_different_type',
                         'qual_sequence_collection_different_type')]))

        for reader, writer in ((_fasta_to_sequence_collection,
                                _sequence_collection_to_fasta),
                               (_fasta_to_alignment,
                                _alignment_to_fasta)):
            for fasta_fp, qual_fp in fps:
                # read
                obj1 = reader(fasta_fp, qual=qual_fp)

                # write
                fasta_fh = StringIO()
                qual_fh = StringIO()
                writer(obj1, fasta_fh, qual=qual_fh)
                fasta_fh.seek(0)
                qual_fh.seek(0)

                # read
                obj2 = reader(fasta_fh, qual=qual_fh)
                fasta_fh.close()
                qual_fh.close()

                self.assertEqual(obj1, obj2)
Example #12
File: httpserver.py  Project: BBOOXX/stash
 def do_POST(self):
     """Serve a POST request."""
     r, info = self.deal_post_data()
     print(r, info, "by: ", self.client_address)
     f = StringIO()
     f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
     f.write("<html>\n<title>Upload Result Page</title>\n")
     f.write("<body>\n<h2>Upload Result Page</h2>\n")
     f.write("<hr>\n")
     if r:
         f.write("<strong>Success:</strong>")
     else:
         f.write("<strong>Failed:</strong>")
     f.write(info)
     f.write("<br><a href=\"%s\">back</a>" % self.headers['referer'])
     f.write("<hr><small>Powerd By: bones7456, check new version at ")
     f.write("<a href=\"http://li2z.cn/?s=SimpleHTTPServerWithUpload\">")
     f.write("here</a>.</small></body>\n</html>\n")
     length = f.tell()
     f.seek(0)
     self.send_response(200)
     self.send_header("Content-type", "text/html")
     self.send_header("Content-Length", str(length))
     self.end_headers()
     if f:
         self.copyfile(f, self.wfile)
         f.close()
Example #13
    def test_start_subshell(self, call_mock, tempfile_mock):
        memfile = StringIO()
        memfile.name = 'FILENAME'
        tempfile_mock.return_value = memfile
        credentials = {'AWS_VALID_SECONDS': 600}
        start_subshell(credentials, 'ACCOUNT', 'ROLE')
        call_mock.assert_called_once_with(
            ["bash", "--rcfile", 'FILENAME'],
            stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin)
        expected = dedent("""
            # Pretend to be an interactive, non-login shell
            for file in /etc/bash.bashrc ~/.bashrc; do
                [ -f "$file" ] && . "$file"
            done

            function afp_minutes_left {
                if ((SECONDS >= 600)) ; then
                    echo EXPIRED
                else
                    echo $(((600-SECONDS)/60)) Min
                fi
            }

            PS1="(AWS ACCOUNT/ROLE \$(afp_minutes_left)) $PS1"
            export AWS_VALID_SECONDS='600'""")
        memfile.seek(0)
        received = memfile.read()
        self.assertEqual(received, expected)
Example #14
def basic_config(config_file):
    from six import StringIO
    import yaml
    buff = StringIO()
    buff.write(config_file)
    buff.seek(0)
    return Config.from_dict(yaml.load(buff))
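For comparison, PyYAML also accepts a plain string, so the StringIO round-trip above is not strictly needed; a sketch using safe_load to avoid the unsafe-load warning:

import yaml

config = yaml.safe_load('apikey: abc123\nverbosity: 1\n')
assert config == {'apikey': 'abc123', 'verbosity': 1}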
Example #15
    def test_roundtrip_biological_sequences(self):
        fps = list(map(lambda e: list(map(get_data_path, e)),
                       [('fasta_multi_seq_roundtrip',
                         'qual_multi_seq_roundtrip'),
                        ('fasta_sequence_collection_different_type',
                         'qual_sequence_collection_different_type')]))

        for reader, writer in ((_fasta_to_biological_sequence,
                                _biological_sequence_to_fasta),
                               (partial(_fasta_to_dna_sequence,
                                        validate=False),
                                _dna_sequence_to_fasta),
                               (partial(_fasta_to_rna_sequence,
                                        validate=False),
                                _rna_sequence_to_fasta),
                               (partial(_fasta_to_protein_sequence,
                                        validate=False),
                                _protein_sequence_to_fasta)):
            for fasta_fp, qual_fp in fps:
                # read
                obj1 = reader(fasta_fp, qual=qual_fp)

                # write
                fasta_fh = StringIO()
                qual_fh = StringIO()
                writer(obj1, fasta_fh, qual=qual_fh)
                fasta_fh.seek(0)
                qual_fh.seek(0)

                # read
                obj2 = reader(fasta_fh, qual=qual_fh)
                fasta_fh.close()
                qual_fh.close()

                self.assertEqual(obj1, obj2)
Example #16
File: config.py  Project: amueller/python
def _parse_config():
    """Parse the config file, set up defaults.
    """
    defaults = {'apikey': apikey,
                'server': server,
                'verbosity': 0,
                'cachedir': os.path.expanduser('~/.openml/cache'),
                'avoid_duplicate_runs': 'True'}

    config_file = os.path.expanduser('~/.openml/config')
    config = configparser.RawConfigParser(defaults=defaults)

    if not os.path.exists(config_file):
        # Create an empty config file if there was none so far
        fh = open(config_file, "w")
        fh.close()
        logger.info("Could not find a configuration file at %s. Going to "
                    "create an empty file there." % config_file)

    try:
        # Cheat the ConfigParser module by adding a fake section header
        config_file_ = StringIO()
        config_file_.write("[FAKE_SECTION]\n")
        with open(config_file) as fh:
            for line in fh:
                config_file_.write(line)
        config_file_.seek(0)
        config.readfp(config_file_)
    except OSError as e:
        logging.info("Error opening file %s: %s",
                     config_file, e.message)
    return config
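The fake-section trick in isolation: RawConfigParser refuses files without a [section] header, so one is prepended before parsing (a sketch with hypothetical keys):

from six import StringIO
from six.moves import configparser

raw = 'apikey = abc123\nverbosity = 1\n'           # a config file with no sections
parser = configparser.RawConfigParser()
parser.readfp(StringIO('[FAKE_SECTION]\n' + raw))  # read_file() on modern Python
assert parser.get('FAKE_SECTION', 'apikey') == 'abc123'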
Example #17
def write_f90namelist(f90namelist, stream=None):
    """ Writes namelist to file or string, or stream

        - if stream is None (default), then returns a string containing namelist in fortran
            format
        - if stream is a string, then it should a path to a file
        - otherwise, stream is assumed to be a stream of some sort, with a `write` method

        Keywords are passed on to :py:method:`Namelist.namelist`
    """
    from f90nml import Namelist as F90Namelist
    from six import StringIO
    from ..misc import local_path
    if stream is None:
        result = StringIO()
        write_f90namelist(f90namelist, result)
        result.seek(0)
        return result.read()

    if isinstance(stream, str):
        path = local_path(stream)
        logger.log(10, "Writing fortran namelist to %s" % path)
        with open(path, 'w') as file:
            write_f90namelist(f90namelist, file)
        return

    for key, value in f90namelist.items():
        if isinstance(value, list):
            for g_vars in value:
                f90namelist.write_nmlgrp(key, g_vars, stream)
        elif isinstance(value, F90Namelist):
            f90namelist.write_nmlgrp(key, value, stream)
        else:
            raise RuntimeError("Can only write namelists that consist of namelists")
Example #18
File: figure.py  Project: Epispread/Pyfig
    def __init__(self, settings=None, setup=True, check=False):
        if isinstance(settings, six.string_types):
            settingsfile = StringIO()
            settings = re.sub(r" *\\\n *", " ", settings)
            settingsfile.write(settings)
            settingsfile.seek(0)
            self.settings = configobj.ConfigObj(
                settingsfile,
                configspec=os.path.join(config.CONFIG_DIR, "settings.spec"))
            tools.cobj_check(self.settings, exception=PyfigError)
        elif check:
            self.settings = configobj.ConfigObj(
                settings,
                configspec=os.path.join(config.CONFIG_DIR, "settings.spec"))
            tools.cobj_check(self.settings, exception=PyfigError)
        else:
            self.settings = settings

        self.rows = []
        self.cols = []
        self.title = None
        self.repo = self._get_repo()
        self.plotlines = collections.defaultdict(list)
        self.labels = collections.defaultdict(list)
        self.style = collections.defaultdict(
            lambda: collections.defaultdict(dict))

        if setup:
            matplotlib.figure.Figure.__init__(
                self, figsize=self.settings["figsize"])
            self.setup()
Example #19
    def test_fetch(self, mocked_urlopen):
        mresponse = StringIO()
        mresponse.write('structureId,chainId,structureTitle,compound,ecNo,uniprotAcc,uniprotRecommendedName\n')
        mresponse.write('"104L","B","HOW AMINO-ACID INSERTIONS ARE ALLOWED IN AN ALPHA-HELIX OF T4 LYSOZYME","T4 LYSOZYME","3.2.1.17","P00720","Endolysin"\n')
        mresponse.write('"12E8","H","2E8 FAB FRAGMENT","IGG1-KAPPA 2E8 FAB (HEAVY CHAIN)","","",""\n')
        mresponse.seek(0)
        mocked_urlopen.return_value = mresponse

        pdb_report = PdbReport(['104L', '12E8'])

        pdbs = list(pdb_report.fetch())

        expected = [{
            'chainId': 'B',
            'structureId': '104L',
            'structureTitle': 'HOW AMINO-ACID INSERTIONS ARE ALLOWED IN AN ALPHA-HELIX OF T4 LYSOZYME',
            'ecNo': '3.2.1.17',
            'uniprotAcc': 'P00720',
            'compound': 'T4 LYSOZYME',
            'uniprotRecommendedName': 'Endolysin'
        }, {
            'chainId': 'H',
            'structureId': '12E8',
            'structureTitle': '2E8 FAB FRAGMENT',
            'ecNo': None,
            'uniprotAcc': None,
            'compound': 'IGG1-KAPPA 2E8 FAB (HEAVY CHAIN)',
            'uniprotRecommendedName': None
        }]
        eq_(pdbs, expected)
Example #20
 def test_deprecated_io(self):
     fh = StringIO()
     npt.assert_warns(DeprecationWarning, self.ordination_results.to_file, fh)
     fh.seek(0)
     deserialized = npt.assert_warns(DeprecationWarning, OrdinationResults.from_file, fh)
     assert_ordination_results_equal(deserialized, self.ordination_results)
     self.assertTrue(type(deserialized) == OrdinationResults)
Example #21
def _parse_config():
    """Parse the config file, set up defaults.
    """

    config = configparser.RawConfigParser(defaults=_defaults)

    if not os.path.exists(config_file):
        # Create an empty config file if there was none so far
        fh = open(config_file, "w")
        fh.close()
        logger.info("Could not find a configuration file at %s. Going to "
                    "create an empty file there." % config_file)

    try:
        # Cheat the ConfigParser module by adding a fake section header
        config_file_ = StringIO()
        config_file_.write("[FAKE_SECTION]\n")
        with open(config_file) as fh:
            for line in fh:
                config_file_.write(line)
        config_file_.seek(0)
        config.readfp(config_file_)
    except OSError as e:
        logging.info("Error opening file %s: %s", config_file, e.message)
    return config
Example #22
 def test_export_csv_with_dialect(self):
     output_file = StringIO()
     self.t.to_csv(output_file=output_file, dialect="excel")
     output_file.seek(0)
     reader = csv.DictReader(output_file, dialect="excel")
     row = next(reader)
     self.assertEqual(row, {"A":"1", "B":"0.0", "C":"x"})
Example #23
    def test_export_csv_with_dialect_types(self):
        output_file = StringIO()
        self.t.to_csv(output_file=output_file, dialect="excel", descriptions=True)
        output_file.seek(0)
        reader = csv.DictReader(output_file, dialect="excel")

        self.assertEqual(next(reader), {"A (int)":"1", "B (float)":"0.0", "C (str)":"x"})
Example #24
 def write_to_db(self, db, transaction=None, commit=True):
     if transaction is None:
         transaction = db
     fp = StringIO()
     if len(self) < Timeseries.MAX_ALL_BOTTOM:
         top = ''
         middle = None
         self.write(fp)
         bottom = fp.getvalue()
     else:
         dates = sorted(self.keys())
         self.write(fp, end=dates[Timeseries.ROWS_IN_TOP_BOTTOM - 1])
         top = fp.getvalue()
         fp.truncate(0)
         fp.seek(0)
         self.write(fp, start=dates[Timeseries.ROWS_IN_TOP_BOTTOM],
                    end=dates[-(Timeseries.ROWS_IN_TOP_BOTTOM + 1)])
         middle = self.blob_create(
             zlib.compress(fp.getvalue().encode('ascii')))
         fp.truncate(0)
         fp.seek(0)
         self.write(fp, start=dates[-Timeseries.ROWS_IN_TOP_BOTTOM])
         bottom = fp.getvalue()
     fp.close()
     c = db.cursor()
     c.execute("DELETE FROM ts_records WHERE id=%d" % (self.id))
     c.execute("""INSERT INTO ts_records (id, top, middle, bottom)
                  VALUES (%s, %s, %s, %s)""", (self.id, top, middle,
               bottom))
     c.close()
     if commit:
         transaction.commit()
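The buffer-reuse idiom from write_to_db above, reduced to its core: truncate(0) followed by seek(0) empties and rewinds a StringIO between sections (a standalone sketch):

from six import StringIO

fp = StringIO()
fp.write('top rows')
top = fp.getvalue()
fp.truncate(0)
fp.seek(0)
fp.write('bottom rows')
assert (top, fp.getvalue()) == ('top rows', 'bottom rows')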
Example #25
    def test_roundtrip_sequence_collections_and_alignments(self):
        fps = list(map(lambda e: list(map(get_data_path, e)),
                       [('empty', 'empty'),
                        ('fasta_sequence_collection_different_type',
                         'qual_sequence_collection_different_type')]))

        for reader, writer in ((_fasta_to_sequence_collection,
                                _sequence_collection_to_fasta),
                               (_fasta_to_alignment,
                                _alignment_to_fasta)):
            for fasta_fp, qual_fp in fps:
                # read
                obj1 = reader(fasta_fp, qual=qual_fp)

                # write
                fasta_fh = StringIO()
                qual_fh = StringIO()
                writer(obj1, fasta_fh, qual=qual_fh)
                fasta_fh.seek(0)
                qual_fh.seek(0)

                # read
                obj2 = reader(fasta_fh, qual=qual_fh)
                fasta_fh.close()
                qual_fh.close()

                # TODO remove this custom equality testing code when
                # SequenceCollection has an equals method (part of #656).
                # We need this method to include IDs and description in the
                # comparison (not part of SequenceCollection.__eq__).
                self.assertEqual(obj1, obj2)
                for s1, s2 in zip(obj1, obj2):
                    self.assertTrue(s1.equals(s2))
Example #26
def test_write_csv_to_stream():
    observation = OrderedDict([
        ('datetime', datetime.datetime(2015, 1, 24, 18, tzinfo=pytz.UTC)),
        ('wind_direction', 'W'),
        ('wind_direction_degrees', 270.0),
        ('pressure_tendency', 'R'),
        ('screen_relative_humidity', 82.5),
        ('pressure', 1029.0),
        ('wind_speed', 16.0),
        ('temperature', 6.1),
        ('weather_type', 'Overcast'),
        ('visibility', 9000.0),
        ('dew_point', 3.4),
    ])

    stream = StringIO()
    write_csv_to_stream(stream, [observation])
    stream.seek(0)

    lines = stream.read().split('\r\n')

    expected_header = (
        'datetime,wind_direction,wind_direction_degrees,pressure_tendency,'
        'screen_relative_humidity,pressure,wind_speed,wind_gust,temperature,'
        'weather_type,visibility,dew_point')

    expected_line_1 = ','.join([
        '2015-01-24T18:00:00Z', 'W', '270.0', 'R', '82.5', '1029.0', '16.0',
        '', '6.1', 'Overcast', '9000.0', '3.4'])

    yield assert_equal, expected_header, lines[0]
    yield assert_equal, expected_line_1, lines[1]
Example #27
def to_string(table):
    """
    Renders the table as a unicode string, with a size summary, numbered
    columns, and right-justified cells.
    >>> type(to_string([['foo', 'goodbye'], ['llama', 'bar']]))
    <type 'unicode'>
    """
    result = StringIO()

    (columns, rows) = get_dimensions(table)

    result.write("     {} columns, {} rows\n".format(columns, rows))
    col_widths = find_column_widths(table)
    table_width = sum(col_widths) + len(col_widths) + 2
    hbar = '    {}\n'.format('-' * table_width)

    result.write("      {}\n".format(' '.join(
        [six.text_type(col_index).rjust(width, ' ') for (col_index, width)
         in enumerate(col_widths)])))

    result.write(hbar)
    for row_index, row in enumerate(table):
        cells = [cell.rjust(width, ' ') for (cell, width)
                 in zip(row, col_widths)]
        result.write("{:>3} | {}|\n".format(row_index, '|'.join(cells)))
    result.write(hbar)
    result.seek(0)
    return six.text_type(result.read())
Example #28
File: SVGdraw.py  Project: CGATOxford/cgat
 def toXml(self, filename='', compress=False):
     xml = StringIO()
     xml.write("<?xml version='1.0' encoding='UTF-8'?>\n")
     xml.write(
         "<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.0//EN\" \"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd \">\n")
     self.svg.toXml(0, xml)
     if not filename:
         if compress:
             import gzip
             f = StringIO()
             zf = gzip.GzipFile(fileobj=f, mode='wb')
             zf.write(xml.getvalue())
             zf.close()
             f.seek(0)
             return f.read()
         else:
             return xml.getvalue()
     else:
         if filename[-4:] == 'svgz':
             import gzip
             f = gzip.GzipFile(
                 filename=filename, mode="wb", compresslevel=9)
             f.write(xml.getvalue())
             f.close()
         else:
              f = open(filename, 'w')  # the 'file' builtin is Python 2 only
             f.write(xml.getvalue())
             f.close()
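The in-memory gzip branch above, sketched with io.BytesIO, since GzipFile needs a byte stream on Python 3:

import gzip
import io

buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as zf:
    zf.write(b'<svg/>')                 # the serialized XML would go here
assert gzip.decompress(buf.getvalue()) == b'<svg/>'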
Example #29
    def load_dataset(infile, selection, verbose=1, **kwargs):
        """
        Loads selected distribution from selected infile.

        Arguments:
          infile (str): Path to text input file
          selection (str): Start of lines containing desired dataset
          verbose (int): Level of verbose output

        Returns:
          dataset (DataFrame): Selected dataset
        """
        from six import StringIO
        import pandas

        if verbose >= 1:
            print("loading '{0}' from '{1}'".format(selection, infile))

        s = StringIO()
        with open(infile) as open_infile:
            for line in open_infile:
                if line.startswith(selection):
                    s.write(line)
        s.seek(0)

        dataset = pandas.read_csv(s, delim_whitespace=True, header=None,
          usecols=[3,4,5,6], names=["phi", "psi", "probability",
          "free energy"])

        return dataset
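A miniature of the same prefix-filter-then-parse pattern with hypothetical data (in the real input, the desired columns sit at positions 3-6):

from six import StringIO
import pandas

text = ('rama A 1 -60.0 -45.0 0.8 1.2\n'
        'other B 2 0.0 0.0 0.0 0.0\n')
s = StringIO(''.join(l for l in text.splitlines(True) if l.startswith('rama')))
frame = pandas.read_csv(s, delim_whitespace=True, header=None, usecols=[3, 4, 5, 6])
print(frame)   # only the four selected columns of the matching line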
Example #30
def port_project(dir_name, schemas, spiders, extractors):
    """Create project layout, default files and project specific code."""
    dir_name = class_name(dir_name)
    zbuff = StringIO()
    archive = UpdatingZipFile(zbuff, "w", zipfile.ZIP_DEFLATED)
    write_to_archive(archive, '', start_scrapy_project(dir_name).items())
    items_py, schema_names = create_schemas(schemas)
    write_to_archive(archive, dir_name, [('items.py', items_py)])
    write_to_archive(archive, dir_name, create_library_files())

    # XXX: Hack to load items.py file
    items_no_relative = items_py.replace(
        'from .utils.processors import', 'from portia2code.processors import'
    )
    mod = imp.new_module('%s.%s' % (dir_name, 'items'))
    exec(items_no_relative, mod.__dict__)
    items = vars(mod)

    # Load schema objects from module into a separate dict, so the
    # id -> name mapping returned by create_schemas() is not clobbered
    schema_classes = {}
    for _id, name in schema_names.items():
        schema_classes[_id] = items['%sItem' % name]
    schema_classes['_PortiaItem'] = items['PortiaItem']

    spider_data = create_spiders(spiders, schemas, extractors, schema_classes)
    write_to_archive(archive, dir_name, spider_data)
    archive.finalize()
    archive.close()
    zbuff.seek(0)
    return zbuff
Example #31
 def test_fancy_dtype(self):
     c = StringIO()
     c.write('1,2,3.0\n4,5,6.0\n')
     c.seek(0)
     dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
     x = textadapter.loadtxt(c, dtype=dt, delimiter=',')
     a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
     assert_array_equal(x, a)
Example #32
 def test_comments(self):
     c = StringIO()
     c.write('# comment\n1,2,3,5\n')
     c.seek(0)
     x = textadapter.loadtxt(c, dtype=int, delimiter=',', \
         comments='#')
     a = np.array([1, 2, 3, 5], int)
     assert_array_equal(x, a)
Example #33
 def test_missing(self):
     c = StringIO()
     c.write('1,2,3,,5\n')
     c.seek(0)
     x = textadapter.loadtxt(c, dtype=int, delimiter=',', \
         converters={3:lambda s: int(s or - 999)})
     a = np.array([1, 2, 3, -999, 5], int)
     assert_array_equal(x, a)
Example #34
from contextlib import contextmanager

@contextmanager
def capture_stderr(target=None):
    original = sys.stderr
    if target is None:
        target = StringIO()
    sys.stderr = target
    try:
        yield target
    finally:
        # rewind the capture and restore stderr even if the body raises
        target.seek(0)
        sys.stderr = original
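Usage sketch for the fixed context manager above:

import sys

with capture_stderr() as captured:
    sys.stderr.write('boom\n')
assert captured.read() == 'boom\n'   # the target is rewound on exit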
Example #35
 def test_deprecated_io(self):
     fh = StringIO()
     npt.assert_warns(UserWarning, self.dm_3x3.to_file, fh)
     fh.seek(0)
     deserialized = npt.assert_warns(UserWarning,
                                     DissimilarityMatrix.from_file, fh)
     self.assertEqual(deserialized, self.dm_3x3)
     self.assertTrue(type(deserialized) == DissimilarityMatrix)
Example #36
 def call_command(self, name, *args, **kwargs):
     """
     Call management command and return output
     """
     out = StringIO()  # To Capture the output of the command
     call_command(name, *args, stdout=out, **kwargs)
     out.seek(0)
     return out.read()
Example #37
 def _run_dummy_command(self, *args, **kwargs):
     """
     Calls the test command and outputs a dict of the current context.
     """
     out = StringIO()
     call_command('tracked_dummy_command', *args, stdout=out, **kwargs)
     out.seek(0)
     return json.loads(out.read())
Example #38
File: put.py  Project: s-tatus/pymogile
def put(s, uri, username=None, password=None):
    """HTTP PUT the string s to uri, with optional auth data."""
    f = StringIO(s)
    f.seek(0)
    status, resp = putfile(f, uri, username=username, password=password)
    f.close()

    return status, resp
Example #39
 def test_export_course_stdout(self, store_type):
     test_course_key = self.create_dummy_course(store_type)
     out = StringIO()
     call_command('export_olx', six.text_type(test_course_key), stdout=out)
     out.seek(0)
     output = out.read()
     with tarfile.open(fileobj=StringIO(output)) as tar_file:
         self.check_export_file(tar_file, test_course_key)
Example #40
def export_to_csv_buffer(data, tsv=False):
    kwargs = dict(encoding="utf-8", index=False)
    if tsv:
        kwargs["sep"] = "\t"
    csv_buffer = StringIO()
    data.to_csv(csv_buffer, **kwargs)
    csv_buffer.seek(0)
    return csv_buffer
Example #41
 def run_generate_trips_command(self, *args, **kwargs):
     """
     Calls the test command and outputs a dict of the current context.
     """
     out = StringIO()
     call_command('generate_trips', *args, stdout=out, **kwargs)
     out.seek(0)
     return out.read().strip()
Example #42
File: utils.py  Project: yg79/dtale
def export_to_csv_buffer(data, tsv=False):
    kwargs = dict(encoding='utf-8', index=False)
    if tsv:
        kwargs['sep'] = '\t'
    csv_buffer = StringIO()
    data.to_csv(csv_buffer, **kwargs)
    csv_buffer.seek(0)
    return csv_buffer
Example #43
    def run_command(self, **options):
        options.setdefault('interactive', False)

        output = StringIO()
        management.call_command('fixtree', stdout=output, **options)
        output.seek(0)

        return output
Example #44
File: scen.py  Project: openscm/pymagicc
    def _get_stream(self):
        # Create a stream to work with, ignoring any blank lines
        stream = StringIO()
        cleaned_lines = [line.strip() for line in self.lines if line.strip()]
        stream.write("\n".join(cleaned_lines))
        stream.seek(0)

        return stream
Example #45
 def __str__(self):
     stream = StringIO()
     pprint.pprint(self.extractors, stream)
     stream.seek(0)
     template_data = stream.read()
     if template_data:
         return "%s[\n%s\n]" % (self.__class__.__name__, template_data)
     return "%s[none]" % (self.__class__.__name__)
Example #46
 def test_outputs_tab(self):
     sample = StringIO()
     writer = csv.writer(sample, dialect=csv.excel_tab)
     writer.writerows(self.output_data)
     sample.seek(0)
     self.of.tab(self.output_data)
     self.outfile.seek(0)
     self.assertEqual(self.outfile.read(), sample.read())
Example #47
 def test_alignment_to_clustal_with_bad_input(self):
     BAD = StringIO('\n'.join(['dshfjsdfhdfsj', 'hfsdjksdfhjsdf']))
     result = _clustal_to_alignment(BAD, strict=False)
     self.assertEqual(dict(result), {})
     # should fail unless we turned strict processing off
     with self.assertRaises(ClustalFormatError):
         BAD.seek(0)
         dict(_clustal_to_alignment(BAD))
Example #48
def test_load_no_template_paths():
    stream = StringIO()
    yaml.dump({}, stream)
    stream.seek(0)
    index = Index.load('', stream)
    assert index.targets == {}
    assert index.environment.keep_trailing_newline
    assert isinstance(index.environment.loader, FileSystemLoader)
Example #49
 def test_outputs_csv(self):
     sample = StringIO()
     writer = csv.writer(sample)
     writer.writerows(self.output_data)
     sample.seek(0)
     self.of.csv(self.output_data)
     self.outfile.seek(0)
     self.assertEqual(self.outfile.read(), sample.read())
Example #50
def test_syslog_format():
    broker = dr.Broker()
    output = StringIO()
    with SysLogFormat(broker, stream=output):
        dr.run(report, broker=broker)
    output.seek(0)
    data = output.read()
    assert SL_MSG in data
Example #51
 def test_filter_encrypted(self):
     stdout = StringIO()
     with patch('sys.stdout', stdout):
         execute_from_command_line(['', 'listbackups', '--encrypted', '-q'])
     stdout.seek(0)
     stdout.readline()
     for line in stdout.readlines():
         self.assertIn('.gpg', line)
Example #52
def qtree(q):
    q = optimize(q)
    f = StringIO()
    q.print_tree(out=f)
    f.seek(0)
    out = f.read()
    f.close()
    return out
Example #53
def download_sample(name):
    a, b = name.split('.')
    f = StringIO(sample.export(a, b))
    f.seek(0)
    return send_file(f,
                     attachment_filename=name + '.json',
                     as_attachment=True,
                     add_etags=False)
Example #54
def export_query():
	"""export from report builder"""
	form_params = get_form_params()
	form_params["limit_page_length"] = None
	form_params["as_list"] = True
	doctype = form_params.doctype
	add_totals_row = None
	file_format_type = form_params["file_format_type"]

	del form_params["doctype"]
	del form_params["file_format_type"]

	if 'add_totals_row' in form_params and form_params['add_totals_row']=='1':
		add_totals_row = 1
		del form_params["add_totals_row"]

	frappe.permissions.can_export(doctype, raise_exception=True)

	if 'selected_items' in form_params:
		si = json.loads(frappe.form_dict.get('selected_items'))
		form_params["filters"] = {"name": ("in", si)}
		del form_params["selected_items"]

	db_query = DatabaseQuery(doctype)
	ret = db_query.execute(**form_params)

	if add_totals_row:
		ret = append_totals_row(ret)

	data = [['Sr'] + get_labels(db_query.fields, doctype)]
	for i, row in enumerate(ret):
		data.append([i+1] + list(row))

	if file_format_type == "CSV":

		# convert to csv
		import csv
		from six import StringIO

		f = StringIO()
		writer = csv.writer(f)
		for r in data:
			# encode only unicode type strings and not int, floats etc.
			writer.writerow(map(lambda v: isinstance(v, text_type) and v.encode('utf-8') or v, r))

		f.seek(0)
		frappe.response['result'] = text_type(f.read(), 'utf-8')
		frappe.response['type'] = 'csv'
		frappe.response['doctype'] = doctype

	elif file_format_type == "Excel":

		from frappe.utils.xlsxutils import make_xlsx
		xlsx_file = make_xlsx(data, doctype)

		frappe.response['filename'] = doctype + '.xlsx'
		frappe.response['filecontent'] = xlsx_file.getvalue()
		frappe.response['type'] = 'binary'
Example #55
    def test_index(self):
        if sys.platform == 'win32':
            # TODO: this test fails on Windows because of file lock problems
            return

        num_records = 100000
        expected_values = [((num_records-1)*5) + x for x in range(5)]

        data = StringIO()
        generate_dataset(data, IntIter(), ',', num_records)

        # test explicit index building
        adapter = iopro.text_adapter(data, delimiter=',', field_names=False, infer_types=False)
        adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
        adapter.create_index()

        self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
        self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
        self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
        self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
        self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
        self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
        #self.assert_equality(adapter[-1].item(), tuple(expected_values))

        # test implicitly creating disk index on the fly
        if os.path.exists('test.idx'):
            os.remove('test.idx')
        data.seek(0)
        adapter = iopro.text_adapter(data, delimiter=',', field_names=False, infer_types=False, index_name='test.idx')
        adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
        adapter.to_array()

        self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
        self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
        self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
        self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
        self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
        self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
        #self.assert_equality(adapter[-1].item(), tuple(expected_values))

        adapter.close()

        # test loading disk index
        data.seek(0)
        adapter2 = iopro.text_adapter(data, delimiter=',', field_names=False, infer_types=False, index_name='test.idx')
        adapter2.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})

        self.assert_equality(adapter2[0].item(), tuple([(0*5) + x for x in range(5)]))
        self.assert_equality(adapter2[10].item(), tuple([(10*5) + x for x in range(5)]))
        self.assert_equality(adapter2[100].item(), tuple([(100*5) + x for x in range(5)]))
        self.assert_equality(adapter2[1000].item(), tuple([(1000*5) + x for x in range(5)]))
        self.assert_equality(adapter2[10000].item(), tuple([(10000*5) + x for x in range(5)]))
        self.assert_equality(adapter2[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
        #self.assert_equality(adapter2[-1].item(), tuple(expected_values))

        adapter2.close()  # close the second adapter; the first was closed above

        os.remove('test.idx')
Example #56
    def to_string(self, digits=None, exact=None, tol=1e-9):
        """
        Returns a string representation of the distribution.

        Parameters
        ----------
        digits : int or None
            The probabilities will be rounded to the specified number of
            digits, using NumPy's around function. If `None`, then no rounding
            is performed. Note, if the number of digits is greater than the
            precision of the floats, then the resultant number of digits will
            match that smaller precision.
        exact : bool
            If `True`, then linear probabilities will be displayed, even if
            the underlying pmf contains log probabilities.  The closest
            rational fraction within a tolerance specified by `tol` is used
            as the display value.
        tol : float
            If `exact` is `True`, then the probabilities will be displayed
            as the closest rational fraction within `tol`.

        Returns
        -------
        s : str
            A string representation of the distribution.

        """
        from six import StringIO
        s = StringIO()

        if exact is None:
            exact = ditParams['print.exact']

        x = prepare_string(self, digits, exact, tol)
        pmf, outcomes, base, colsep, max_length, pstr = x

        headers = ["Class: ",
                   "Alphabet: ",
                   "Base: "]
        vals = [self.__class__.__name__,
                self.alphabet, # pylint: disable=no-member
                base]

        L = max(map(len, headers))
        for head, val in zip(headers, vals):
            s.write("{0}{1}\n".format(head.ljust(L), val))
        s.write("\n")

        s.write(''.join(['x'.ljust(max_length), colsep, pstr, "\n"]))
        for o, p in zip(outcomes, pmf):
            s.write(''.join([o.ljust(max_length), colsep, str(p), "\n"]))

        s.seek(0)
        s = s.read()
        # Remove the last \n
        s = s[:-1]

        return s
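Possible calls following the docstring above (a sketch assuming dit's public Distribution constructor; output format as in Example #9):

from dit import Distribution

d = Distribution(['0', '1'], [0.25, 0.75])
print(d.to_string(digits=2))     # probabilities rounded with numpy's around
print(d.to_string(exact=True))   # nearest rational fractions within tol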
Example #57
def test_setmeta_bytes():
    p = Properties()
    p["a key"] = "the value", {b"metakey": b"metaval"}

    out = StringIO()
    p.store(out, strip_meta=False, timestamp=False)

    out.seek(0)
    assert out.read() == "#: metakey=metaval\na\\ key=the value\n"
Example #58
def test_json_format():
    broker = dr.Broker()
    output = StringIO()
    with JsonFormat(broker, stream=output):
        dr.run(report, broker=broker)
    output.seek(0)
    data = output.read()
    assert "foo" in data
    assert "bar" in data
Example #59
    def test_management_command(self):
        """Test freeze management command"""
        stdout = StringIO()
        call_command('bower_freeze', stdout=stdout)
        stdout.seek(0)
        output = stdout.read()

        self.assertIn('BOWER_INSTALLED_APPS', output)
        self.assertIn('backbone', output)
Example #60
def test_simple_html_format():
    broker = dr.Broker()
    output = StringIO()
    with SimpleHtmlFormat(broker, stream=output):
        dr.run(report, broker=broker)
    output.seek(0)
    data = output.read()
    assert "foo" in data
    assert "bar" in data