Example #1
0
def test_regression_keep_cols_multi_fast_max_batch(live_server, tmpdir, ui):
    # Regression run that keeps two columns in fast mode, with a batch-size
    # cap small enough to force the scorer to split batches (verified via
    # the "splitting" log line).
    test_regression(
        live_server, tmpdir, ui,
        keep_cols=['y', 'x'],
        in_fixture='tests/fixtures/regression.csv',
        out_fixture='tests/fixtures/regression_output_yx.csv',
        fast_mode=True,
        max_batch_size=100,
    )

    assert "bytes, splitting" in read_logs()
def test_args_from_subprocess(live_server):
    """End-to-end CLI check: run batch_scoring as a subprocess and compare
    its output file against the expected fixture."""
    # Create (and immediately delete) a temp file just to obtain a unique
    # output path for the subprocess to write to.
    with tempfile.NamedTemporaryFile(prefix='test_',
                                     suffix='.csv', delete=True) as fd:
        pass
    bscore_name = 'batch_scoring'
    # '==' not 'is': identity of string literals is a CPython implementation
    # detail and raises SyntaxWarning on modern Pythons.
    if os.name == 'nt':
        # On Windows the console script lives under <python>/scripts with
        # an .exe suffix; resolve it explicitly.
        exe = sys.executable
        head = os.path.split(exe)[0]
        bscore_name = os.path.normpath(os.path.join(head, 'scripts',
                                       'batch_scoring.exe'))
        assert os.path.isfile(bscore_name) is True
        assert os.path.supports_unicode_filenames is True
    arguments = ('{bscore_name} --host={webhost}/api'
                 ' --user={username}'
                 ' --password={password}'
                 ' {project_id}'
                 ' --verbose'
                 ' {model_id}'
                 ' tests/fixtures/temperatura_predict.csv'
                 ' --n_samples=10'
                 ' --n_concurrent=1'
                 ' --out={out}'
                 ' --no').format(webhost=live_server.url(),
                                 bscore_name=bscore_name,
                                 username='******',
                                 password='******',
                                 project_id='56dd9570018e213242dfa93c',
                                 model_id='56dd9570018e213242dfa93d',
                                 out=fd.name)
    try:
        spc = subprocess.check_call(arguments.split(' '))
    except subprocess.CalledProcessError as e:
        print(e)
        read_logs()
        # Re-raise so the test fails here with the real error instead of a
        # confusing NameError on 'spc' below.
        raise

    # Text mode normalizes '\r\n' vs '\n' across platforms. Plain 'r' is
    # used because the legacy 'rU' mode was removed in Python 3.11 and
    # universal newlines are the default in Python 3.
    with open(fd.name, 'r') as o:
        actual = o.read()
    with open('tests/fixtures/temperatura_output.csv', 'r') as f:
        expected = f.read()
    assert str(actual) == str(expected)
    # '==' not 'is': small-int caching is a CPython detail.
    assert spc == 0
Example #3
0
def test_regression_bad_csv(live_server, tmpdir, ui):
    # A malformed CSV must abort the run (exit code 1) and log a parse
    # error; no output fixture is compared.
    test_regression(
        live_server, tmpdir, ui,
        in_fixture='tests/fixtures/regression_bad.csv',
        out_fixture=None,
        fast_mode=False,
        expected_ret=1,
    )

    assert "Error parsing CSV file after line 1000, error: " in read_logs()
Example #4
0
    async def history(self, ctx, member_descriptor, page=None):
        """Send a paginated moderation history (warn/mute/kick/ban) for a
        member, 10 entries per page.

        ``page`` defaults to 1 and is validated as a positive page index.
        """
        member = await get_member(self.bot, member_descriptor)
        if not member:
            await ctx.send(embed=await error(
                f"No member found by descriptor '{member_descriptor}'"))
            return

        # 'is None', not '== None' (PEP 8 / E711).
        if page is None:
            page = 1

        try:
            page = int(page)
        except (ValueError, TypeError):
            await ctx.send(embed=await error(f"Invalid page number '{page}'"))
            return

        logs = read_logs(self.bot)

        events = []

        # Each log entry maps a member-id key to its details; collect the
        # entries for this member whose detail keys name a known event type.
        for event in itertools.chain(logs["warn_log"], logs["mute_log"],
                                     logs["kick_log"], logs["ban_log"]):
            # next(iter(...)) fetches the single key without building a list.
            if int(next(iter(event))) == member.id:
                event_details = event[str(member.id)]
                valid_logs = ("warned", "muted", "kicked", "banned")
                valid_keys = [
                    key for key in event_details if key in valid_logs
                ]
                if valid_keys:
                    events.append((valid_keys[0], event_details))

        # Chunk into pages of 10 events each.
        events = [events[i:i + 10] for i in range(0, len(events), 10)]

        embed = discord.Embed(color=EmbedColor.dark_green)

        if not events:
            embed.description = "No logged data"
            # Placeholder page so the footer below reads "Page N/1".
            events = [None]

        else:
            if not 0 < page <= len(events):
                await ctx.send(embed=await error(
                    f"Invalid page number '{page}', must be 1-{len(events)}"))
                return
            for event in events[page - 1]:
                name = event[0]
                value = event[1]["reason"]
                if not value:
                    # Zero-width space: embed fields reject empty values.
                    value = u"\u200b"
                embed.add_field(name=name, value=value, inline=False)

        embed.set_author(name=f"{member.name}'s moderation history",
                         icon_url=member.avatar_url)
        embed.set_footer(text=f"Page {page}/{len(events)}")

        await ctx.send(embed=embed)
def test_request_client_timeout(live_server, tmpdir, ui):
    """A server delay longer than --timeout must fail the run (ret == 1)
    and log the timeout advice message."""
    live_server.app.config['PREDICTION_DELAY'] = 3
    out = tmpdir.join('out.csv')
    base_url = '{webhost}/api/v1/'.format(webhost=live_server.url())
    ret = run_batch_predictions(
        base_url=base_url,
        base_headers={},
        user='******',
        pwd='password',
        api_token=None,
        create_api_token=False,
        pid='56dd9570018e213242dfa93c',
        lid='56dd9570018e213242dfa93d',
        import_id=None,
        n_retry=3,
        concurrent=1,
        resume=False,
        n_samples=10,
        out_file=str(out),
        keep_cols=None,
        delimiter=None,
        dataset='tests/fixtures/temperatura_predict.csv.gz',
        pred_name=None,
        timeout=1,
        ui=ui,
        auto_sample=False,
        fast_mode=False,
        dry_run=False,
        encoding='',
        skip_dialect=False
    )

    # '==' not 'is': small-int identity is a CPython implementation detail.
    assert ret == 1
    returned = out.read_text('utf-8')
    # NOTE(review): '' is a substring of every string, so this assertion can
    # never fail — it appears intended only to read the output file.
    assert '' in returned, returned
    logs = read_logs()
    assert textwrap.dedent("""The server did not send any data
in the allotted amount of time.
You might want to decrease the "--n_concurrent" parameters
or
increase "--timeout" parameter.
""") in logs
Example #6
0
def test_request_log_client_error(live_server, tmpdir, ui):
    # Batches 8 and 9 are made to fail server-side with HTTP 400; the run
    # must still complete, produce full output, and log the failures.
    live_server.app.config["FAIL_GRACEFULLY_AT"] = [8, 9]

    out = tmpdir.join('out.csv')
    options = dict(
        base_url='{webhost}/predApi/v1.0/'.format(webhost=live_server.url()),
        base_headers={},
        user='******',
        pwd='password',
        api_token=None,
        create_api_token=False,
        pid='56dd9570018e213242dfa93c',
        lid='56dd9570018e213242dfa93d',
        import_id=None,
        n_retry=3,
        concurrent=2,
        resume=False,
        n_samples=5,
        out_file=str(out),
        keep_cols=None,
        delimiter=None,
        dataset='tests/fixtures/temperatura_predict.csv.gz',
        pred_name=None,
        timeout=None,
        ui=ui,
        auto_sample=False,
        fast_mode=False,
        dry_run=False,
        encoding='',
        skip_dialect=False,
    )
    assert run_batch_predictions(**options) is None

    output_lines = out.read_text('utf-8').splitlines()
    assert len(output_lines) == 101

    assert ('failed with status code 400 message: Requested failure'
            in read_logs())
def test_compression(live_server, tmpdir, ui):
    # Uploading a gzipped dataset with compression enabled should succeed
    # and report the compression savings in the log.
    out = tmpdir.join('out.csv')
    call_kwargs = dict(
        base_url='{webhost}/api/v1/'.format(webhost=live_server.url()),
        base_headers={},
        user='******',
        pwd='password',
        api_token=None,
        create_api_token=False,
        pid='56dd9570018e213242dfa93c',
        lid='56dd9570018e213242dfa93d',
        import_id=None,
        n_retry=3,
        concurrent=2,
        resume=False,
        n_samples=100,
        out_file=str(out),
        keep_cols=None,
        delimiter=None,
        dataset='tests/fixtures/regression_jp.csv.gz',
        pred_name=None,
        timeout=30,
        ui=ui,
        auto_sample=False,
        fast_mode=False,
        dry_run=False,
        encoding='',
        skip_dialect=False,
        compression=True,
    )
    assert run_batch_predictions(**call_kwargs) is None

    result_lines = out.read_text('utf-8').splitlines()
    assert len(result_lines) == 1411

    assert "space savings" in read_logs()
def test_request_pool_is_full(live_server, tmpdir, ui):
    # With 30 concurrent workers and a 1-second server delay, the HTTP
    # connection pool must not overflow (no "pool is full" warning logged).
    live_server.app.config["PREDICTION_DELAY"] = 1

    out = tmpdir.join('out.csv')

    request_kwargs = dict(
        base_url='{webhost}/api/v1/'.format(webhost=live_server.url()),
        base_headers={},
        user='******',
        pwd='password',
        api_token=None,
        create_api_token=False,
        pid='56dd9570018e213242dfa93c',
        lid='56dd9570018e213242dfa93d',
        import_id=None,
        n_retry=3,
        concurrent=30,
        resume=False,
        n_samples=10,
        out_file=str(out),
        keep_cols=None,
        delimiter=None,
        dataset='tests/fixtures/criteo_top30_1m.csv.gz',
        pred_name=None,
        timeout=30,
        ui=ui,
        auto_sample=False,
        fast_mode=False,
        dry_run=False,
        encoding='',
        skip_dialect=False,
    )
    assert run_batch_predictions(**request_kwargs) is None

    assert "Connection pool is full" not in read_logs()
Example #9
0
def test_os_env_proxy_handling(live_server, tmpdir, ui):
    """With HTTP_PROXY pointing at an unreachable proxy, the run must abort
    via SystemExit and log a connection failure."""
    os.environ["HTTP_PROXY"] = "http://localhost"
    try:
        out = tmpdir.join('out.csv')
        base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
        with pytest.raises(SystemExit):
            # The call raises SystemExit, so it never returns; the original
            # post-call `assert ret is 1` was dead code and is removed.
            run_batch_predictions(
                base_url=base_url,
                base_headers={},
                user='******',
                pwd='password',
                api_token=None,
                create_api_token=False,
                pid='56dd9570018e213242dfa93c',
                lid='56dd9570018e213242dfa93d',
                import_id=None,
                n_retry=1,
                concurrent=2,
                resume=False,
                n_samples=1,
                out_file=str(out),
                keep_cols=None,
                delimiter=None,
                dataset='tests/fixtures/temperatura_predict.csv.gz',
                pred_name=None,
                timeout=None,
                ui=ui,
                auto_sample=False,
                fast_mode=False,
                dry_run=False,
                encoding='',
                skip_dialect=False)

        logs = read_logs()
        assert "Failed to establish a new connection" in logs
    finally:
        # Neutralize the proxy even if an assertion above fails, so later
        # tests are not poisoned by the environment variable.
        os.environ["HTTP_PROXY"] = ""
Example #10
0
def log():
    """Render the log page populated with the current log contents."""
    return render_template("log.html", title='Log', logs=read_logs())