def _post_entry(fixt_idx=0, **kwargs):
    """Create an entry over the API and, unless no_ai, wait for its summary.

    Posts fixture entry ``Virtual_reality_{fixt_idx}`` (kwargs override any
    field), then polls the jobs table until the summary job exists and
    finishes, and finally checks the AI summaries landed on the entry.
    Returns the new entry's id.
    """
    fixture_key = f"Virtual_reality_{fixt_idx}"
    payload = dict(
        title=fixture_key,
        text=fixtures.entries[fixture_key].text,
        no_ai=False,
        tags=u.user.tag1,
    )
    payload.update(kwargs)

    res = client.post("/entries", json=payload, **u.user.header)
    assert res.status_code == 200
    eid = res.json()['id']

    if not payload['no_ai']:
        # summary job got created
        sql = """
        select id from jobs
        where method='entries'
            and data_in->'args'->>0=:eid
        """
        args = {'eid': eid}
        assert M.await_row(db, sql, args=args, timeout=2)

        # summaries generated
        sql += " and state='done'"
        assert M.await_row(db, sql, args=args, timeout=120)

        res = client.get(f"/entries/{eid}", **u.user.header)
        assert res.status_code == 200
        res = res.json()
        assert res['ai_ran'] is True
        assert res['title_summary']
        assert res['text_summary']

    return eid
def test_entries_ml(post_entry, db, client, u, ml_jobs, submit_job):
    """End-to-end ML pipeline: post entries, run themes, then books.

    Creates three AI-enabled entries, submits a themes job scoped to the
    user's main tag, kicks off the remaining ML jobs, waits for the books
    job to finish, and checks book recommendations come back.
    """
    # TODO use wikipedia entries to actually test qualitative results
    post_entry(0, no_ai=False)
    post_entry(1, no_ai=False)
    post_entry(2, no_ai=False)

    main_tag = list(u.user.tag1.keys())
    limit_entries = {'days': 10, 'tags': main_tag}

    res = submit_job("themes", data=limit_entries)
    assert res['terms']
    assert len(res['themes']) > 0

    # themes and books use the same entry filter; reuse limit_entries
    # (it was previously recomputed with identical values)
    ml_jobs(limit_entries, 200)

    # books job gets enqueued...
    sql = "select id from jobs where method='books'"
    assert M.await_row(db, sql, timeout=100)
    # ...and must finish in 'done' (not 'error')
    sql = "select id, state from jobs where state in ('done', 'error') and method='books'"
    res = M.await_row(db, sql, timeout=200)
    assert res
    assert res.state == 'done'

    res = client.get("/books/ai", **u.user.header)
    assert res.status_code == 200
    res = res.json()
    assert len(res) > 0
def db(client):
    """await client to init_db"""
    with D.session() as sess:
        # wait for GPU to restart from no-db crash
        sql = "select 1 from machines where status='on'"
        while not M.await_row(sess, sql):
            time.sleep(.5)
        yield sess
def test_therapists(db, client, u, post_entry):
    """Posting entries + a profile update should yield therapist matches."""
    for fixt_idx in range(4):
        post_entry(fixt_idx)

    # kick off profiles job
    client.put("/profile", json=dict(first_name="Tyler"), **u.user.header)
    sql = "select 1 from profile_matches where user_id=:uid"
    res = M.await_row(db, sql, dict(uid=u.user.id), timeout=20)
    assert res

    res = client.get("/therapists", **u.user.header)
    assert res.status_code == 200
    res = res.json()
    # NOTE(review): 3 is the number of therapist_* users in fixtures —
    # a dynamic count over fixtures.users was previously commented out
    # in favor of this constant; keep them in sync.
    assert len(res) == 3
def test_influencers(client, u, db, count):
    """Staleness-triggered influencers cron populates scores and predictions.

    Submits field data, backdates the user so the influencers job considers
    it stale, runs the cron, then verifies every field has a prediction,
    an influencer_score, and a per-field influencer dict with positive
    total score.
    """
    # TODO set these up in fixture. Currently `u` fixture is function-scope, and wipes user b/w
    uid, header = u.user.id, u.user.header
    fixtures.submit_fields(uid, db, client, header)

    # set last_updated so it's stale
    # The other stale/fresh/just-right checks are in gpu/tests
    db.execute(
        """
        update users
        set updated_at=now() - interval '5 days',
            last_influencers=now() - interval '5 days'
        where id=:uid
        """,
        dict(uid=uid))
    db.commit()
    client.post('user/checkin', **header)

    # run cron
    jid = ml.run_influencers()
    sql = "select 1 from jobs where id=:jid and state='done'"
    assert M.await_row(db, sql, {'jid': jid}, timeout=200)
    assert count("influencers") > 0

    fs = client.get('/fields', **header)
    assert fs.status_code == 200
    fs = fs.json()
    inf = client.get('/influencers', **header)
    assert inf.status_code == 200
    inf = inf.json()
    # isinstance, not `type(x) == dict` (E721); JSON objects decode to plain dicts
    assert isinstance(inf, dict)
    for fid, f in fs.items():
        assert f['next_pred'] is not None
        assert f['influencer_score'] is not None
        assert isinstance(inf[fid], dict)
        total_score = 0
        # keys unused; iterate values directly (PERF102)
        for score in inf[fid].values():
            assert score is not None
            total_score += score
        assert total_score > 0