def main(api_endpoint):
    """Online-learning loop for a Kili classification project.

    Repeatedly exports the project's assets, trains an AutoML classifier on
    the labeled ones, and uploads predictions for the unlabeled ones as
    pre-annotations.

    Args:
        api_endpoint: URL of the Kili API to authenticate against.

    Side effects: prompts for credentials and a project id on stdin, then
    loops forever, sleeping SECONDS_BETWEEN_TRAININGS between passes.
    """
    email = input('Enter Email: ')
    password = getpass.getpass('Enter password for user {}:'.format(email))
    kauth = KiliAuth(email, password, api_endpoint=api_endpoint)
    playground = Playground(kauth)
    project_id = input('Enter project id: ')

    # Check and load new predictions, forever. (The original guarded this
    # with a STOP_CONDITION flag that was never mutated, so the loop was
    # already unconditional — `while True` makes that explicit.)
    while True:
        tools = playground.get_tools(project_id=project_id)
        # This recipe only supports a project with exactly one tool.
        # Raise instead of `assert`: asserts vanish under `python -O`.
        if len(tools) != 1:
            raise ValueError(
                'Expected exactly one tool on project {}, got {}'.format(
                    project_id, len(tools)))
        categories = list(
            json.loads(tools[0]['jsonSettings'])['categories'].keys())

        print('Export assets and labels...')
        assets = playground.export_assets(project_id=project_id)
        print('Done.\n')

        X, y, X_to_be_predicted, ids_X_to_be_predicted = \
            extract_train_for_autoML(assets, categories)

        # Only train once a minimal number of labeled examples exists.
        if len(X) > 5:
            print('Online Learning is on its way...')
            predictions = automl_train_and_predict(X, y, X_to_be_predicted)

            # Insert pre-annotations. `zip` pairs each predicted class
            # index with its asset id (renamed from `id`, which shadowed
            # the builtin); tqdm shows progress over the predictions.
            for asset_id, prediction in zip(ids_X_to_be_predicted,
                                            tqdm(predictions)):
                json_response = {
                    'categories': [{
                        'name': categories[prediction],
                        'confidence': 100
                    }]
                }
                playground.create_prediction(asset_id=asset_id,
                                             json_response=json_response)
            print('Done.\n')

        # NOTE(review): original indentation was lost in this file; the
        # sleep is placed at loop level so every pass waits, which matches
        # the "poll every SECONDS_BETWEEN_TRAININGS" intent — confirm.
        time.sleep(SECONDS_BETWEEN_TRAININGS)
for name in files ] kauth = KiliAuth(email=email, password=password) playground = Playground(kauth) for filepath in tqdm(only_files[:MAX_NUMBER_OF_ASSET]): with open(filepath, 'r') as f: content = f.read() external_id = filepath # Insert asset playground.append_to_dataset(project_id=project_id, content=escape_content(content), external_id=external_id) asset = playground.get_assets_(project_id=project_id, external_id_contains=[external_id]) asset_id = asset[0]['id'] # Prioritize assets playground.update_properties_in_asset(asset_id=asset_id, priority=1) # Insert pre-annotations response = analyze_entities(content) entities = [ e for e in response['entities'] if isinstance(e['type'], str) and e['type'] != 'OTHER' ] json_response = {'entities': add_id_to_entities(entities)} playground.create_prediction(asset_id=asset_id, json_response=json_response)