コード例 #1
0
ファイル: poc.py プロジェクト: plubon/maskrcnn-benchmark
def main():
    """Annotate pages of new publications with model predictions.

    Expects three CLI arguments: model config path, output dir
    (model last_checkpoint dir) and model type.
    """
    # Need script name + 3 arguments. The original checked `< 3` and then
    # fell through anyway, crashing on sys.argv[3] with an IndexError.
    if len(sys.argv) < 4:
        print(
            'Usage: Path to model config path to model last_checkpoint model type'
        )
        return
    output_dir = sys.argv[2]
    model_type = sys.argv[3]
    cfg.merge_from_file(sys.argv[1])
    cfg.merge_from_list(["OUTPUT_DIR", output_dir])
    model = ChataDemo(cfg, model_type)

    client = ApiClient()
    client.login()
    newest_ids = client.get_new_publications()
    for pub_id in newest_ids:
        pages = client.get_pages(pub_id)
        print(pub_id)
        # `page_id` instead of `id` to avoid shadowing the builtin
        for page_id, url in pages:
            print(page_id)
            print(url)
            # fetch the page image and decode it into a BGR ndarray
            resp = urllib.request.urlopen(url)
            image = np.asarray(bytearray(resp.read()), dtype="uint8")
            image = cv2.imdecode(image, cv2.IMREAD_COLOR)
            predictions = model.get_predictions(image)
            annotations = annotate(predictions)
            print(annotations)
            if annotations:
                print('writing annotation...')
                for annotation in annotations:
                    client.add_annotation(page_id, annotation)
コード例 #2
0
def run_main(args: Arguments):
    """Collect post/comment counts per user and print or save them as CSV."""
    api = ApiClient(args.subdomain, args.username, args.token)
    stats = UserStats()

    # no topic filter in this mode
    posts = api.get_posts(None,
                          filter_from=args.filter_from,
                          filter_to=args.filter_to)
    for post in posts:
        in_range = api_util.included_in_date_range(
            post['created_at'],
            filter_from=args.filter_from,
            filter_to=args.filter_to)
        if in_range:
            stats.observe_post_by_user(post['author_id'])
        for comment in api.get_comments(post['id'],
                                        filter_from=args.filter_from,
                                        filter_to=args.filter_to):
            stats.observe_comment_by_user(comment['author_id'])

    if args.outputfile:
        with open(args.outputfile, "w") as f:
            for row in stats.to_csv():
                f.write(row + '\n')
        print(f"Results saved to {args.outputfile}")
    else:
        print("--- RESULTS ---")
        for row in stats.to_csv():
            print(row)
コード例 #3
0
    def authenticate(self):
        """Load Kaggle API credentials from the JSON config file and build the client.

        Warns (non-Windows) when the key file is group/world readable, copies
        optional settings onto this object, and creates ``self.api_client``.
        Exits the process with a helpful message when the config is missing,
        unreadable, or the configured proxy is invalid.
        """
        # Pre-bind so the except-handler can't hit a NameError when the
        # failure happens before/during json.load (the original referenced
        # config_data in the handler unconditionally).
        config_data = {}
        try:
            configuration = Configuration()
            if os.name != 'nt':
                # 0o004 = world-readable, 0o040 = group-readable
                permissions = os.stat(self.config).st_mode
                if (permissions & 4) or (permissions & 32):
                    print(
                        'Warning: Your Kaggle API key is readable by other users on '
                        +
                        'this system! To fix this, you can run \'chmod 600 {}\''
                        .format(self.config))
            with open(self.config, 'r') as f:
                config_data = json.load(f)

            self.copy_config_value(self.CONFIG_NAME_PROXY, config_data)
            self.copy_config_value(self.CONFIG_NAME_PATH, config_data)
            self.copy_config_value(self.CONFIG_NAME_COMPETITION, config_data)
            self.copy_config_value(self.CONFIG_NAME_USER, config_data)

            configuration.username = config_data[self.CONFIG_NAME_USER]
            configuration.password = config_data[self.CONFIG_NAME_KEY]
            if self.CONFIG_NAME_PROXY in config_data:
                configuration.proxy = config_data[self.CONFIG_NAME_PROXY]
            self.api_client = ApiClient(configuration)

        except Exception as error:
            if 'Proxy' in type(error).__name__:
                # .get avoids a KeyError inside the handler if the proxy
                # entry is somehow absent
                sys.exit('The specified proxy ' +
                         config_data.get(self.CONFIG_NAME_PROXY, '') +
                         ' is not valid, please check your proxy settings')
            else:
                sys.exit(
                    'Unauthorized: you must download an API key from ' +
                    'https://www.kaggle.com/<username>/account\nThen put ' +
                    self.config_file + ' in the folder ' + self.config_path)
コード例 #4
0
ファイル: poc.py プロジェクト: mini-pw/2019L-ProjektZespolowy
def main():
    """Run the charts and tables models over pages of new publications and
    upload the combined annotations."""
    charts_cfg = cfg.clone()
    tables_cfg = cfg.clone()
    charts_cfg.merge_from_file(charts_path)
    tables_cfg.merge_from_file(tables_path)
    model_charts = ChataDemo(charts_cfg, "charts")
    model_tables = ChataDemo(tables_cfg, "tables")

    client = ApiClient()
    client.login()
    for pub_id in client.get_new_publications():
        pages = client.get_pages(pub_id)
        print(pub_id)
        for page_id, url in pages:
            print(page_id)
            print(url)
            # download the page image and decode to a BGR ndarray
            raw = urllib.request.urlopen(url).read()
            image = cv2.imdecode(np.asarray(bytearray(raw), dtype="uint8"),
                                 cv2.IMREAD_COLOR)
            annotations = (annotate(model_charts.get_predictions(image)) +
                           annotate(model_tables.get_predictions(image)))
            print(annotations)
            if annotations:
                client.add_annotation(page_id, annotations)
コード例 #5
0
def get_mock_trello(trello_config):
    """Build a Trello wired to an ApiClient whose GET returns three stub lists."""
    board_id = "5eeb6486ebbfa66d2e519922"

    def stub_list(list_id, name, position):
        # shape mirrors a Trello /lists API payload
        return {
            "id": list_id,
            "name": name,
            "closed": False,
            "pos": position,
            "softLimit": None,
            "idBoard": board_id,
            "subscribed": False
        }

    api_client = ApiClient()
    api_client.get = mock_get([
        stub_list("5eeb648682bb2c685cc659f0", NOT_STARTED, 1),
        stub_list("5eeb648623bb2c685cc659f0", IN_PROGRESS, 2),
        stub_list("5eeb6486cae807625c9f0819", COMPLETED, 3),
    ])
    trello = Trello(trello_config, api_client)
    return trello, api_client, trello_config
コード例 #6
0
 def __init__(self):
     """Wire up mail alerting, timing and API access for the monitor."""
     # no invalid measurements observed yet
     self._last_invalid_measurements = {}
     mail_args = (config['sender'], config['receiver_email'],
                  config['MONITORED_VALUES'])
     self._mail_sender = MailSender(*mail_args)
     self._timer = MonitorTimer()
     self._api_client = ApiClient()
     self.test_dedicated_state = False
コード例 #7
0
ファイル: venemy.py プロジェクト: mportatoes/venemy
 def __init__(self, access_token: str):
     """
     VenmoAPI Client
     :param access_token: <str> Need access_token to work with the API.
     """
     super().__init__()
     token = validate_access_token(access_token=access_token)
     self.__access_token = token
     self.__api_client = ApiClient(access_token=access_token)
     self.user = UserApi(self.__api_client)
コード例 #8
0
    def log_out(access_token: str) -> bool:
        """
        Revoke your access_token
        :param access_token: <str> token to revoke
        :return: True when the DELETE call completed without raising
        """

        resource_path = '/oauth/access_token'
        api_client = ApiClient(access_token=access_token)

        # DELETE on the token resource revokes it server-side
        api_client.call_api(resource_path=resource_path, method='DELETE')

        # plain literal — the original used an f-string with no placeholders
        confirm("Successfully logged out.")
        return True
コード例 #9
0
ファイル: venemy.py プロジェクト: mportatoes/venemy
 def get_access_token(username: str,
                      password: str,
                      device_id: str = None) -> str:
     """
     Log in using your credentials and get an access_token to use in the API
     :param username: <str> Can be username, phone number (without +1) or email address.
     :param password: <str> Account's password
     :param device_id: <str> [optional] A valid device-id.
     :return: <str> access_token
     """
     auth_api = AuthenticationApi(api_client=ApiClient(), device_id=device_id)
     return auth_api.login_with_credentials_cli(username=username,
                                                password=password)
コード例 #10
0
    def test_get_comments(self, mock_requests_get):
        """get_comments should return the JSON payload of the mocked response."""
        expected_comments = [
            {'id': 2, 'body': 'some comment', 'postId': 2},
            {'id': 3, 'body': 'some comment', 'postId': 2},
        ]

        fake_response = MagicMock()
        fake_response.json.return_value = expected_comments
        mock_requests_get.return_value = fake_response

        actual = ApiClient().get_comments()

        self.assertEqual(actual, expected_comments)
コード例 #11
0
def test_app():
    """Pytest fixture: start the Flask app against a throwaway Trello board,
    yield, then tear both down."""
    trello_config = Config(".env").trello_config
    trello = Trello(trello_config, ApiClient()).create_test_board()
    application = app.create_app({}, trello)

    # run the server in a daemon thread so the fixture can yield control
    thread = Thread(target=lambda: application.run(use_reloader=False))
    thread.daemon = True
    thread.start()

    # I really feel like this shouldn't be necessary, but otherwise selenium makes requests before flask has started
    time.sleep(1)

    # NOTE(review): this yields the imported `app` module, not the
    # `application` instance built above — confirm which one the tests expect
    yield app

    thread.join(1)
    trello.delete_board()
コード例 #12
0
 def __init__(self,
              base_url,
              token,
              sleep_seconds=2,
              timeout_seconds=sys.maxsize,
              timeout_func=_default_timeout_func,
              verbose=True):
     """
     :param base_url: Base URL of API such as https://acme.cloud.databricks.com/api/2.0
     :param token: API token
     :param sleep_seconds: Seconds to sleep when polling for cluster readiness
     :param timeout_seconds: Timeout in seconds
     :param verbose: To be verbose or not?
     """
     self.api_client = ApiClient(base_url, token)
     self.base_url = base_url
     self.sleep_seconds = sleep_seconds
     self.timeout_seconds = timeout_seconds
     self.timeout_func = timeout_func
     self.verbose = verbose

     # State sets come from config.json when present, otherwise defaults.
     config_file = "config.json"
     if os.path.exists(config_file):
         logging.info("Reading config file {}".format(config_file))
         with open(config_file, 'rb') as f:
             dct = json.load(f)
         self.cluster_noninit_states = set(dct['cluster_noninit_states'])
         self.run_terminal_states = set(dct['run_terminal_states'])
     else:
         self.cluster_noninit_states = {"RUNNING", "TERMINATED", "ERROR", "UNKNOWN"}
         self.run_terminal_states = {"TERMINATED", "SKIPPED", "INTERNAL_ERROR"}
     logging.info("cluster_noninit_states: {}".format(self.cluster_noninit_states))
     logging.info("run_terminal_states: {}".format(self.run_terminal_states))
コード例 #13
0
def run_main(args: Arguments):
    """Interactive loop: show contributor stats, award a badge, or exit."""
    api = ApiClient(args.subdomain, args.username, args.token)
    selected_user_id = None
    while True:
        mode = enquiries.choose('Select an action:', ['Get stats', 'Award badge', 'Exit'])
        print(mode)
        if mode == 'Exit':
            return
        if mode == 'Award badge':
            if not selected_user_id:
                print("First select a user, for example by looking at stats")
                continue
            labels = (f"{b['id']} {b['name']}" for b in api.get_badges())
            chosen = enquiries.choose('Select a badge:', labels)
            badge_id = chosen.split(' ', 1)[0]
            print(f"Assigning badge {badge_id} to user {selected_user_id}")
            response = api.create_badge_assignment(selected_user_id, badge_id)
            print(str(response))
        elif mode == 'Get stats':
            stats = UserStats()
            # no topic filter in this mode
            posts = api.get_posts(None, filter_from=args.filter_from, filter_to=args.filter_to)
            for post in posts:
                if api_util.included_in_date_range(post['created_at'], filter_from=args.filter_from, filter_to=args.filter_to):
                    stats.observe_post_by_user(post['author_id'])
                for comment in api.get_comments(post['id'], filter_from=args.filter_from, filter_to=args.filter_to):
                    stats.observe_comment_by_user(comment['author_id'])
            users = []
            for user_id, userstats in stats.stats_by_user.items():
                users.append(f"{user_id} - {userstats.get('posts') or 0} posts, {userstats.get('comments') or 0} comments")
            select_user = enquiries.choose('Here are your top contributors. Select one:', users)
            selected_user_id = select_user.split(' ', 1)[0]
コード例 #14
0
"""
This module is sample code that shows how to invoke the web service to get datasets
We will show how to use this for problem on identifying algebric identities
Author: Palacode Narayana Iyer Anantharaman
Date: 5 Oct 2018
"""
from api_client import ApiClient

if __name__ == "__main__":
    # in order to use the web service, first create the instance of ApiClient class
    client = ApiClient()

    # test it with echo service
    val = client.echo("hi there!!!!")
    print("The Service Returned: ", val)

    # you can use the method algebric identity simulated dataset
    # this will be rate limited
    num_samples = 5
    val = client.algebra(5)
    inputs = val["inputs"]
    outputs = val["outputs"]
    for inp, outp in zip(inputs, outputs):
        print(inp, outp)
コード例 #15
0
from api_client import ApiClient
import json

api = ApiClient()

# fetch page 1 (2 rooms per page) from the local dev server
query_params = {'page_no': 1, "page_size": 2}
rooms_resp = api.request("GET",
                         "http://127.0.0.1:5000/rooms",
                         query_params=query_params,
                         _preload_content=True)
data = json.loads(rooms_resp.data)

# NOTE(review): hard-coded absolute Windows path to the OpenAPI spec —
# consider making this configurable
f1 = "D:/work/py-apps/client/config/openapi.json"

with open(f1) as f:
    oas_data = json.load(f)

urls = []
"""
path:
method: 
parameters,
order: 
dependent: [""]
type: full|incremental
pagination: true or false
"""
for path, path_dict in oas_data.get("paths"):

    for method, method_dict in path_dict:
        url = {
コード例 #16
0
ファイル: monitor.py プロジェクト: mdm373/salt-get
from dotenv import load_dotenv
import os
from state import read_state, write_state, ALERTED_FOR_LEVEL, update_state_alert_level
from webhook import make_webhook


def optional_environ(name, default):
    """Return the value of env var *name*, or *default* when it is unset."""
    # os.environ.get expresses the lookup-with-fallback in one call
    return os.environ.get(name, default)


# load .env values into the process environment before reading config
load_dotenv()

api_host = optional_environ("API_HOST", "localhost")
api_port = int(optional_environ("API_PORT", "5000"))

# module-level client and persisted monitor state shared by the handlers below
client = ApiClient(api_host, api_port)
state = read_state()


def get_msg_data(dist):
    """Format the alert message body for a distance reading."""
    # warn_distance is a module-level threshold defined elsewhere in the file
    return "\n\tDistance: {}\n\tWarning At: {}".format(dist, warn_distance)


def handle_salt_low(current_distance):
    dist = current_distance["distance"]
    alerted = ALERTED_FOR_LEVEL in state
    if dist < warn_distance or alerted:
        return

    update_state_alert_level(dist)
    webhook(
コード例 #17
0
 def __init__(self, app_token):
     """Bind this wrapper to the 'calendars' API base path."""
     resource_name = 'calendars'
     self.base = resource_name
     self.api_client = ApiClient(app_token, resource_name)
コード例 #18
0
    def get_influx_database(self):
        """Return the configured InfluxDB database name."""
        return self.args['influxdb_database']

    def get_mqtt_url(self):
        """Return the configured MQTT broker URL."""
        return self.args['mqtt_url']

    def get_telemetry_types(self):
        """Return the configured telemetry types to subscribe to."""
        return self.args['telemetry_types']

    def get_streams_per_telegraf(self):
        """Return how many streams each telegraf instance should handle."""
        return self.args['streams_per_telegraf']


# Parse CLI options and build the API and Influx clients from them.
options = Options(sys.argv[1:])
api_client = ApiClient(options.get_api_url(), options.get_api_key())
influx_client = InfluxClient(options.get_influx_url(),
                             options.get_influx_port(),
                             options.get_influx_username(),
                             options.get_influx_password())

company_id = api_client.get_company_id()
database = options.get_influx_database()
measurement = 'telemetry'

# telemetry stream ids for the selected venue, used to build MQTT topics
unique_ids = api_client.get_telemetry_unique_ids(
    api_venue_id=options.get_api_venue_id())

topics = [
    '/stream/%s/%s' % elem
    for elem in itertools.product(unique_ids, options.get_telemetry_types())
コード例 #19
0
# Fashion-MNIST class index -> human-readable label
label_dict = dict(enumerate([
    "T-shirt/top",
    "Trouser",
    "Pullover",
    "Dress",
    "Coat",
    "Sandal",
    "Shirt",
    "Sneaker",
    "Bag",
    "Ankle boot",
]))

if __name__ == "__main__":
    # in order to use the web service, first create the instance of ApiClient class
    client = ApiClient(
    )  # NOTE: you need to call ApiClient(auth_key=YOUR_API_KEY)

    # if needed, test it with echo service
    # this is the simplest service that we can use to check if web service is running correctly
    # uncomment the code below to test echo()
    # val = client.echo("hi there!!!!")
    # print("For the echo service the returned value is: ", val)

    # you can use the method get_fashion_mnist_data to get data for fashion mnist
    # this will be rate limited to 10000 samples per call
    # for test accounts we will return a sample of 5 records to give you a feel
    num_samples = 10
    images, labels = client.get_fashion_mnist_data(num_samples)

    # you can print the labels returned by the service and examine
    # I am printing first 10 labels below
コード例 #20
0
async def main(loop):
    """Scrape sovetromantica embed links for every season 1990-2020 and
    publish each episode via the ApiClient as a record with embedded video.

    :param loop: event loop passed through to AioJikan.
    """
    aio_jikan = AioJikan(loop=loop)

    seasons = ['winter', 'spring', 'summer', 'fall']

    root_url = 'https://sovetromantica.com'
    apiSesson = ApiClient()
    await apiSesson.login('silverlynxx', '123456')

    async def _publish(title, link, kind, season, year):
        """Create one record + media entry for a single episode link.

        *kind* is 'sub' or 'dub'; the episode number is parsed from the link.
        This helper replaces the original's duplicated sub/dub loop bodies.
        """
        suffix = 'subtitles' if kind == 'sub' else 'dubbed'
        # episode number is the last numeric group before -subtitles/-dubbed
        num = int(re.findall(r'_(\d*?)-' + suffix, link)[-1])
        # zero-pad to three digits (e001 .. e999); also handles >3 digits,
        # which the original left as an empty string
        ep_num = f'e{num:03d}'
        record_headline = f'{title} ({ep_num}) ({kind}) [sovetromantica]'
        record_text = f'{title} ({ep_num}) [{season} {year}]'
        tags = [
            ('_'.join(re.findall(r'\W?(\w+)\W?', title))).lower(),
            str(year),
            f'{season}_{year}',
            kind,
            ep_num,
        ]
        record_resp = await apiSesson.create_record(record_headline,
                                                    record_text, tags)
        recordid = record_resp[0]['data']['recordid']
        media_data = {'url': f'{root_url}{link}'}
        media_type = 'embedded_video'
        media_description = f'{title} ({ep_num}) ({kind}) [sovetromantica]'
        await apiSesson.set_record_media(recordid, media_data, media_type,
                                         media_description)

    for year in range(1990, 2021):
        for season in seasons:
            season_year = await aio_jikan.season(year=year, season=season)

            for anime in season_year['anime']:
                print('\n', anime['title'])
                embed_links_sub, embed_links_dub = await parse_sovetromantica(
                    anime['title'], 'anime')
                for link in embed_links_sub:
                    await _publish(anime['title'], link, 'sub', season, year)
                for link in embed_links_dub:
                    await _publish(anime['title'], link, 'dub', season, year)

    # BUG FIX: close the jikan session once, after ALL years are processed;
    # the original closed it inside the year loop, so every iteration after
    # the first used an already-closed session.
    await aio_jikan.close()
    await apiSesson.close()
コード例 #21
0
 def create(cls, base, username, password):
     """Factory method using default ApiClient class."""
     return cls(ApiClient(base, username, password))
コード例 #22
0
def main():
    """Bulk-load security events plus related objects (threat detections,
    alerts, threat-intel records, assets) and log each event with its context.

    The iterator end-cursor is persisted between runs so only new or
    modified events are processed next time.
    """
    api_client = ApiClient(SECUREX_VISIBILITY_HOST_NAME, SECUREX_CLIENT_ID,
                           SECUREX_CLIENT_PASSWORD)

    # read stored cursor of last security event item from previous script run (if any)
    # to process only new or modified events in current script run
    previous_events_end_cursor = read_events_end_cursor()

    # USE BULK LOADING RESOURCES ON API TO PRELOAD EVENTS AND IMPORTANT REFERRED OBJECTS

    # remember events
    events = []
    # cached contextual objects keyed by id - alerts, threat detections,
    # threat intel records, assets
    cached_context_objects = {}

    threat_detection_ids = []
    affected_asset_ids = []

    # iterate over all security events sorted by unique modification sequence number ensuring no event is missed
    events_iterator = api_client.create_collection_iterator(
        collection_url_path=build_event_with_threat_detections_ids_url(
            api_client.get_customer_id()),
        query_params={
            "sort": "modificationSequenceNumber",
        },
        cursor=previous_events_end_cursor)
    # remember events and collect ids of related objects for bulk loading
    for event in events_iterator:
        events.append(event)
        threat_detection_ids.extend(event["threatDetectionIds"])
        affected_asset_ids.append(event["affectedAssetId"])

    alert_ids = []
    threat_intel_record_ids = []

    for distinct_threat_detection_ids_chunk in chunk_list(
            list(set(threat_detection_ids))):
        # bulk load threat detections chunk by their ids
        threat_detections_iterator = api_client.create_collection_iterator(
            collection_url_path=build_search_threat_detection_with_alert_id_url(
                api_client.get_customer_id()),
            request_body={
                "filter": {
                    "threatDetectionIds": distinct_threat_detection_ids_chunk
                }
            })

        # cache threat detections and extract alert / threat-intel ids
        for threat_detection in threat_detections_iterator:
            cached_context_objects[threat_detection["id"]] = threat_detection
            alert_ids.extend(threat_detection["alertIds"])
            threat_intel_record_ids.append(
                threat_detection["threatIntelRecordId"])

    for distinct_affected_asset_ids_chunk in chunk_list(
            list(set(affected_asset_ids))):
        # bulk load assets chunk by their ids
        affected_asset_iterator = api_client.create_collection_iterator(
            collection_url_path=build_assets_search_url(
                api_client.get_customer_id()),
            request_body={
                "filter": {
                    "assetId": distinct_affected_asset_ids_chunk
                }
            })

        for affected_asset in affected_asset_iterator:
            cached_context_objects[affected_asset["id"]] = affected_asset

    # bulk load alerts
    alerts_iterator = api_client.create_collection_iterator(
        collection_url_path=build_alerts_search_url(
            api_client.get_customer_id()),
        request_body={"filter": {
            "alertIds": list(set(alert_ids))
        }})

    for alert in alerts_iterator:
        cached_context_objects[alert["id"]] = alert

    # bulk load threat intelligence records
    threat_intel_records_iterator = api_client.create_collection_iterator(
        collection_url_path=build_threat_intel_search_url(),
        request_body={
            "filter": {
                "threatIntelRecordId": list(set(threat_intel_record_ids))
            }
        })

    for threat_intel_record in threat_intel_records_iterator:
        cached_context_objects[threat_intel_record["id"]] = threat_intel_record

    # PROCESS EVENTS AND OTHER CONTEXTUAL OBJECTS
    #
    # BUG FIX: the original used `for ... else` for the fallback branches.
    # A for-else runs its else-branch whenever the loop finishes WITHOUT
    # `break` — i.e. always here — so events were logged twice. Replaced
    # with explicit emptiness checks.
    for event in events:
        # asset referred to by the event (may be absent from the cache)
        affected_asset = cached_context_objects.get(event["affectedAssetId"],
                                                    None)

        threat_detection_ids = event["threatDetectionIds"]

        if not threat_detection_ids:
            # "contextual" security event without threat detections:
            # log it just with the affected asset
            log_event_attributes(event, affected_asset)
            continue

        # "convicting" security event with threat detections
        for threat_detection_id in threat_detection_ids:
            threat_detection_with_alert_ids = cached_context_objects.get(
                threat_detection_id, None)
            threat_intel_record = cached_context_objects.get(
                threat_detection_with_alert_ids["threatIntelRecordId"],
                None) if threat_detection_with_alert_ids else None

            alert_ids = threat_detection_with_alert_ids[
                "alertIds"] if threat_detection_with_alert_ids else []

            if not alert_ids:
                # missing threat detection or missing alert (might be GC'd)
                log_event_attributes(event, affected_asset,
                                     threat_detection_with_alert_ids,
                                     threat_intel_record)
                continue

            for alert_id in alert_ids:
                alert = cached_context_objects.get(alert_id, None)
                # log event with all related objects
                log_event_attributes(event, affected_asset,
                                     threat_detection_with_alert_ids,
                                     threat_intel_record, alert)

    # store events end_cursor for next script run
    if events_iterator.end_cursor():
        write_events_end_cursor(events_iterator.end_cursor())
コード例 #23
0
 def __init__(self):
     """Create the wrapper with a default-configured ApiClient."""
     self.api_client = ApiClient()
コード例 #24
0
ファイル: app.py プロジェクト: balins/sneaker-designer
def download_samples(starting_page=0, limit=sys.maxsize, image_size="s"):
    """Bulk-download sample images starting at the given catalogue page."""
    client = ApiClient(starting_page=starting_page,
                       limit=limit,
                       image_size=image_size)
    asyncio.run(client.start_bulk_download())
コード例 #25
0
 def create(cls, base, username, password):
     """Build an instance backed by a freshly constructed ApiClient."""
     return cls(ApiClient(base, username, password))
コード例 #26
0
 def __init__(self, app_token):
     """Bind this wrapper to the "resources" API base path."""
     self.base = "resources"
     self.api_client = ApiClient(app_token)
コード例 #27
0
import pickle
import nltk
from api_client import ApiClient

# NOTE(review): bearer token checked into source — rotate it and load it
# from an environment variable instead
my_auth_key = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1NTQ2MjY5ODAsImlhdCI6MTUzOTA3NDk4MCwibmJmIjoxNTM5MDc0OTgwLCJpZGVudGl0eSI6MTF9.kbHzSmkCqCu3akW7bMM6Vj4flZ-CVGuRYixh-k2x0Ps"

client = ApiClient(auth_key=my_auth_key)

# Load the dataset that was obtained from the API and pickled beforehand
with open('dataset.pkl', 'rb') as handle:
    dataset = pickle.load(handle)

# Extract all questions tokenized as sentences from the dataset
sentences = []
for i in dataset:
    sentences += nltk.sent_tokenize(i['question1'])
    sentences += nltk.sent_tokenize(i['question2'])

# Tokenize as words, count frequencies and keep the 10000 most common
word_dist = nltk.FreqDist()
for s in sentences:
    word_dist.update([i.lower() for i in nltk.word_tokenize(s)])

word_dist = word_dist.most_common(10000)

# Obtain the glove vectors from the web service, 100 words per call.
# NOTE(review): range(10, 100) starts at word index 1000 and thus skips the
# 1000 most common words — confirm whether range(0, 100) was intended.
# Also note the loop variable `i` is shadowed by the comprehension's `i`.
embeddings_list = []
for i in range(10, 100):
    embeddings_list += client.w2v(
        [i[0] for i in word_dist[i * 100:i * 100 + 100]])
コード例 #28
0
from flask import Flask, render_template, request, redirect, url_for
from config import Config
from trello import Trello
from status import COMPLETED, IN_PROGRESS
from view_model import ViewModel
from api_client import ApiClient

# module-level default Trello client, configured from .env; create_app below
# accepts an override for tests
config = Config('.env')
trello = Trello(config.trello_config, ApiClient())

def create_app(script_info, trello=trello):
    app = Flask(__name__)

    # Allowing POST so we don't need frontend JS
    @app.route('/item/<id>/mark-completed', methods=['PUT', 'POST'])
    def mark_completed(id):
        trello.set_status(id, COMPLETED)
        return redirect('/')

    @app.route('/item/<id>/start-item', methods=['PUT', 'POST'])
    def start_item(id):
        trello.set_status(id, IN_PROGRESS)
        return redirect('/')

    @app.route('/item/<id>/delete', methods=['DELETE', 'POST'])
    def delete(id):
        item = trello.delete_item(id)
        return redirect('/')

    @app.route('/item/add', methods=['POST'])
    def add_item():
コード例 #29
0
def _require_credentials(environment_options):
    """Validate the required connection options, prompting debug output.

    Raises DetailsMissingException for the first missing required value,
    in the same order as the original checks (cluster IP, then username,
    then password).
    """
    if not environment_options.cluster_ip:
        raise DetailsMissingException('Cluster IP is required.')
    if not environment_options.username:
        raise DetailsMissingException('Username is required.')
    if not environment_options.password:
        raise DetailsMissingException('Password is required.')
    if environment_options.debug:
        print('All parameters OK.\n')


def _fetch_blueprint(environment_options):
    """Request the blueprint's runtime_editables spec and return the JSON."""
    if environment_options.debug:
        print('Creating instance of ApiClient class ...')
    client_blueprint = ApiClient(
        environment_options.cluster_ip,
        (f'blueprints/{environment_options.blueprint_uuid}'
         '/runtime_editables'), '', environment_options.username,
        environment_options.password, environment_options.read_timeout)
    if environment_options.debug:
        print(f'Client info: {client_blueprint}\n')
    if environment_options.debug:
        print('Requesting blueprint details ...\n')
    return client_blueprint.send_request()


def _resolve_app_profile(environment_options, results_blueprint):
    """Fill in app_profile_reference from the blueprint spec if not given.

    Only needed when --app_profile was not passed on the command line.
    ***Important note: this demo only looks for an app profile named
    'Default'; edit that name below if your environment differs.

    Exits the process if the JSON response carries no app profile
    references at all (which likely indicates a failed request, or a
    request that wasn't a call to a blueprint's runtime_editables API).
    """
    if environment_options.app_profile_reference != '':
        if environment_options.debug:
            print(
                'App profile already specified; using command line parameter ...'
            )
        return

    if environment_options.debug:
        print('No app profile specified; grabbing it from '
              'the blueprint spec ...')

    # Guard clause replaces the original four-deep if/else pyramid that
    # duplicated this exact message + exit in four branches.
    resources = results_blueprint.get('resources')
    if not resources or 'app_profile_reference' not in resources[0]:
        print('no app profile references found; exiting.')
        sys.exit()

    if environment_options.debug:
        print('App profile reference found.')
    for resource in resources:
        # .get guards against resources after the first that lack the key
        # (the original indexed unconditionally and could KeyError).
        reference = resource.get('app_profile_reference') or {}
        if reference.get('name') == 'Default':
            environment_options.app_profile_reference = reference['uuid']
            if environment_options.debug:
                print(
                    'Default app profile reference UUID extracted.'
                )


def _build_launch_payload(environment_options):
    """Build the JSON request body for the simple_launch call.

    BUG FIX: the original assembled the JSON by string concatenation, so
    a quote or backslash in app_name/app_desc produced invalid JSON;
    json.dumps escapes all values correctly.
    """
    import json
    return json.dumps({
        'spec': {
            'app_name': environment_options.app_name,
            'app_description': environment_options.app_desc,
            'app_profile_reference': {
                'kind': 'app_profile',
                'name': 'Default',
                'uuid': environment_options.app_profile_reference,
            },
        },
    })


def main():
    '''
    main entry point into the 'app'
    every function needs a Docstring in order to follow best
    practices

    Collects environment options, fetches the blueprint's
    runtime_editables spec, resolves the 'Default' app profile reference
    (if not supplied on the command line), then POSTs a simple_launch
    request for the blueprint and prints the result in debug mode.
    '''

    # show_intro()

    # collect the environment options from the command line; packaged and
    # managed via the EnvironmentOptions module in environment_options/
    environment_options = EnvironmentOptions()
    environment_options.get_options()

    if environment_options.debug:
        print(f'{environment_options}\n')

    # check all required options; username/password prompting (if any)
    # happens inside EnvironmentOptions — here we only enforce presence
    _require_credentials(environment_options)

    results_blueprint = _fetch_blueprint(environment_options)

    # at this point we either have an explicit app profile reference or
    # we extract the 'Default' one from the spec (exits if none exists)
    _resolve_app_profile(environment_options, results_blueprint)

    # build the payload/request body that will be sent with the actual
    # launch request
    if environment_options.debug:
        print('Building blueprint launch payload ...\n')
    payload = _build_launch_payload(environment_options)

    if environment_options.debug:
        print(f'Payload: \n{payload}\n')

    if environment_options.debug:
        print('Creating instance of ApiClient class ...')
    client_launch = ApiClient(
        environment_options.cluster_ip,
        (f'blueprints/{environment_options.blueprint_uuid}'
         '/simple_launch'),
        payload,
        environment_options.username,
        environment_options.password,
        environment_options.read_timeout,
        method='post')

    if environment_options.debug:
        print(f'Client info: {client_launch}\n')
    if environment_options.debug:
        print('Sending request ...\n')
    results_launch = client_launch.send_request()
    if environment_options.debug:
        print(f'Results: \n{results_launch}')
コード例 #30
0
 def __init__(self, app_token):
     """Bind an authenticated API client and the 'credentials' endpoint base."""
     self.api_client = ApiClient(app_token)
     self.base = 'credentials'