Example #1
    def test_init(self):
        client = Client("http://example.com:9004")

        self.assertEqual(client._endpoint, "http://example.com:9004")

        client = Client("http://example.com/", 10.0)

        self.assertEqual(client._endpoint, "http://example.com/")

        client = Client(endpoint="https://example.com/", query_timeout=-10.0)

        self.assertEqual(client._endpoint, "https://example.com/")
        self.assertEqual(client.query_timeout, 0.0)

        # invalid endpoint tests
        with self.assertRaises(ValueError):
            Client('')
        with self.assertRaises(TypeError):
            Client(1.0)
        with self.assertRaises(ValueError):
            Client("*#")
        with self.assertRaises(TypeError):
            Client()
        with self.assertRaises(ValueError):
            Client("http:/www.example.com/")
        with self.assertRaises(ValueError):
            Client("httpx://www.example.com:9004")
Example #2
import inspect

import ipyparallel as ipp
from tabpy.tabpy_tools.client import Client


def main():
    client = ipp.Client(profile="default")
    dview = client[:]
    dview.execute("import bodo")
    dview.execute("import numpy as np")
    dview.execute("import pandas as pd")
    dview.execute("import os")
    # ship the training helper (defined elsewhere in this script) to the engines
    dview.execute(inspect.getsource(lr_from_sql))

    tp_client = Client('http://localhost:8080/')
    tp_client.deploy('lr_snowflake',
                     lr_snowflake,
                     'Logistic regression from Snowflake table',
                     override=True)
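Once deploy() returns, lr_snowflake is exposed as a REST endpoint on the TabPy server and can be queried with the same client. A minimal sketch; the feature vectors below are placeholders, since lr_snowflake's real signature lives in its definition elsewhere in this script:

    result = tp_client.query('lr_snowflake', [1.0, 2.0], [0.5, 0.7])
    print(result['response'])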
Example #3
import sys

from tabpy.tabpy_tools.client import Client


def deploy_model(funcName, func, funcDescription):
    # running from deploy_models.py
    config_file_path = sys.argv[1] if len(
        sys.argv) > 1 else get_default_config_file_path()
    port, auth_on, prefix = parse_config(config_file_path)

    connection = Client(f"{prefix}://localhost:{port}/")

    if auth_on:
        # credentials are passed in from setup.py
        if len(sys.argv) == 4:
            user, passwd = sys.argv[2], sys.argv[3]
        else:
            user, passwd = get_creds()
        connection.set_credentials(user, passwd)

    connection.deploy(funcName, func, funcDescription, override=True)
    print(f"Successfully deployed {funcName}")
Example #4
from tabpy.tabpy_tools.client import Client


def clean_text(input_df):
    '''
    Creates a preprocessed PAR column and returns the new dataframe.
    Called from Tableau Prep.

    Args:
    ------
        input_df: the whole dataframe passed in from Tableau

    Returns:
    --------
        The processed pandas dataframe
    '''
    client = Client("http://10.155.94.140:9004/")
    processed = client.query('clean_text',
                             input_df['X_PAR_COMMENTS'].tolist())['response']
    input_df['PROCESSED_PAR'] = processed
    output_df = input_df
    # return the entire df
    return output_df
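Because this function is called from Tableau Prep, the same script file can also define an optional get_output_schema() that tells Prep the shape of the returned dataframe. A sketch, assuming Prep's TabPy integration (prep_string() is injected by Prep at runtime, not imported):

import pandas as pd


def get_output_schema():
    # Sketch: declares the text columns this flow step returns.
    return pd.DataFrame({
        'X_PAR_COMMENTS': prep_string(),
        'PROCESSED_PAR': prep_string(),
    })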
Example #5
import sys

from tabpy.tabpy_tools.client import Client


def deploy_model(funcName, func, funcDescription):
    # running from deploy_models.py
    if len(sys.argv) > 1:
        config_file_path = sys.argv[1]
    else:
        config_file_path = get_default_config_file_path()
    port, auth_on, prefix = parse_config(config_file_path)

    connection = Client(f'{prefix}://localhost:{port}/')

    if auth_on:
        # credentials are passed in from setup.py
        if len(sys.argv) == 4:
            user, passwd = sys.argv[2], sys.argv[3]
        # running Sentiment Analysis independently
        else:
            user, passwd = get_creds()
        connection.set_credentials(user, passwd)

    connection.deploy(funcName, func, funcDescription, override=True)
    print(f'Successfully deployed {funcName}')
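A hypothetical call site, as it might appear at the bottom of the sentiment-analysis script the comment alludes to (the endpoint name and function object are assumptions, not taken from the repository):

if __name__ == '__main__':
    # Hypothetical: deploy the scorer defined elsewhere in this script
    # under the name Tableau will reference in SCRIPT_* calls.
    deploy_model('SentimentAnalysis',
                 SentimentAnalysis,
                 'Returns a sentiment score for each passed-in string')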
Example #6
from tabpy.tabpy_tools.client import Client
client = Client('http://localhost:9004/')
client.set_credentials('kikin', 'karate10')

# Deploying a Function

# * Add Function


def add(x, y):
    import numpy as np
    return np.add(x, y).tolist()


client.deploy('add', add, 'Adds two numbers x and y', override=True)
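From Tableau, the deployed function is reached through a SCRIPT_* calculated field, e.g. tabpy.query('add', _arg1, _arg2)['response']. The same endpoint can be smoke-tested straight from Python; note that each argument is a list with one value per row in Tableau's partition:

result = client.query('add', [1, 2, 3], [10, 20, 30])
print(result['response'])  # [11, 22, 33]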
Example #7
File: dbscan.py  Project: mmuell-ai/tabpy
from tabpy.tabpy_tools.client import Client

client = Client('http://localhost:8080/')


def clustering(x, y):
    import numpy as np
    from sklearn.cluster import DBSCAN
    from sklearn.preprocessing import StandardScaler
    X = np.column_stack([x, y])
    X = StandardScaler().fit_transform(X)
    db = DBSCAN(eps=1, min_samples=3).fit(X)
    return db.labels_.tolist()

if __name__ == "__main__":
    client.deploy(
        'clustering',
        clustering,
        'Returns cluster Ids for each data point specified by the '
        'pairs in x and y'
    )
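Note that deploy() here omits override=True, so rerunning the script against a server that already hosts a 'clustering' endpoint will fail; pass override=True, as the earlier examples do, to redeploy. Querying the endpoint is then a one-liner (the sample points are made up):

labels = client.query('clustering',
                      [6.35, 6.40, 6.65, 8.90, 9.00],
                      [1.95, 1.95, 2.05, 3.05, 3.10])['response']
print(labels)  # one cluster id per point, -1 for noise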
Example #8
#!/usr/bin/env python
# coding: utf-8

import pandas as pd
from scipy.spatial import KDTree
from tabpy.tabpy_tools.client import Client

connection = Client('http://localhost:9004/')


def rec(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11,
        arg12, arg13, arg14, arg15, arg16):

    # Tableau passes each argument as a list with one value per row;
    # only the first element of each is needed here.
    _arg1 = arg1[0]
    _arg2 = arg2[0]
    _arg3 = arg3[0]
    _arg4 = arg4[0]
    _arg5 = arg5[0]
    _arg6 = arg6[0]
    _arg7 = arg7[0]
    _arg8 = arg8[0]
    _arg9 = arg9[0]
    _arg10 = arg10[0]
    _arg11 = arg11[0]
    _arg12 = arg12[0]
    _arg13 = arg13[0]
    _arg14 = arg14[0]
    _arg15 = arg15[0]
    _arg16 = arg16[0]

    data = pd.read_csv('data_cleaned_and_pruned.csv', delimiter="|")
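The excerpt stops right after loading the dataset, but the imports point at the intended pattern: build a KDTree over numeric features and return the nearest rows as recommendations. A heavily hedged sketch of that pattern; the column handling and k are invented, not recovered from the original file:

import pandas as pd
from scipy.spatial import KDTree


def nearest_rows(data, query_point, k=5):
    # Sketch only: assumes the cleaned CSV exposes numeric feature
    # columns; the real script's columns are unknown.
    features = data.select_dtypes('number').to_numpy()
    tree = KDTree(features)
    _, idx = tree.query(query_point, k=k)
    return data.iloc[idx]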
Example #9
    def setUp(self):
        self.client = Client("http://example.com/")
        self.client._service = Mock()  # TODO: should spec this
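With _service mocked out, tests can drive Client methods without a live server. A sketch of the pattern, assuming Client.query delegates to _service.query (which is what the mock stands in for):

    def test_query(self):
        self.client._service.query = Mock(return_value={'response': [3]})
        result = self.client.query('add', [1], [2])
        self.assertEqual(result['response'], [3])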
Example #10
# Import packages
from tabpy.tabpy_tools.client import Client

client = Client("http://10.155.94.140:9004/")

def clean_text(text_list):
    """
    Clean text with spacy library
    """
    import pandas as pd
    import numpy as np
    import spacy

    # configure stopword path
    stopword_path = "/home/nusintern/project/nus/scripts/stopwords.txt"

    # Import language model
    nlp = spacy.load('en_core_web_sm', disable=['tagger', 'parser', 'ner'])

    # Load custom stopwords
    with open(stopword_path) as f:
        custom_stopwords = set(f.read().splitlines())
    return [process_text(x, nlp, custom_stopwords) for x in text_list]

def process_text(text, nlp, stopwords):
    '''
    Tokenizes the text, lower-cases it, drops punctuation, digits and stop
    words, lemmatizes the tokens (spaCy's stand-in for stemming) and joins
    them back into a single string.
    '''
    doc = nlp(text.lower())
    return ' '.join(t.lemma_ for t in doc
                    if t.is_alpha and t.lemma_ not in stopwords)