Example #1
# Assumed imports; the original snippet omits them:
import inspect

import ipyparallel as ipp
from tabpy.tabpy_tools.client import Client


def main():
    # Connect to a running ipyparallel cluster and get a view of all engines
    client = ipp.Client(profile="default")
    dview = client[:]
    dview.execute("import bodo")
    dview.execute("import numpy as np")
    dview.execute("import pandas as pd")
    dview.execute("import os")
    # Ship the source of lr_from_sql (defined elsewhere in the original
    # script) to every engine
    dview.execute(inspect.getsource(lr_from_sql))

    # Deploy lr_snowflake (also defined elsewhere) to a TabPy server
    tp_client = Client('http://localhost:8080/')
    tp_client.deploy('lr_snowflake',
                     lr_snowflake,
                     'Logistic regression from Snowflake table',
                     override=True)
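The snippet above assumes an ipyparallel cluster is already running under the "default" profile. A minimal way to start one locally (a sketch using the ipyparallel 7+ API, not part of the original):

import ipyparallel as ipp

# Start four local engines; equivalent to the CLI command `ipcluster start -n 4`
cluster = ipp.Cluster(n=4)
rc = cluster.start_and_connect_sync()  # blocks until the engines are up
print(rc.ids)  # ids of the connected engines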
Example #2
import sys

from tabpy.tabpy_tools.client import Client


def deploy_model(funcName, func, funcDescription):
    # running from deploy_models.py; get_default_config_file_path,
    # parse_config and get_creds are helpers defined elsewhere in the
    # original project
    config_file_path = sys.argv[1] if len(
        sys.argv) > 1 else get_default_config_file_path()
    port, auth_on, prefix = parse_config(config_file_path)

    connection = Client(f"{prefix}://localhost:{port}/")

    if auth_on:
        # credentials are passed in from setup.py; the parentheses around
        # the tuple are required: the original omitted them, so the
        # conditional applied only to passwd and sys.argv[2] was always read
        user, passwd = ((sys.argv[2], sys.argv[3]) if len(sys.argv) == 4
                        else get_creds())
        connection.set_credentials(user, passwd)

    connection.deploy(funcName, func, funcDescription, override=True)
    print(f"Successfully deployed {funcName}")
Example #3
import sys

from tabpy.tabpy_tools.client import Client


def deploy_model(funcName, func, funcDescription):
    # running from deploy_models.py
    if len(sys.argv) > 1:
        config_file_path = sys.argv[1]
    else:
        config_file_path = get_default_config_file_path()
    port, auth_on, prefix = parse_config(config_file_path)

    connection = Client(f'{prefix}://localhost:{port}/')

    if auth_on:
        # credentials are passed in from setup.py
        if len(sys.argv) == 4:
            user, passwd = sys.argv[2], sys.argv[3]
        # running Sentiment Analysis independently
        else:
            user, passwd = get_creds()
        connection.set_credentials(user, passwd)

    connection.deploy(funcName, func, funcDescription, override=True)
    print(f'Successfully deployed {funcName}')
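get_creds is referenced but not shown in either variant above; a minimal sketch of what such a helper might do (pure assumption):

import getpass


def get_creds():
    # Hypothetical stand-in for the undefined helper: prompt interactively
    user = input("username: ")
    passwd = getpass.getpass("password: ")
    return user, passwd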
Example #4
from tabpy.tabpy_tools.client import Client
client = Client('http://localhost:9004/')
client.set_credentials('kikin', 'karate10')

# Deploying a Function

# * Add Function


def add(x, y):
    import numpy as np
    return np.add(x, y).tolist()


client.deploy('add', add, 'Adds two numbers x and y', override=True)
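Once deployed, the endpoint can be exercised from the same client; TabPy wraps the function's return value in a dict under the 'response' key:

result = client.query('add', [1, 2, 3], [10, 20, 30])
print(result['response'])  # expected: [11, 22, 33]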
Example #5
# Define the function tested above
def clustering(x, y):
    import numpy as np
    from sklearn.cluster import DBSCAN
    from sklearn.preprocessing import StandardScaler
    X = np.column_stack([x, y])
    X = StandardScaler().fit_transform(X)
    db = DBSCAN(eps=0.3, min_samples=3).fit(X)
    return db.labels_.tolist()


# Deploy the model to the TabPy server
# (add override=True if you are redeploying the model)
client.deploy(
    'clustering', clustering,
    'Returns cluster Ids for each data point specified by the pairs in x and y'
)
"""
Check that the model is deployed on the TabPy server at the URL below
(host and port of the running TabPy server):
http://localhost:9004/endpoints
"""
# Sample Data
x = [6.35, 6.40, 6.65, 8.60, 8.90, 9.00, 9.10]
y = [1.95, 1.95, 2.05, 3.05, 3.05, 3.10, 3.15]

# Test the deployed model
print(client.query('clustering', x, y))
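The endpoints listing mentioned in the docstring can also be checked programmatically (a sketch, assuming the default localhost:9004 server):

import requests

# Every deployed endpoint, including 'clustering', comes back as JSON
print(requests.get('http://localhost:9004/endpoints').json())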

Example #6
# Song-recommendation endpoint: rec is deployed below and called from a
# Tableau calculated field (the original comment references SCRIPT_INT).
# The snippet is truncated at the top; the imports, the def line and the
# unpacking of the first ten arguments are reconstructed assumptions.
import pandas as pd
from scipy.spatial import KDTree  # assumed import


def rec(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10,
        arg11, arg12, arg13, arg14, arg15, arg16):
    # Tableau passes every argument as a list; take the scalar values
    _arg1, _arg2, _arg3, _arg4, _arg5 = (
        arg1[0], arg2[0], arg3[0], arg4[0], arg5[0])
    _arg6, _arg7, _arg8, _arg9, _arg10 = (
        arg6[0], arg7[0], arg8[0], arg9[0], arg10[0])
    _arg11 = arg11[0]
    _arg12 = arg12[0]
    _arg13 = arg13[0]
    _arg14 = arg14[0]
    _arg15 = arg15[0]
    _arg16 = arg16[0]

    data = pd.read_csv('data_cleaned_and_pruned.csv', delimiter="|")

    search = pd.DataFrame([[
        _arg1, _arg2, _arg3, _arg4, _arg5, _arg6, _arg7, _arg8, _arg9, _arg10,
        _arg11, _arg12, _arg13, _arg14, _arg15
    ]])

    characteristics = data[[
        "acousticness", "danceability", "duration_s", "energy", "explicit",
        "instrumentalness", "key", "liveness", "loudness", "mode",
        "popularity", "speechiness", "tempo", "valence", "year"
    ]]

    # k-d tree over the 15 audio features; fetch the 5 nearest neighbours
    kdb = KDTree(characteristics.values)
    results_indexes = kdb.query(search.values, k=5)[-1]

    # _arg16 picks which of the five recommendations to return
    return str(data.iloc[results_indexes[0][_arg16]]["name"]) + " by " + str(
        data.iloc[results_indexes[0][_arg16]]["artists"]).replace(
            "'", "").replace("[", "").replace("]", "")


# connection is a tabpy_tools Client created elsewhere in the original file
connection.deploy('rec1', rec, 'Return recommended song')
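An illustrative query of the deployed endpoint; the feature values are made up, and the final argument selects which of the five neighbours to return:

features = [[0.5]] * 15  # 15 audio-feature arguments, one single-item list each
print(connection.query('rec1', *features, [2])['response'])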
Example #7
    # Fragment: the enclosing function definition, the feature frames
    # (df_mode, df_customer_category, ...) and the trained classifier clf
    # are built earlier in the original file and are not shown here.
    df = pd.concat([
        df_mode, df_customer_category, df_region, df_category, df_quantity,
        df_ratio
    ],
                   axis=1)
    print(df)

    df_dummy = pd.get_dummies(df)

    # Get predicted values using the trained model
    y_pred = clf.predict_proba(df_dummy)

    # Return the positive-class probabilities as a list
    return y_pred[:, 1].tolist()


if __name__ == '__main__':
    from tabpy.tabpy_tools.client import Client
    client = Client('http://localhost:9004/')
    _desision_tree()
    client.deploy('sub', sub, 'Simple subtraction function.', override=True)
    client.deploy('clustering',
                  clustering,
                  'Returns cluster Ids for each data point specified by the '
                  'pairs in x and y',
                  override=True)
    client.deploy('decision_tree',
                  decision_tree,
                  'decision_tree',
                  override=True)
Example #8
# Fragment: the opening of clean_text is missing from the original snippet.
# The signature, spaCy model and stopword path below are reconstructed
# assumptions, not source text.
import spacy


def clean_text(text_list, stopword_path="custom_stopwords.txt"):
    nlp = spacy.load("en_core_web_sm")  # assumed model
    # Load custom stopwords
    with open(stopword_path) as f:
        custom_stopwords = set(f.read().splitlines())
    return [process_text(x, nlp, custom_stopwords) for x in text_list]

def process_text(text, nlp, stopwords):
    '''
    This function performs text data preprocessing: tokenizing the text,
    removing punctuation, digits, URLs, e-mail addresses and stop words,
    lemmatizing the tokens, converting them to lower case, then joining
    the tokens back into a single string.

    Args:
    ------
        text (string): the text data to be processed
        nlp: a loaded spaCy language model
        stopwords (set): custom stop words to remove

    Returns:
    --------
        Returns processed text (string)
    '''
    if not text:
        return ""
    doc = nlp(text)
    # keep alphabetic ASCII tokens that are not stop words, URLs or e-mails
    filtered = [
        token.lemma_ for token in doc
        if not token.is_stop and token.is_alpha and token.is_ascii
        and not token.like_url and not token.like_email
    ]
    tokens = [w.lower() for w in filtered]  # lower case
    tokens = [w for w in tokens if w not in stopwords]  # remove custom stopwords
    processed_text = ' '.join(tokens)  # detokenize
    return processed_text

client.deploy("clean_text", clean_text, 'Returns processed text using Spacy library', override = True)