Code Example #1
def predict_result():
    if request.method == 'GET':
        dir_path = request.args.get('dir_path')
        dir_name = request.args.get('dir_name')
        return render_template('prediction_loading.html',
                               dir_name=dir_name,
                               dir_path=dir_path)

    if request.method == 'POST':
        arguments = json.loads(request.data.decode("UTF-8"))
        dir_name = arguments.get('dir_name')
        dir_path = arguments.get('dir_path')
        dir_name = dir_name.replace('.zip', '')
        data = predict(root=dir_path, img_zip=dir_name)
        output = get_formatted_data(
            **data,
            # request.args values arrive as strings; coerce the threshold to float
            threshold=request.args.get('threshold', 0.5, type=float),
        )
        img_dir = os.path.join(dir_path, dir_name)
        root = os.path.dirname(__file__)
        # command = f"cp -r {os.path.join(os.path.dirname(root), img_dir)} {os.path.join(root, 'static')}"
        # os.popen("sudo -S %s" % command, 'w').write('Root2018!\n')
        image_names = os.listdir(os.path.join(root, 'static', dir_name))
        uid = str(uuid.uuid4())
        PREDICTIONS[uid] = dict(dir_name=dir_name,
                                image_names=image_names,
                                predictions=output)
        return {'done': True, 'uid': uid}
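The POST branch stores its output in the module-level PREDICTIONS dict and returns only a uid, so a companion route presumably serves the stored entry later. A hypothetical sketch of such a lookup route; the route path, handler name, and 404 handling are assumptions, not the project's code:

from flask import abort, render_template

@app.route('/predict_result/<uid>')
def show_prediction(uid):
    # Look up the result saved by the POST branch above.
    stored = PREDICTIONS.get(uid)
    if stored is None:
        abort(404)
    return render_template('predict_result.html', **stored)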
Code Example #2
def predict_endpoint():
    passenger = request.json
    passenger_df = pd.DataFrame(passenger, index=[0])
    checked_data = ensure_correct_order(passenger_df)
    checked_df = checkColumns(checked_data)
    prediction = predict(checked_df, encode_cabin, extract_cabin_number,
                         encode_title, 'models/model.joblib')

    # Double quotes make the response valid JSON; the original template
    # used single quotes, which JSON parsers reject.
    return '{{"prediction": {0}}}'.format(prediction)
Code Example #3
def prediction():
    Config.ENTITY_NAME = 'animal'
    Config.ITERATION = 0
    Config.MODEL_NAME = 'nn_model'

    image = Image.open('cats_00001.jpg')
    preds = predict(image, entity_name='nist',
                    model_name='nn_model', model_iteration=0)

    print(preds)
    return ""
Code Example #4
File: app.py  Project: Oleg979/Cardio-Helper
def classify():
    print("Initializing backend...")
    print("Loading network...")
    body = request.json
    print(body)
    probability = predict(body['data'])
    print(probability)
    return jsonify({
        'success': True,
        # json.dumps cannot serialize a NumPy array; send a plain list instead
        'result': probability.astype(float).tolist()
    })
Code Example #5
File: app.py  Project: karthikbasavaraju/DeepDash
def prediction():

    # Parse the request body once instead of calling get_json() per field
    body = request.get_json()
    Config.ENTITY_NAME = body['entityName']
    Config.ITERATION = body['iteration']
    Config.MODEL_NAME = body['modelName']

    image = base64.b64decode(str(body['image']))
    image = Image.open(io.BytesIO(image))

    if Config.MODEL_NAME == 'DNN':
        preds = predict(image, entity_name=Config.ENTITY_NAME,
                        model_name=Config.MODEL_NAME,
                        model_iteration=Config.ITERATION)
    elif Config.MODEL_NAME == 'KNN':
        preds = "Model not Trained yet"
    else:
        # Guard against a NameError when an unknown model name is posted
        preds = "Unknown model: {}".format(Config.MODEL_NAME)
    return jsonify(preds)
Code Example #6
def test_predict():
    res = predict('Hallo')
    assert isinstance(res, str)
Code Example #7
def classify():
    filename = upload_file(request.files['file'])
    res = predict(filename)
    return jsonify({'success': True, 'class': res})
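upload_file here is a project helper, not a Flask API. A minimal sketch of what it plausibly does, assuming werkzeug is available; the helper name comes from the snippet, everything else is an assumption:

import os
from werkzeug.utils import secure_filename

UPLOAD_DIR = 'uploads'  # assumed location

def upload_file(file_storage):
    # Sanitize the client-supplied filename and persist the upload to disk.
    filename = secure_filename(file_storage.filename)
    path = os.path.join(UPLOAD_DIR, filename)
    file_storage.save(path)
    return path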
Code Example #8
# The snippet opens mid-call: res comes from scipy.optimize.minimize run on
# the logistic-regression cost function, along the lines of (costFunction
# and initial_theta are assumed names from the standard exercise):
res = optimize.minimize(costFunction,
                        initial_theta,
                        (X_padded, y),
                        jac=True,
                        method='TNC',
                        options=options)

cost = res.fun
theta = res.x

print('Cost at theta found by optimize.minimize: {:.3f}'.format(cost))
print('Expected cost (approx): 0.203')

print('Theta:{:.3f}, {:.3f}, {:.3f}'.format(*theta))
print('Expected theta (approx):-25.161, 0.206, 0.201')

# Plot decision boundary
plotDecisionBoundary(theta, X_padded, y)

# Predict probability for a student with score 45 on exam 1 and score 85 on exam 2

grades = np.array([1, 45, 85])
prob = sigmoid(grades.dot(theta))
print(
    'For a student with scores 45 and 85, we predict an admission probability of {:.3f}'
    .format(prob))
print('Expected value: 0.775 +/- 0.002')

# Compute accuracy on our training set
p = predict(theta, X_padded)
accuracy = np.mean(y == p) * 100
print('Train Accuracy: {}%'.format(accuracy))
print('Expected accuracy (approx): 89.0')
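The snippet leans on helpers from the classic logistic-regression exercise it comes from. A minimal sketch of sigmoid and predict consistent with how they are used above; a sketch under those assumptions, not the exercise's exact code:

import numpy as np

def sigmoid(z):
    # Element-wise logistic function.
    return 1.0 / (1.0 + np.exp(-z))

def predict(theta, X):
    # Predict class 1 when the modeled probability reaches 0.5.
    return (sigmoid(X.dot(theta)) >= 0.5).astype(int)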
Code Example #9
def main(args):
    progress = WorkSplitter()

    progress.section("Parameter Setting")
    print("Data Path: {}".format(args.data_dir))
    reviewJsonToronto = args.data_dir + args.data_name

    progress.section("Load data")
    df = get_yelp_df(path='', filename=reviewJsonToronto, sampling=True)
    print('Data loaded successfully')

    progress.section("Matrix Generation")
    rating_matrix, timestamp_matrix, I_C_matrix, IC_dictionary = get_rating_timestamp_matrix(
        df)
    # get ratingWuserAvg_matrix
    rating_array = rating_matrix.toarray()
    user_average_array = rating_array.sum(axis=1) / np.count_nonzero(
        rating_array, axis=1)
    init_UI = np.zeros(rating_array.shape)
    init_UI[rating_array.nonzero()] = 1

    # Create the rating-with-user-average array
    for i in range(user_average_array.shape[0]):
        init_UI[i] = init_UI[i] * (user_average_array[i] - 0.001)
    user_average_array = init_UI
    ratingWuserAvg_array = rating_array - user_average_array
    ratingWuserAvg_matrix = sparse.csr_matrix(ratingWuserAvg_array)

    progress.section("Split for training")
    rtrain_implicit, rvalid_implicit, rtest_implicit, rtrain_userAvg_implicit, rvalid_userAvg_implicit, \
    rtest_userAvg_implicit, nonzero_index, rtime, item_idx_matrix_train_implicit,item_idx_matrix_valid_implicit, item_idx_matrix_test_implicit \
    = time_ordered_splitModified(rating_matrix=rating_matrix, ratingWuserAvg_matrix=ratingWuserAvg_matrix, timestamp_matrix=timestamp_matrix,
                                                                     ratio=[0.5,0.2,0.3],
                                                                     implicit=True,
                                                                     remove_empty=False, threshold=3,sampling=False,
                                                                     sampling_ratio=0.1, trainSampling=0.95)

    rtrain, rvalid, rtest, rtrain_userAvg, rvalid_userAvg, rtest_userAvg, nonzero_index, rtime, \
    item_idx_matrix_train,item_idx_matrix_valid, item_idx_matrix_test = time_ordered_splitModified(rating_matrix=rating_matrix,
                                                                     ratingWuserAvg_matrix=ratingWuserAvg_matrix, timestamp_matrix=timestamp_matrix,
                                                                     ratio=[0.5,0.2,0.3],
                                                                     implicit=False,
                                                                     remove_empty=False, threshold=3,
                                                                     sampling=False, sampling_ratio=0.1,
                                                                     trainSampling=0.95)

    rtrain = rtrain + rvalid + rtest
    rtrain_implicit = rtrain_implicit + rvalid_implicit + rtest_implicit

    progress.section("Get UC Matrix")
    #Get UC matrices
    U_C_matrix_explicit, U_C_matrix_implicit = get_UC_Matrix(
        I_C_matrix, rtrain_implicit)

    progress.section("Get IK Similarity")
    IK_MATRIX = ikGeneration(df)
    IK_similarity = train(IK_MATRIX)
    '''
    progress.section("Get IC Similarity")
    IC_similarity = train(I_C_matrix)
    '''

    progress.section("Get IP, IS, ID Dictionary")
    #intersection = get_intersection()
    intersection_yonge_and_finch, intersection_bloor_and_bathurst, intersection_spadina_and_dundas,\
    intersection_queen_and_spadina, intersection_bloor_and_yonge, intersection_dundas_and_yonge = get_intersection()
    IP_df, IP_dictionary = get_IP_matrix_dictionary(df, IK_similarity)
    IS_dictionary = get_IS_dictionary(df)
    #ID_dictionary = get_ID_dictionary(df,list(set(df['business_num_id'])),intersection)
    ID_dictionary_yonge_and_finch = get_ID_dictionary(
        df, list(set(df['business_num_id'])), intersection_yonge_and_finch)
    ID_dictionary_bloor_and_bathurst = get_ID_dictionary(
        df, list(set(df['business_num_id'])), intersection_bloor_and_bathurst)
    ID_dictionary_spadina_and_dundas = get_ID_dictionary(
        df, list(set(df['business_num_id'])), intersection_spadina_and_dundas)
    ID_dictionary_queen_and_spadina = get_ID_dictionary(
        df, list(set(df['business_num_id'])), intersection_queen_and_spadina)
    ID_dictionary_bloor_and_yonge = get_ID_dictionary(
        df, list(set(df['business_num_id'])), intersection_bloor_and_yonge)
    ID_dictionary_dundas_and_yonge = get_ID_dictionary(
        df, list(set(df['business_num_id'])), intersection_dundas_and_yonge)

    progress.section("user item predict")
    user_item_prediction_score = predict(rtrain,
                                         110,
                                         IK_similarity,
                                         item_similarity_en=True)
    UI_Prediction_Matrix = prediction(user_item_prediction_score, rtrain)

    progress.section("Save datafiles csv")
    save_dataframe_csv(df, args.data_dir, "Dataframe")

    progress.section("Save datafiles JSON")
    saveDictToJson(IC_dictionary,
                   args.data_dir,
                   'icDictionary',
                   trainOrTest='train')
    saveDictToJson(IP_dictionary,
                   args.data_dir,
                   'ipDictionary',
                   trainOrTest='train')
    saveDictToJson(IS_dictionary,
                   args.data_dir,
                   'isDictionary',
                   trainOrTest='train')
    #saveDictToJson(ID_dictionary, args.data_dir, 'idDictionary', trainOrTest='train')
    saveDictToJson(ID_dictionary_yonge_and_finch,
                   args.data_dir,
                   'idDictionary_yongefinch',
                   trainOrTest='train')
    saveDictToJson(ID_dictionary_bloor_and_bathurst,
                   args.data_dir,
                   'idDictionary_bloorbathurst',
                   trainOrTest='train')
    saveDictToJson(ID_dictionary_spadina_and_dundas,
                   args.data_dir,
                   'idDictionary_spadinadundas',
                   trainOrTest='train')
    saveDictToJson(ID_dictionary_queen_and_spadina,
                   args.data_dir,
                   'idDictionary_queenspadina',
                   trainOrTest='train')
    saveDictToJson(ID_dictionary_bloor_and_yonge,
                   args.data_dir,
                   'idDictionary_blooryonge',
                   trainOrTest='train')
    saveDictToJson(ID_dictionary_dundas_and_yonge,
                   args.data_dir,
                   'idDictionary_dundasyonge',
                   trainOrTest='train')

    progress.section("Save datafiles Numpy")
    save_numpy_csr(rtrain, args.data_dir, "rtrain")
    save_numpy_csr(I_C_matrix, args.data_dir, "icmatrix")
    #save_numpy(user_item_prediction_score, args.data_dir, "predictionScore")
    save_numpy(IK_similarity, args.data_dir,
               "IKbased_II_similarity")  # Tina requested this name
    save_numpy(UI_Prediction_Matrix, args.data_dir, "UI_prediction_matrix")
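save_numpy and save_numpy_csr are project utilities rather than library calls. A minimal sketch of how they might persist the matrices, assuming NumPy and SciPy; the file layout and internals are assumptions:

import os
import numpy as np
from scipy import sparse

def save_numpy(matrix, path, name):
    # Hypothetical helper: store a dense array as <path>/<name>.npy
    np.save(os.path.join(path, name + '.npy'), matrix)

def save_numpy_csr(matrix, path, name):
    # Hypothetical helper: store a CSR matrix as <path>/<name>.npz
    sparse.save_npz(os.path.join(path, name + '.npz'), matrix)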
Code Example #10
from utils.config import *
from utils.predict import predict, bert_model
import codecs
import sys
args = sys.argv
file_ = sys.stdout
if len(args) > 1:
    file_ = codecs.open(args[1], "a")
net_trained = bert_model()
print("how many emojis do you want ?")
emoji_num = int(input())
while True:
    print("Please input the text you want to emojify.")
    input_text = input()
    # input() strips the trailing newline, so an empty string (or "q")
    # is the quit signal; the original "\n" check could never match.
    if input_text in ("q", ""):
        break
    output = predict(input_text, net_trained, emoji_num).tolist()
    s = input_text
    for label in output[0]:
        s += label_to_emoji[label]
    print(s, file=file_)
Code Example #11
def run():
	
	'''
	Reads the LIMITED data for SBER
	'''
	query = '''
		SELECT * FROM (
			SELECT * FROM {}_train 
			ORDER BY date_time DESC LIMIT 3000
		)Var1
		ORDER BY date_time ASC
	'''.format(ASSET)
	df = pd.read_sql(query, engine)
	
	'''
	Sets the datetime index, drops
	duplicates and nulls
	'''
	df['date_time'] = pd.to_datetime(
		df['date_time'], errors='coerce'
	)
	df = df.set_index('date_time')
	df.dropna(inplace=True)
	
	'''
	Calculates the proportion of each row
	in the order book relative to the
	appropriate section (bid or offer)
	'''
	#Offer
	OC_cols = df.loc[
		:, 'offer_count_10':'offer_count_1'
	]
	df_offer_count_proportion =\
	  OC_cols.div(OC_cols.sum(axis=1), axis=0)
	#Bid
	BC_cols = df.loc[
		:, 'bid_count_10':'bid_count_1'
	]
	df_bid_count_proportion =\
	  BC_cols.div(BC_cols.sum(axis=1), axis=0)
	
	'''
	Calculates offer/bid ratio per row
	'''
	offer_bid_ratio = pd.DataFrame(
		OC_cols.sum(axis=1) /\
		BC_cols.sum(axis=1))
	
	'''
	Drops columns with separate bids
	and asks
	'''
	cols_to_drop = [
		'offer_count_10', 'offer_count_9', 
		'offer_count_8', 'offer_count_7',
		'offer_count_6', 'offer_count_5', 
		'offer_count_4', 'offer_count_3',
		'offer_count_2', 'offer_count_1', 
		'bid_count_10', 'bid_count_9', 
		'bid_count_8', 'bid_count_7',
		'bid_count_6', 'bid_count_5', 
		'bid_count_4', 'bid_count_3',
		'bid_count_2', 'bid_count_1'
	]
	df.drop(cols_to_drop, axis=1, inplace=True)
	
	'''
	Concatenates single df for analysis
	and drops nulls
	'''
	list_of_dfs = [
		df,
		df_offer_count_proportion, 
		df_bid_count_proportion, 
		offer_bid_ratio
	]
	temp_df = pd.concat(list_of_dfs, axis=1)
	temp_df.dropna(inplace=True)

	'''
	Appends indicators and drops nulls
	'''
	for key in dict_of_tf:
		temp_df = append_indicators(
			temp_df, key, list_with_indicators
		)
	temp_df = temp_df.dropna()
	
	print(temp_df.shape)
	
	'''
	Makes predictions from the latest uploaded data
	with shifted threshold
	'''
	y_pred = predict(clf, temp_df, PROBA_THRESH)
	
	'''
	Uploads a signal to the DB.
	Parameterized queries replace the
	original str.format calls, which
	were open to SQL injection.
	'''
	sql = """UPDATE `trade_signals`
		SET `signal` = %s,
		`dist_to_max` = %s,
		`dist_to_min` = %s
		WHERE `asset` = %s"""
	if y_pred[-1] == 'up':
		params = ('long', DIST_TO_MAX_HIGH, DIST_TO_MIN_LOW)
		label = 'Long'
	elif y_pred[-1] == 'down':
		params = ('short', DIST_TO_MAX_LOW, DIST_TO_MIN_HIGH)
		label = 'Short'
	elif y_pred[-1] == 'nothing':
		params = ('nothing', 0, 0)
		label = 'Nothing'
	else:
		return
	cursor.execute(sql, params + (ASSET,))
	db.commit()
	print(
		datetime.datetime.now().time(),
		label,
		ASSET
	)
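predict here is the project's own helper, not scikit-learn's. A minimal sketch of a shifted-threshold predictor consistent with how it is called above, assuming clf is a scikit-learn classifier exposing predict_proba; the internals are an assumption:

import numpy as np

def predict(clf, df, proba_thresh):
    # Hypothetical sketch: emit a class label only when the top
    # probability clears the shifted threshold, else 'nothing'.
    probs = clf.predict_proba(df)
    preds = []
    for row in probs:
        best = int(np.argmax(row))
        if row[best] >= proba_thresh:
            preds.append(clf.classes_[best])
        else:
            preds.append('nothing')
    return np.array(preds)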
Code Example #12
import numpy as np
import argparse
from utils.build_model1 import *
from utils.build_model2 import *
from utils.predict import predict

parser = argparse.ArgumentParser()
parser.add_argument('--train_feat', type=str, help='Train features? (y/n)')
parser.add_argument('--train_model', type=str, help='Train model? (y/n)')
args = parser.parse_args()

train_y_step_1 = np.load('./trained/train_y_step_1.npy')
test_y_step_1 = np.load('./trained/test_y_step_1.npy')
train_y_step_2 = np.load('./trained/train_y_step_2.npy')
test_y_step_2 = np.load('./trained/test_y_step_2.npy')

if args.train_feat == 'y':
    from utils.features import *
else:
    train_X = np.load('./trained/train_X.npy')
    test_X = np.load('./trained/test_X.npy')

if args.train_model == 'y':
    model1 = build_model1(train_X, test_X, train_y_step_1, test_y_step_1)
    model2 = build_model2(train_X, test_X, train_y_step_2, test_y_step_2)
    predict(model1, model2, test_X)
else:
    from utils.prediction import *
Code Example #13
File: app.py  Project: philschmid/german-gpt2
def get_prediciton():
    data = request.get_json()
    response = predict(data['text'])
    return jsonify({"text": response})
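Endpoints like this one can be exercised with the requests library. A minimal client sketch; the host, port, and /predict route are assumptions, not the project's documented interface:

import requests

# Hypothetical URL; adjust to the app's actual host and route.
resp = requests.post('http://localhost:5000/predict',
                     json={'text': 'Der Sinn des Lebens ist'})
print(resp.json()['text'])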