예제 #1
0
def checkjs():
    """Classify the news text passed as the ``query`` request argument.

    Reads ``query`` from the Flask request args, runs the model once,
    and returns a JSON response with a human-readable prediction and
    the supporting citations.

    Returns:
        flask.Response: JSON body ``{"prediction": ..., "citations": ...}``.
    """
    query = request.args.get("query")
    # pred.predict returns (label, citations).  The original called it
    # twice back to back (second call discarded the citations and only
    # overwrote the label) — one call is sufficient.
    res, citation = pred.predict(query)
    if res == 1:
        prediction = "Probably Fake"
    elif res == 0:
        prediction = "Probably Real"
    else:
        prediction = "Arguable"  # fixed misspelling ("Argueable")
    response = make_response(
        jsonify({
            "prediction": prediction,
            "citations": citation,
        }))
    return response
예제 #2
0
def image(bot, update):
    """Telegram handler: classify an incoming image and ask for a vote.

    Accepts the image either as a photo or as a document, downloads it,
    saves a debug copy keyed by the content's MD5, runs ``predict`` on
    it, and replies with the result plus inline vote buttons.

    Args:
        bot: Telegram bot instance (provides ``getFile``).
        update: Telegram update carrying the message.
    """
    m = update.message
    if m.photo:
        # photo[-1] is the largest available size variant.
        file_obj = m.photo[-1]
    elif m.document:
        file_obj = m.document
    else:
        m.reply_text("Please send me image as photo or file.")
        return

    photo_file = bot.getFile(file_obj.file_id)
    data = http.request("GET", photo_file.file_path).data

    # Renamed from `id` (shadowed the builtin).
    img_id = md5.md5(data).hexdigest()

    debug_path = "debug/%s.jpg" % img_id
    # Context manager instead of file(...).write(...): the original
    # never closed the handle, leaking it until GC.
    with open(debug_path, "wb") as fh:
        fh.write(data)

    resp, result = predict(debug_path)

    reply_markup = InlineKeyboardMarkup([[
        InlineKeyboardButton("⭕️", callback_data="%s/%d/Y" % (img_id, resp)),
        InlineKeyboardButton("❓", callback_data="%s/%d/?" % (img_id, resp)),
        InlineKeyboardButton("❌", callback_data="%s/%d/N" % (img_id, resp)),
    ]])

    m.reply_text(
        "This image is a %d-jigen one. (S = %.4f)\n" % (resp, result) +
        "Please vote whether it is right.",
        quote=True,
        reply_markup=reply_markup,
    )
예제 #3
0
def show_gold(imgName):
    """Render the prediction page for *imgName*, guarded by ``g.ok``.

    ``g.ok`` acts as a one-shot flag: when set, run the prediction,
    clear the flag and render the result template; otherwise redirect
    back to the index.
    """
    # Truthiness instead of `== True`; the flag is a plain boolean
    # (it is assigned False below).
    if g.ok:
        result = predict(imgName)
        g.ok = False  # consume the one-shot flag
        return render_template("Untitled-3.html", lines=result)
    else:
        return redirect(url_for('index'))
예제 #4
0
def check():
    """Handle the news-check form submission.

    On POST, classify the submitted ``query`` and render the result
    page.  On any other method the original code fell through and hit
    a NameError (``res``/``prediction``/``citations`` unbound); render
    the bare page instead.
    """
    if request.method == 'POST':
        query = request.form['query']
        res, citations = pred.predict(query)
        if res == 1:
            prediction = "Probably Fake"
        elif res == 0:
            prediction = "Probably Real"
        else:
            prediction = "Arguable"  # fixed misspelling ("Argueable")
        print(res)
        return render_template('index.html',
                               prediction_text='The news is {}'.format(prediction),
                               citation=citations)
    # GET (or other methods): show the form without a prediction.
    return render_template('index.html')
def mint1():
    """Populate the GUI output widgets from the plot text.

    Side effects only: clears the title/genre/keyword Tk text widgets,
    then inserts tokenized keywords, a placeholder title, and a genre
    predicted from the plot text.

    NOTE(review): operates on module-level Tk widgets (title_entry,
    genre_entry, keyword_entry, plot_entry) and the `pred` module —
    none of which are visible in this chunk; confirm against the rest
    of the file.
    """

    #name = ent2.get("1.0", END)
    # Clear the three output widgets before repopulating them.
    title_entry.delete("1.0", END)
    genre_entry.delete("1.0", END)
    keyword_entry.delete("1.0", END)
    # Keywords: plot text -> stop-word filter -> cleanup -> tokenize.
    keyword_entry.insert(0.0, pred.word_tokenize(pred.c_plot(pred.stop_words_fn(plot_entry.get("1.0", END)))))
    title_entry.insert(0.0, "NO TITLE PROVIDED !!")
    # Short plots (< 100 chars incl. trailing newline) are routed to
    # predict1, longer ones to predict — presumably two differently
    # trained models; confirm against the `pred` module.
    if len(plot_entry.get("1.0", END)) < 100:
        print(len(plot_entry.get("1.0", END)))
        genre_entry.insert(0.0, pred.predict1(plot_entry.get("1.0", END)))
    else:
        print(len(plot_entry.get("1.0", END)))
        genre_entry.insert(0.0, pred.predict(plot_entry.get("1.0", END)))

    print('gui')
예제 #6
0
def tweet(data):
    """Post a weather hashtag to Twitter chosen by the model.

    ``pred.predict(data)`` yields an index into the hashtag list
    (#sunny / #cloud / #rainy); the selected tag is posted via the
    statuses/update endpoint and the HTTP outcome is printed.
    """
    # Build the authenticated session from the configured credentials.
    session = OAuth1Session(config_tw.CONSUMER_KEY,
                            config_tw.CONSUMER_SECRET,
                            config_tw.ACCESS_TOKEN,
                            config_tw.ACCESS_TOKEN_SECRET)

    # Tweet-post endpoint.
    url = "https://api.twitter.com/1.1/statuses/update.json"

    hashtags = ["#sunny", "#cloud", "#rainy"]
    status_text = hashtags[pred.predict(data)]

    res = session.post(url, params={"status": status_text})

    # Report whether the post succeeded.
    if res.status_code == 200:
        print("Success.")
    else:
        print("Failed. : %d" % res.status_code)
예제 #7
0
def resultgold(imgName):
    """Render the gold result page for *imgName*.

    The literal string "None" signals that no image was supplied, in
    which case the bare template is rendered instead.
    """
    # Guard clause for the no-image sentinel.
    if imgName == "None":
        return render_template("template.html")
    return render_template("template-gold.html", result=predict(imgName))
if __name__ == '__main__':
    warnings.filterwarnings('ignore')

    train, test = fvc_dataset(args.data_folder)
    meta = select_meta(train)

    train_model(
        x=train[meta],
        y=train['FVC'],
        lgbm_params=lgbm_fvc_params,
        phase='fvc',
        output_folder=args.output_folder,
    )

    fvc_pred = predict(train[meta],
                       phase='fvc',
                       output_folder=args.output_folder)

    #display(fvc_pred.head())

    conf_x, conf_y = conf_dataset(train, fvc_pred)
    train_model(
        x=conf_x,
        y=conf_y,
        lgbm_params=lgbm_conf_params,
        phase='conf',
        output_folder=args.output_folder,
    )

    inf_fvc_pred = predict(test[meta],
                           phase='fvc',
예제 #9
0
                    cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

                    # select hand roi and apply mask by skin color
                    hand_roi = frame_blur[y:y + h, x:x + w].copy()
                    roi_ycrcb = cv.cvtColor(hand_roi, cv.COLOR_BGR2YCrCb)
                    lower = np.array([60, max(0, cr - cr_diff), max(0, cb - cb_diff)], dtype="uint8")
                    upper = np.array([255, min(255, cr + cr_diff), min(255, cb + cb_diff)], dtype="uint8")
                    mask = cv.inRange(roi_ycrcb, lower, upper)

                    # apply morphology operations to improve mask quality
                    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (7, 7))
                    mask = cv.morphologyEx(mask, cv.MORPH_CLOSE, kernel)
                    mask = cv.dilate(mask, kernel)

                    # make prediction on gesture
                    pred = predict(model, mask, device)
                    char = class_map[pred.item()]
                    cv.putText(frame_resized, "Char: " + char, (20, 430), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                except:
                    calibrated = False
                    x, y, w, h = (50, 50, 250, 250)
            else:
                calibrated = False

        else:

            # set color of central pixel of hand roi as skin color
            if cv.waitKey(1) & 0xFF == ord(" "):
                hand_roi = frame_blur[y:y + h, x:x + w].copy()
                roi_ycrcb = cv.cvtColor(hand_roi, cv.COLOR_BGR2YCrCb)
                _, cr, cb = roi_ycrcb[h // 2, w // 2, :]
예제 #10
0
파일: main.py 프로젝트: PerceptronV/Exnate
# File names of the model dependencies fetched by get_params().
data_param_fname = 'data_params.json'
model_param_fname = 'model_params.json'
feat_fname = 'feature_names.json'
weights_fname = 'weights.h5'

print('Getting model dependencies...')
get_params(subdir, data_param_fname, model_param_fname, feat_fname,
           weights_fname)

# Load the JSON configs with context managers — the original used
# json.load(open(...)) three times and never closed the handles.
with open(subdir + data_param_fname, 'r') as fh:
    data_param = json.load(fh)
with open(subdir + model_param_fname, 'r') as fh:
    model_param = json.load(fh)
with open(subdir + feat_fname, 'r') as fh:
    feature_names = json.load(fh)

print('Building model by passing in real data...')
pred_model = ForecastModel(model_param['input_shape'],
                           model_param['residual_shape'],
                           model_param['output_shape'],
                           model_param['return_sequences'],
                           model_param['rnn_units'])
pred_model = build(pred_model, data_param, model_param)

print('Loading weights...')
pred_model.load_weights(subdir + weights_fname)

# Interactive forecast loop: prompt for a date range and predict.
while True:  # idiomatic form of the original `while 1`
    start = dt.strptime(input('Enter start date (yyyy-mm-dd): '), '%Y-%m-%d')
    end = dt.strptime(input('Enter end date (yyyy-mm-dd): '), '%Y-%m-%d')
    _ = predict(start, end, pred_model, data_param, model_param, feature_names)
    print('\n')
예제 #11
0
def main():
    """Run the ex4 neural-network exercise end to end.

    Mirrors the MATLAB ex4 script: load the handwritten-digit data,
    load pre-trained weights, check the cost function with and without
    regularization, check the sigmoid gradient and backpropagation,
    train the network with scipy's CG optimizer, visualize the hidden
    layer, and report training-set accuracy.

    NOTE(review): depends on module-level globals defined elsewhere in
    the file (hidden_layer_size, num_labels, MAXITER, sci, np, code,
    and helpers such as nnCostFunction / displayData / predict).
    """

    # %% =========== Part 1: Loading and Visualizing Data =============
    #  We start by loading and visualizing the dataset of handwritten
    #  digits.

    # Read the Matlab data
    m, n, X, y = getMatlabTrainingData()

    # number of features
    input_layer_size = n

    # Select some random images from X
    print('Selecting random examples of the data to display.\n')
    sel = np.random.permutation(m)
    sel = sel[0:100]

    # Re-work the data orientation of each training example: MATLAB
    # stores the 20x20 images column-major, so transpose each one.
    image_size = 20
    XMatlab = np.copy(X)  # need a deep copy, not just the reference
    for i in range(m):
        XMatlab[i, :] = XMatlab[i, :].reshape(image_size, image_size).transpose().reshape(1, image_size*image_size)

    # display the sample images
    displayData(XMatlab[sel, :])

    # Print out the labels for what is being seen.
    print('These are the labels for the data ...\n')
    print(y[sel, :].reshape(10, 10))

    # Pause program
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")

    # %% ================ Part 2: Loading Parameters ================
    #  Load some pre-initialized neural network parameters.

    print('\nLoading Saved Neural Network Parameters ...\n')

    # Load the weights into variables Theta1 and Theta2
    import scipy.io as sio  # normalized: original had "scipy .io"
    fnWeights = '/home/jennym/Kaggle/DigitRecognizer/ex4/ex4weights.mat'
    weights = sio.loadmat(fnWeights)
    Theta1 = weights['Theta1']
    Theta2 = weights['Theta2']

    # Unroll parameters (column-major to match the MATLAB layout)
    nn_params = np.hstack((Theta1.ravel(order='F'), Theta2.ravel(order='F')))

    # %% ================ Part 3: Compute Cost (Feedforward) ==========
    #  Implement the feedforward pass returning cost only, and verify
    #  it against the fixed debugging value.  Regularization follows in
    #  Part 4.

    print('\nFeedforward Using Neural Network ...\n')

    # Weight regularization parameter (we set this to 0 here).
    MLlambda = 0.0

    # Kludge: restore the MATLAB 1..10 label scheme, then shift to
    # 0-based indexing for later use in y_matrix.
    y[(y == 0)] = 10
    y = y - 1
    J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                          num_labels, X, y, MLlambda)

    print('Cost at parameters (loaded from ex4weights): ' + str(J) +
          '\n (this value should be about 0.287629)\n')

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")

    # %% =============== Part 4: Implement Regularization =============
    #  Once the cost is correct, add regularization.

    print('\nChecking Cost Function (with Regularization) ... \n')

    # Weight regularization parameter (we set this to 1 here).
    MLlambda = 1.0

    J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                          num_labels, X, y, MLlambda)

    print('Cost at parameters (loaded from ex4weights): ' + str(J) +
          '\n(this value should be about 0.383770)\n')

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")

    # %% ================ Part 5: Sigmoid Gradient  ===================
    #  Check the gradient of the sigmoid function.

    print('\nEvaluating sigmoid gradient...\n')
    g = sigmoidGradient(np.array([1, -0.5, 0, 0.5, 1]))
    print('Sigmoid gradient evaluated at [1 -0.5 0 0.5 1]:\n  ')
    print(g)
    print('\n\n')

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")

    # %% ================ Part 6: Initializing Parameters =============
    #  Randomly initialize the weights of the two-layer network.

    print('\nInitializing Neural Network Parameters ...\n')

    initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
    initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)

    # Unroll parameters
    initial_nn_params = np.hstack((initial_Theta1.ravel(order='F'),
                                   initial_Theta2.ravel(order='F')))
    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")

    # %% =============== Part 7: Implement Backpropagation ============
    #  Check the backpropagation gradients numerically.

    print('\nChecking Backpropagation... \n')

    # Check gradients by running checkNNGradients
    checkNNGradients()

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")

    # %% =============== Part 8: Implement Regularization =============
    #  Check the regularized cost and gradients.

    print('\nChecking Backpropagation (w/ Regularization) ... \n')

    # Check gradients by running checkNNGradients
    MLlambda = 3
    checkNNGradients(MLlambda)

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")

    # Also output the costFunction debugging values
    debug_J, _ = nnCostFunction(nn_params, input_layer_size,
                                hidden_layer_size, num_labels, X, y, MLlambda)

    print('\n\n Cost at (fixed) debugging parameters (w/ lambda = ' +
          '{0}): {1}'.format(MLlambda, debug_J))
    print('\n  (this value should be about 0.576051)\n\n')

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")

    # %% =================== Part 8b: Training NN =====================
    #  Train the network with an advanced optimizer ("fmincg" in the
    #  MATLAB original; scipy.optimize.minimize with CG here).

    print('\nTraining Neural Network... \n')

    # After completing the assignment, raise MaxIter to see how more
    # training helps (jkm: changed maxIter from 50 -> 400).
    options = {'maxiter': MAXITER}

    # You should also try different values of lambda
    MLlambda = 1

    # Create "short hand" for the cost function to be minimized
    costFunc = lambda p: nnCostFunction(p, input_layer_size, hidden_layer_size,
                                        num_labels, X, y, MLlambda)

    # With jac=True, scipy assumes costFunc returns (cost, gradient),
    # and the callback is invoked as callback(xk) after each iteration.

    # Setup a callback for displaying the cost at the end of each iteration
    class Callback(object):
        def __init__(self):
            self.it = 0

        def __call__(self, p):
            self.it += 1
            # Fixed: this was a Python 2 `print` statement — a syntax
            # error under Python 3, while the rest of this function
            # consistently uses the print() function.
            print("Iteration %5d | Cost: %e" % (self.it, costFunc(p)[0]))

    result = sci.minimize(costFunc, initial_nn_params, method='CG',
                          jac=True, options=options, callback=Callback())
    nn_params = result.x
    cost = result.fun

    # matlab: [nn_params, cost] = fmincg(costFunction, initial_nn_params, options);

    # Obtain Theta1 and Theta2 back from nn_params
    Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],
                        (hidden_layer_size, (input_layer_size + 1)),
                        order='F')

    Theta2 = np.reshape(nn_params[hidden_layer_size * (input_layer_size + 1):],
                        (num_labels, (hidden_layer_size + 1)),
                        order='F')

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")

    # %% ================= Part 9: Visualize Weights ==================
    #  "Visualize" the network by displaying what features the hidden
    #  units capture.

    print('\nVisualizing Neural Network... \n')

    displayData(Theta1[:, 1:])

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")

    # %% ================= Part 10: Implement Predict =================
    #  Predict labels for the training set and compute accuracy.

    pred = predict(Theta1, Theta2, X)

    # JKM - my array was column stacked - don't understand why this works
    pp = np.row_stack(pred)
    accuracy = np.mean(np.double(pp == y)) * 100

    print('\nTraining Set Accuracy: {0} \n'.format(accuracy))

    # Pause
    print("Program paused. Press Ctrl-D to continue.\n")
    code.interact(local=dict(globals(), **locals()))
    print(" ... continuing\n ")

    # ========================================

    # All Done!
    return

def select_meta(df):
    """Return df's column names with 'Patient' and 'FVC' removed.

    Args:
        df: DataFrame whose remaining columns serve as meta features.

    Returns:
        list[str]: feature column names, in original order.

    Raises:
        ValueError: if either excluded column is absent (same as the
            original list.remove behavior).
    """
    feature_cols = list(df.columns)
    for excluded in ('Patient', 'FVC'):
        feature_cols.remove(excluded)
    return feature_cols


def submission(pred_FVC, pred_confidence):
    """Write predictions into the sample submission and save it.

    Loads the competition's sample_submission.csv from args.data_folder,
    fills in the FVC and Confidence columns, and writes submission.csv
    to args.output_folder.
    """
    template = pd.read_csv(args.data_folder + '/sample_submission.csv')
    template['FVC'] = pred_FVC
    template['Confidence'] = pred_confidence
    template.to_csv(args.output_folder + '/submission.csv', index=False)


if __name__ == '__main__':
    # Entry point: build the dataset, train the model, predict FVC and
    # confidence for the submission rows, and write submission.csv.
    train, sub = pfp_dataset(args.data_folder)  # prepare the data
    train_model(df=train,
                meta=select_meta(train),
                epochs=args.epochs,
                output_folder=args.output_folder)

    # num_model=5: presumably an ensemble over 5 saved models —
    # confirm against the predict() implementation.
    pred_FVC, pred_confidence = predict(df=sub,
                                        meta=select_meta(train),
                                        num_model=5,
                                        output_folder=args.output_folder)

    submission(pred_FVC, pred_confidence)