Example #1
    def test_lasso(self):
        regularizer_weight = 0.1
        loss, predict, update = lasso(regularizer_weight=regularizer_weight,
                                      transform=self.transform)
        w, x, learning_rate = self.wxlr()
        d = self.transform(x).size
        u = np.zeros(d)
        v = np.zeros(d)

        y = +1
        yhat = predict(u, v, x)
        self.assertEqual(yhat, 0)
        self.assertAlmostEqual(loss(u, v, x, y), .5)
        u1, v1 = update(u, v, x, y, learning_rate)
        self.np_almost_equal(u1, np.array([.36, .45, 1.8]))
        self.np_almost_equal(v1, np.array([0, 0, 0]))

        y = -1
        yhat = predict(u1, v1, x)
        u2, v2 = update(u1, v1, x, y, learning_rate)
        # TODO: assert on yhat, u2, and v2 (the original test stopped here)
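Note: the test above assumes a `lasso` factory that keeps two non-negative weight vectors `u` and `v` with `w = u - v`, a standard reparameterization for L1-regularized regression. A minimal sketch of the assumed interface follows; the tested update rule is not reproduced here, and `transform(x)` is assumed to return the feature vector.

import numpy as np

def lasso(regularizer_weight, transform):
    # Sketch of the assumed interface only. The weight vector is represented
    # as w = u - v with u, v >= 0, a common reparameterization that turns the
    # L1 penalty |w|_1 into the smooth linear term sum(u) + sum(v).
    def predict(u, v, x):
        return np.dot(u - v, transform(x))

    def loss(u, v, x, y):
        error = predict(u, v, x) - y
        return 0.5 * error * error  # matches loss(0, 0, x, +1) == 0.5 above

    def update(u, v, x, y, learning_rate):
        raise NotImplementedError  # the tested update rule is not shown here

    return loss, predict, update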
Example #2
def newt(symptom, description, macid):
    print(symptom, description)
    t_id = createticket.loginAndCreateTickets(symptom, description)
    print(t_id)
    predict(macid)
    generate_mail.raiseticket('*****@*****.**', 'issue', t_id)
    return str(t_id)
Example #3
def test_predict_errors():
	#Errors and edge cases relating to predict()
	assert predict("John Ives", "yahoo.com") == "Unknown. No email address with that domain in our records."
	assert predict("John Ferguson", "bing.com") == "Unknown. No email address with that domain in our records."
	assert predict("Paul Irish", "google.com") == "Unknown. There are at least 2 equally compelling candidates"
	assert predict("Dr. Jason Henry Simon Birenbaum", "apple.com") == "*****@*****.**"
	assert predict("Sarah Wilson", "alphasights.com") == "*****@*****.**"
Example #4
def pet_brainmask_convnet(source_dir,
                          target_dir,
                          ratios,
                          feature_dim=2,
                          batch_size=2,
                          nb_epoch=10,
                          images_to_predict=None,
                          clobber=False,
                          model_name=None):
    images = prepare_data(source_dir, target_dir, ratios, batch_size,
                          feature_dim, clobber)

    ### 1) Define architecture of neural network
    model = make_model(batch_size)

    ### 2) Train network on data

    if model_name is None:
        model_name = set_model_name(target_dir, feature_dim)
    if not exists(model_name) or clobber:
        # If model_name does not exist, or the user wishes to write over (clobber)
        # the existing model, then train a new model and save it
        X_train = np.load(prepare_data.train_x_fn + '.npy')
        Y_train = np.load(prepare_data.train_y_fn + '.npy')
        X_test = np.load(prepare_data.test_x_fn + '.npy')
        Y_test = np.load(prepare_data.test_y_fn + '.npy')
        model = compile_and_run(model, X_train, Y_train, X_test, Y_test,
                                prepare_data.batch_size, nb_epoch)
        model.save(model_name)

    ### 3) Produce prediction
    predict(model_name, source_dir, target_dir, images, images_to_predict)

    return 0
Example #5
def test_predict():

    modelpath = os.path.join(SAVE_DIR, "InceptionResNetV2_Att.hdf5")
    savepath = os.path.join(SAVE_DIR, "InceptionResNetV2_Att-result.txt")
    labelspath = 'validlabels.txt'
    print('start predict')
    predict(modelpath, valid_data, savepath)
    valid(labelspath, savepath)
Example #6
def nnCostFunction(Theta1, Theta2, input_layer_size, hidden_layer_size,
                   num_labels, X, y, l):
    m = X.shape[0]  # size of the training set
    J = 0
    Theta1_grad = np.fromfunction(getSize, (Theta1.shape[0], Theta1.shape[1]))
    Theta2_grad = np.fromfunction(getSize, (Theta2.shape[0], Theta2.shape[1]))

    Y = np.zeros([m, num_labels])

    for i in range(m):
        Y[i, y[i] - 1] = 1  # Y[:, 0] stands for label 1, Y[:, 1] for label 2, ..., Y[:, 9] for label 0

    (p, h1, h2) = predict(Theta1, Theta2, X, False)

    tmp = np.dot(np.log(h2), -Y.T) - np.dot(np.log(1 - h2), (1 - Y.T))
    tmp = np.multiply(tmp, np.eye(m))

    sumJ = 0
    for i in range(m):
        sumJ += tmp[i, i]

    T1 = copy.deepcopy(Theta1)  # note: a deep copy is required here
    # T1 = Theta1
    T1[:, 0] = 0
    T2 = copy.deepcopy(Theta2)
    # T2 = Theta2
    T2[:, 0] = 0
    J = 1.0 * sumJ / m + 1.0 * l / (2 * m) * (np.sum(pow(T1, 2)) +
                                              np.sum(pow(T2, 2)))

    Delta_2 = np.zeros([num_labels, hidden_layer_size + 1])
    Delta_1 = np.zeros([hidden_layer_size, input_layer_size + 1])

    biasUnit = np.ones([m, 1])
    t1 = np.dot(np.concatenate((biasUnit, X), axis=1), Theta1.T)
    sG = sigmoidGradient(np.concatenate((biasUnit, t1), axis=1))
    for i in range(m):
        example = np.matrix(X[i])
        (p, h1, h2) = predict(Theta1, Theta2, example, False)
        sG_t = np.matrix(sG[i]).T
        delta3 = h2.T - np.matrix(Y[i, :]).T
        delta2 = np.multiply(np.dot(Theta2.T, delta3), sG_t)
        row = delta2.shape[0]
        delta2 = delta2[1:row, :]

        biasUnit = np.ones([example.shape[0], 1])
        Delta_2 = Delta_2 + np.dot(delta3,
                                   np.concatenate((biasUnit, h1), axis=1))
        Delta_1 = Delta_1 + np.dot(delta2,
                                   np.concatenate((biasUnit, example), axis=1))

    Theta1_grad = Delta_1 * 1.0 / m + 1.0 * l / m * T1
    Theta2_grad = Delta_2 * 1.0 / m + 1.0 * l / m * T2

    # grad = pack_thetas(Theta1_grad, Theta2_grad)
    print('Cost = ' + str(J))

    return J, np.array(Theta1_grad), np.array(Theta2_grad)
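Note: `nnCostFunction` relies on a `sigmoidGradient` helper that is not shown. A minimal sketch of the standard definition such backpropagation code uses:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoidGradient(z):
    # derivative of the sigmoid: g'(z) = g(z) * (1 - g(z))
    g = sigmoid(z)
    return np.multiply(g, 1 - g)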
Example #7
def main():
    # # TODO 1: preprocess the data (impute missing values, reduce dimensionality with PCA)
    # train_feature_process(train_all=True)
    # predict_feature_process(predict_all=True)

    # TODO 2: train the model
    train()

    # TODO 3: predict and write the results to ./result/svc.csv
    predict()
Example #8
 def newt(symptom, description, macid):
     print(symptom, description)
     query = "select emailid from userdetails where MAC_ID = '" + macid + "';"
     df = create_db.fetchquery(query)
     email = df.iloc[0]["emailid"]
     t_id = createticket.loginAndCreateTickets(symptom, description)
     #     t_id = "456"
     print(t_id)
     predict(macid)
     generate_mail.raiseticket(str(email), 'issue', t_id)
     return str(t_id)
Example #9
    def process_pixels(self):
        # grab Canvas pixels
        pixelmap = QPixmap.grabWidget(self)

        # resize pixels and convert to Image
        resized_image = self.resize(pixelmap)

        # make prediction with pixels as input
        predict(self.get_pixels(resized_image))

        # empty canvas
        self.passed_points = []
        self.repaint()
Example #10
def minc_keras(source_dir, target_dir, input_str, label_str, ratios, feature_dim=2, batch_size=2, nb_epoch=10, images_to_predict=None, clobber=False, model_fn='model.hdf5',model_type='model_0_0', images_fn='images.csv',  verbose=1 ):

    data_dir = target_dir + os.sep + 'data'+os.sep
    report_dir = target_dir+os.sep+'report'+os.sep
    train_dir = target_dir+os.sep+'predict'+os.sep+'train'+os.sep
    test_dir = target_dir+os.sep+'predict'+os.sep+'test'+os.sep
    validate_dir = target_dir+os.sep+'predict'+os.sep+'validate'+os.sep
    model_dir=target_dir+os.sep+'model'
    if not exists(train_dir): makedirs(train_dir)
    if not exists(test_dir): makedirs(test_dir)
    if not exists(validate_dir): makedirs(validate_dir)
    if not exists(data_dir): makedirs(data_dir)
    if not exists(report_dir): makedirs(report_dir) 
    if not exists(model_dir): makedirs(model_dir) 

    images_fn = set_model_name(images_fn, report_dir, '.csv')
    [images, image_dim] = prepare_data(source_dir, data_dir, report_dir, input_str, label_str, ratios, batch_size,feature_dim, images_fn,  clobber=clobber)

    ### 1) Define architecture of neural network
    model = make_model(image_dim, model_type)

    ### 2) Train network on data

    model_fn = set_model_name(model_fn, model_dir)
    history_fn = splitext(model_fn)[0] + '_history.json'

    print('Model:', model_fn)
    if not exists(model_fn) or clobber:
        # If model_fn does not exist, or the user wishes to write over (clobber)
        # the existing model, then train a new model and save it
        X_train=np.load(prepare_data.train_x_fn+'.npy')
        Y_train=np.load(prepare_data.train_y_fn+'.npy')
        X_validate=np.load(prepare_data.validate_x_fn+'.npy')
        Y_validate=np.load(prepare_data.validate_y_fn+'.npy')
        model,history = compile_and_run(model, model_fn, history_fn, X_train,  Y_train, X_validate,  Y_validate, nb_epoch)

    ### 3) Evaluate model on test data
    model = load_model(model_fn)
    X_test=np.load(prepare_data.test_x_fn+'.npy')
    Y_test=np.load(prepare_data.test_y_fn+'.npy')
    test_score = model.evaluate(X_test, Y_test, verbose=1)
    print('Test: Loss=', test_score[0], 'Dice:', test_score[1])
    np.savetxt(report_dir+os.sep+'model_evaluate.csv', np.array(test_score) )

    ### 4) Produce prediction
    #predict(model_fn, validate_dir, data_dir, images_fn, images_to_predict=images_to_predict, category="validate", verbose=verbose)
    #predict(model_fn, train_dir, data_dir, images_fn, images_to_predict=images_to_predict, category="train", verbose=verbose)
    predict(model_fn, test_dir, data_dir, images_fn, images_to_predict=images_to_predict, category="test", verbose=verbose)
    plot_loss(history_fn, model_fn, report_dir)

    return 0
Example #11
def predict_tta(model, ids, output, kind='test', batch_size=32, n_tta=5):
    size = dataset.SIZE
    base_transform = dataset.val_transform()
    
    preds = np.zeros((1+n_tta, len(ids), dataset.NUM_CLASSES), dtype=np.float32)
    preds[0] = predict(model, ids, transform=base_transform, kind=kind, batch_size=batch_size)
            
    tta_transform = dataset.train_transform()
    
    for tta_idx in range(1, n_tta + 1):  # rows 1..n_tta; row 0 already holds the base prediction
        preds[tta_idx] = predict(model, ids, transform=tta_transform, kind=kind, batch_size=batch_size)
        
    mean_preds = np.mean(preds, axis=0)
    np.save(output, mean_preds)
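A hypothetical invocation of `predict_tta` (the ids and output path are placeholders, and `model` is assumed to be already trained); the averaged class probabilities land in the given .npy file:

import numpy as np

test_ids = ['id_001', 'id_002']  # placeholder ids; real code would load the test split
predict_tta(model, test_ids, 'preds_tta.npy', kind='test', batch_size=32, n_tta=5)
mean_preds = np.load('preds_tta.npy')  # shape: (len(test_ids), dataset.NUM_CLASSES)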
Example #12
def test_predict_errors():
    #Errors and edge cases relating to predict()
    assert predict(
        "John Ives", "yahoo.com"
    ) == "Unknown. No email address with that domain in our records."
    assert predict(
        "John Ferguson", "bing.com"
    ) == "Unknown. No email address with that domain in our records."
    assert predict(
        "Paul Irish", "google.com"
    ) == "Unknown. There are at least 2 equally compelling candidates"
    assert predict("Dr. Jason Henry Simon Birenbaum",
                   "apple.com") == "*****@*****.**"
    assert predict("Sarah Wilson",
                   "alphasights.com") == "*****@*****.**"
Example #13
 def newt(subject, description, macid):
     print(subject, description, macid)
     r_json = manage_engine_ticket_raising.ticket_raising(
         subject, description, config["DEFAULT"]["name manage"],
         config["DEFAULT"]["id manage"])
     #     print(r_json)
     t_id = r_json['request']['id']
     #     t_id = "456"
     print(t_id)
     predict(macid)
     query = "select emailid from userdetails where MAC_ID = '" + macid + "';"
     df = create_db.fetchquery(query)
     email = df.iloc[0]["emailid"]
     mail_send.raiseticket(str(email), 'issue', t_id)
     return str(t_id)
Example #14
 def __init__(self):
     self.__entrenamiento = train()
     self.__predecir = predict()
     self.__connection = mysql.connector.connect(host='localhost',
                                                 database='####',
                                                 user='******',
                                                 password='******')
Example #15
    def getempty():
        print('start')
        d = request.json
        print('new')
        print(d)

        txt = d['txt']

        # splits = txt.split()
        print(txt)
        prediction = predict(txt)
        print(prediction)

        el = prediction['elements']
        char = ''
        word = ''
        if len(el) > 0 and 'char_position' in el[0]:
            char = el[0]['char_position']
            word = el[0]['word']
        elements = [{
            'char_position': char,
            'model_name': 'asaf',
            'word': word
        }]

        return jsonify({
            'ok': True,
            'text': txt,
            'elements': elements
            # 'error': 'Missing SUBJ element'
        })
Example #16
def home():
    print(request.form)
    vapi = None
    ll = None
    if request.method == 'POST':
        i = request.form["img"]
        l = request.form["link"]
        ll = request.form["l2"]
        user_input = {"article_link": l, "image_link": i}
        b, t = predict(user_input["article_link"], user_input["image_link"])
        vapi = vap.get_json(request.form["img"])
        print(vapi)
        x = list()
        for j in json.loads(vapi):
            scores = j["scores"]
            x.append(max(scores, key=scores.get))  # key with the highest score

        vap.dat = {"img": vapi, "wat": enum[np.argmax(b)], "xd": x, "tax": t}
    return render_template('main/home.html', vap=vapi, l2=ll)
Example #17
def main():

    model = torch.load(cfg.MODEL_PATH)
    video_path =  sys.argv[1]
    video  = cv2.VideoCapture(video_path)

    ret, frame = video.read()
    out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                          30, (frame.shape[1], frame.shape[0]))
    frames_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))  # frame count comes from the capture, not the writer
    print(frames_count)
    #while ret:
    for i in tqdm(range(1000)):
        result = predict(frame, model)

        for left_up, right_bottom, class_name, prob in result:
            cv2.rectangle(frame, left_up, right_bottom, (124,32,225), 2)
            label = class_name + str(round(prob, 2))
            text_size, baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)
            p1 = (left_up[0], left_up[1] - text_size[1])
            cv2.rectangle(frame, (p1[0] - 2 // 2, p1[1] - 2 - baseline), (p1[0] + text_size[0], p1[1] + text_size[1]), (124,32,225),-1)
            cv2.putText(frame, label, (p1[0], p1[1] + baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, 8)

        
        out.write(frame)
        ret, frame = video.read()
    out.release()
Example #18
    def post(self):  # prefer POST
        parser = reqparse.RequestParser()  # parse the args
        parser.add_argument('request', type=dict)  # get the data
        args = parser.parse_args()
        response = {}

        try:
            inp = args['request']
            print("Input recieved : ", inp)
            results = predict(inp)

        except Exception as e:
            print("Inside Exception")
            print("Exception occured due to {0}".format(e))
            response["response"] = {}
            response["response"]["Message"] = "Failure. Check input parameters"
            response["response"]["Values"] = {"RecommendedProduct": "NoResult"}
            return jsonify(response)

        else:
            # print("Exception due to {0}".format(e))
            #
            response["response"] = {}
            response["response"]["Values"] = results
            response["response"]["Message"] = "Success"
            return jsonify(response)
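For context, a hedged sketch of how a client might call this Flask-RESTful endpoint (the URL, route, and payload fields are assumptions, not taken from the source):

import requests

payload = {"request": {"customer_id": "42"}}  # field names are hypothetical
resp = requests.post("http://localhost:5000/predict", json=payload)  # route assumed
print(resp.json())  # e.g. {"response": {"Values": ..., "Message": "Success"}}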
Example #19
    def test_perceptron(self):
        loss, predict, update = perceptron(self.transform)
        w, x, learning_rate = self.wxlr()

        y = +1
        self.assertAlmostEqual(loss(w, x, y), 0)
        self.assertEqual(predict(w, x), +1)
        self.np_almost_equal(update(w, x, y, learning_rate),
                             w)

        y = -1
        self.assertAlmostEqual(loss(w, x, y), 74)
        self.assertEqual(predict(w, x), +1)
        expected_update = w + learning_rate * y * np.array([4, 5, 20])
        self.np_almost_equal(update(w, x, y, learning_rate),
                             expected_update)
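Note: a minimal `perceptron` factory consistent with the assertions above; the tested module may differ in detail, and `transform(x) == np.array([4, 5, 20])` is an assumption implied by expected_update.

import numpy as np

def perceptron(transform):
    # Sketch only: sign prediction, hinge-at-zero loss, mistake-driven update.
    def predict(w, x):
        return +1 if np.dot(w, transform(x)) >= 0 else -1

    def loss(w, x, y):
        return max(0.0, -y * np.dot(w, transform(x)))

    def update(w, x, y, learning_rate):
        if predict(w, x) != y:  # update only on a mistake
            return w + learning_rate * y * transform(x)
        return w

    return loss, predict, update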
Example #20
    def post(self, request, format=None):
        """This will trigger when a POST request arrives."""
        ph_val = request.data.get("ph_value", 0.0)
        temp_val = request.data.get("temp_val", 0.0)
        humidity_val = request.data.get("humidity_val", 0.0)
        rainfall_val = request.data.get("rainfall_val", 0.0)
        moisture_val = request.data.get("moisture_val", 0.0)

        pass_list = [
            [
                ph_val,
                temp_val,
                humidity_val,
                rainfall_val,
                moisture_val,
            ]
        ]
        result = predict(pass_list)

        return Response(
            {"Crop": result},
            status=status.HTTP_201_CREATED,
        )
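A hedged example request against this view (the host and route are assumptions; the field names match the keys read above):

import requests

payload = {
    "ph_value": 6.5,
    "temp_val": 24.0,
    "humidity_val": 80.0,
    "rainfall_val": 200.0,
    "moisture_val": 40.0,
}
resp = requests.post("http://localhost:8000/api/predict/", json=payload)  # route assumed
print(resp.status_code, resp.json())  # expected: 201 {"Crop": ...}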
Example #21
def main():
    buffer = []
    t = 0
    previous_touch = ''

    while True:
        while t < N_SAMPLES:
            data = ser.readline()
            clear()
            print_epi(-1)
            if 0 <= t <= 10 or 31 <= t <= 40:
                print('.')
            if 11 <= t <= 20 or 41 <= t <= 50:
                print('..')
            if 21 <= t <= 30 or 51 <= t <= 60:
                print('...')
            string_data = data.decode().rstrip()
            buffer.append(string_data)
            t += 1
        string = ",".join(buffer)
        string_list = string.split(",")
        float_list = string_to_float(string_list)
        converted_list = convert_data(float_list[0])
        predictions, probabilities = predict(converted_list)
        clear()
        print_response(predictions, probabilities)
        buffer = []
        t = 0
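Note: the loop assumes a module-level serial handle and sample count roughly like the following; the port, baud rate, and exact N_SAMPLES value are assumptions (the progress-dot windows above suggest t runs 0..60):

import serial

N_SAMPLES = 61  # assumption: matches the 0-60 progress-dot windows above
ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)  # port and baud assumed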
Example #22
def genres():

  data = sio.loadmat('normalized_testdata.mat')
  X = data['X']

  theta = sio.loadmat('train_res_normal_2000.mat')

  theta_1 = theta['Theta1']
  theta_2 = theta['Theta2']

  print('start to predict')

  res = predict(theta_1, theta_2, X)

  print('prediction finished')

  for idx, data in enumerate(res):
    if data == 10:
      res[idx] = 0

  img_id = [i for i in range(1, len(res) + 1)]

  data = np.column_stack((np.array(img_id), np.array(res)))

  import time

  res_file = 'res%d.csv' % (int(time.time() * 1000))  # time.clock() was removed in Python 3.8

  with open(res_file, 'w') as f:
    f.write('ImageId,Label\n')
    np.savetxt(f, data, fmt='%i', delimiter=',')

  print('generated result file [%s]' % res_file)
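Note: this example and Example #46 below call a two-layer `predict(Theta1, Theta2, X)` that is not shown (other examples use a four-argument variant returning activations). A sketch of the standard feed-forward pass such code usually assumes, with 1-indexed labels and 10 standing in for digit 0, matching the remapping above:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def predict(Theta1, Theta2, X):
    # Sketch of the assumed forward pass; the real predict() may differ.
    m = X.shape[0]
    a1 = np.column_stack([np.ones(m), X])   # add bias unit
    a2 = sigmoid(a1 @ Theta1.T)
    a2 = np.column_stack([np.ones(m), a2])  # add bias unit
    a3 = sigmoid(a2 @ Theta2.T)
    return np.argmax(a3, axis=1) + 1        # labels are 1-indexed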
Example #23
def main():
    model_name = MODEL_FILENAME
    if len(sys.argv) == 2:
        model_name = sys.argv[1]
    print('Use model: ' + model_name)
    model = load_model(model_name)

    account_info = get_account_from_ini(INI_FILENAME)
    print('ID: "' + account_info['user_account'] + '" start login test ...')

    for i in range(sys.maxsize):  # Python 3: xrange and sys.maxint no longer exist
        driver = seleniumrequests.Chrome(DRIVER_PATH)
        driver.get(LOGIN_URL)

        img = get_vcode(driver)
        vcode = convert_vcode(img)

        print('Start predict (' + str(i) + ')!')
        vcode_str = predict(vcode, model_name, model)
        print('Get vcode: "' + vcode_str + '"')
        if_login = login(driver, account_info, vcode_str)
        print('Result: ' + str(if_login))

        if not if_login and not SHOW_IMG:
            ImageOps.invert(img)\
            .filter(ImageFilter.ModeFilter)\
            .convert('L')\
            .show()

        if not if_login or KEEP_TEST:
            driver.close()
        else:
            break

    input('Click ENTER to close!!')
Example #24
def home():
    print(request.form)
    vapi = None
    ll = None
    if request.method == 'POST':
        i = request.form["img"]
        l = request.form["link"]
        ll = request.form["l2"]
        user_input = {
            "article_link": l,
            "image_link": i
        }
        b, t = predict(user_input["article_link"], user_input["image_link"])
        vapi = vap.get_json(request.form["img"])
        print(vapi)
        x = list()
        for j in json.loads(vapi):
            scores = j["scores"]
            x.append(max(scores, key=scores.get))  # key with the highest score

        vap.dat = {"img": vapi, "wat": enum[np.argmax(b)], "xd": x, "tax": t}
    return render_template('main/home.html', vap=vapi, l2=ll)
Example #25
 def predict(self,
             input_list,
             confidence_threshold=.6,
             iou_threshold=.5,
             async_mode=False):
     batch_predictions = []
     get_from = 0
     input_size = input_list.shape[2]
     input_dict = {self.input_blob: input_list}
     request_handle = self.exec_net.requests[self.current_request_id]
     if async_mode:
         next_request_id = self.current_request_id + 1
         if next_request_id == self.num_requests:
             next_request_id = 0
     else:
         request_handle.wait()
         next_request_id = self.current_request_id
     self.exec_net.start_async(request_id=next_request_id,
                               inputs=input_dict)
     if async_mode:
         self.current_request_id = next_request_id
     request_handle.wait()
     pred_dict = request_handle.outputs
     for preds in pred_dict.values():
         preds = np.transpose(preds, [0, 2, 3, 1])
         get_to = get_from + 3
         batch_predictions.append(
             region_np(preds, self._ANCHORS[get_from:get_to], input_size))
         get_from = get_to
     batch_predictions = np.concatenate(batch_predictions, axis=1)
     return predict(batch_predictions, confidence_threshold, iou_threshold)
Example #26
def train_and_predict():
    result = []
    models = [
        CNN, FAST, CHAR_CNN, TEXT_ATT_BI_GRU, TEXT_ATT_BI_LSTM, TEXT_BI_GRU,
        TEXT_BI_LSTM, TEXT_GRU, TEXT_LSTM
    ]
    # models = [CNN, FAST]
    for model in models:
        model_type, best_score, best_epoch = train(model)
        result.append([model_type, best_score, best_epoch])
        predict(model)
    print('| Classifier         | val_categorical_accuracy | epochs |')
    print('| :----------------- | :----------------------- | :----- |')
    for r in result:
        print('| {}                | {}                   | {}     |'.format(
            r[0], r[1], r[2]))
Example #27
    def contour_detect(self, origin_image, threshold):
        # load the image and resize it to a smaller factor so that
        # the shapes can be approximated better
        resized = imutils.resize(origin_image, width=300)
        ratio = origin_image.shape[0] / float(resized.shape[0])
        # convert the resized image to grayscale, blur it slightly,
        # and threshold it
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        thresh = cv2.threshold(blurred, threshold, 255, cv2.THRESH_BINARY)[1]
        thresh = 255 - thresh
        # find contours in the thresholded image
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        # loop over the contours
        rects = []
        for c in cnts:
            # compute the center of the contour, then detect the name of the
            # shape using only the contour
            M = cv2.moments(c)
            if M["m00"] != 0:
                cX = int((M["m10"] / M["m00"]) * ratio)
                cY = int((M["m01"] / M["m00"]) * ratio)
            else:
                continue
            shape = self.contour2shape(c)
            # multiply the contour (x, y)-coordinates by the resize ratio,
            # then get the rect
            if shape == "circle":
                c = c.astype("float")
                c *= ratio
                c = c.astype("int")
                x, y, w, h = cv2.boundingRect(c)
                rects.append((x, y, w, h, cX, cY))

        rects = self.remove_overlap(rects)

        for rect in rects:
            x, y, w, h, cX, cY = rect
            crop = origin_image[y - 10:y + h + 10, x - 10:x + w + 10]
            grayImg = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
            pro_con = predict(grayImg)
            # pro_con = 1
            cv2.rectangle(origin_image, (x - 10, y - 10),
                          (x + w + 10, y + h + 10), (0, 255, 0), 2)
            # cv2.putText(origin_image, "circle", (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            temp_text = ""
            if pro_con == 0:
                temp_text = "back"
            if pro_con == 1:
                temp_text = "front"
            cv2.putText(origin_image, temp_text, (cX, cY + 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            coordinate = "(" + str(cX) + ", " + str(cY) + ")"
            cv2.putText(origin_image, coordinate, (cX, cY + 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            # cv2.drawContours(origin_image, [c], -1, (0, 255, 0), 2)
        return origin_image
Example #28
def test_jsonload(newtraining):
    #Change TRAININGDATA temporarily
    #Proof of concept that you can run more interesting tests using simulated data
    TRAININGDATA = newtraining
    assert TRAININGDATA['google.com'] == {"Sergey Brin": "*****@*****.**"}
    assert TRAININGDATA == {"google.com": {"Sergey Brin": "*****@*****.**"}}
    ##Now google.com is just Sergey. So we can predict google addresses now
    assert predict("Paul Irish", "google.com") == "*****@*****.**"
Example #29
def get_labelname(imgpath):
    modelpath = "../05_model/model_cpu.pk"
    model = load_model(modelpath)
    inputdata = convert_data_to_variable_type(imgpath)
    index, p = predict(model, inputdata)
    labellist = ["a", "i", "u", "e", "o"]
    label = evaluate(p, index, labellist)
    return label
Example #30
def predict_btn_clicked():
    ttX = text.get('1.0', 'end')
    prediction = predict(ttX, tfidf_vect, selector, model)
    sentiment = 'Positive' if prediction[0][0] > threshold else 'Negative'
    senti.set(
        str(sentiment) +
        ' || Probability of the review being a positive sentiment is: ' +
        str(prediction[0][0]))
Example #31
def index(request):
    if request.method == 'POST':
        json_result = json.loads(request.body.decode().replace(
            "'", "\"")).get('name')
        val = predict(json_result)
        return HttpResponse(val)
    else:
        return HttpResponse("请求错误")
Example #32
 def loss(w, z):
     '''Return the total loss: 1/2 the squared distance from each sample
     in z to its nearest centroid.
     '''
     sum_loss = 0
     for row_index in range(z.shape[0]):
         x = z[row_index]
         k_star, sample_loss = predict(w, x)
         sum_loss += sample_loss
     return sum_loss
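Note: this loss, and the online k-means `update` in Example #51 below, assume a `predict(w, x)` that returns the index of the nearest centroid and half the squared distance to it. A minimal sketch under that assumption:

import numpy as np

def predict(w, x):
    # w: (k, d) array of centroids; x: (d,) sample. Returns the nearest
    # centroid's index and 1/2 the squared distance, per the loss() docstring.
    distances_sq = np.sum((w - x) ** 2, axis=1)
    k_star = int(np.argmin(distances_sq))
    return k_star, 0.5 * distances_sq[k_star]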
Example #33
def index():
    if request.method == 'POST':
        wav_b64 = request.get_data(as_text=True)
        result = predict(wav_b64[22:])

        print(result)
        print(MMD_scores)

    return render_template('index.html')
Example #34
    def save_to_database(self, row):
        """Save a data row to the database."""
        client = MongoClient('localhost', 27017)
        db = client['fraud']
        collection = db['event']

        prediction = predict(row).tolist()
        row['prediction'] = prediction
        collection.insert_one(row)
Example #35
def predict_by_all(auction_id):
    LR1 = predict.predict_LR(auction_id)
    LR2 = predict.predict_LR2(auction_id)
    KN = predict.predict_KN(auction_id)

    LDAone = PredictOne.predict(auction_id)
    LDAall = PredictAll.predict(auction_id)

    PredictList = [LR1[0], LR2[0], KN[0], LDAone[0], LDAall[0]]
    return predict(PredictList)
Example #36
def minc_keras(source_dir, target_dir, input_str, label_str, ratios, feature_dim=2, batch_size=2, nb_epoch=10, images_to_predict=None, clobber=False, model_fn='model.hdf5',model_type='custom', images_fn='images.csv',nK="16,32,64,128", n_dil=None, kernel_size=3, drop_out=0, loss='categorical_crossentropy', activation_hidden="relu", activation_output="sigmoid", metric="categorical_accuracy", pad_base=0,  verbose=1, make_model_only=False ):
    
    setup_dirs(target_dir)

    images_fn = set_model_name(images_fn, report_dir, '.csv')
    [images, data] = prepare_data(source_dir, data_dir, report_dir, input_str, label_str, ratios, batch_size,feature_dim, images_fn,pad_base=pad_base,  clobber=clobber)

    ### 1) Define architecture of neural network
    Y_validate = np.load(data["validate_y_fn"] + '.npy')
    nlabels = len(np.unique(Y_validate))  # number of unique labels in the labeled images
    model = make_model(data["image_dim"], nlabels, nK, n_dil, kernel_size, drop_out, model_type, activation_hidden=activation_hidden, activation_output=activation_output)
    if make_model_only: return 0

    ### 2) Train network on data
    model_fn = set_model_name(model_fn, model_dir)
    history_fn = splitext(model_fn)[0] + '_history.json'

    print('Model:', model_fn)
    if not exists(model_fn) or clobber:
        # If model_fn does not exist, or the user wishes to write over (clobber)
        # the existing model, then train a new model and save it
        X_train=np.load(data["train_x_fn"]+'.npy')
        Y_train=np.load(data["train_y_fn"]+'.npy')
        X_validate=np.load(data["validate_x_fn"]+'.npy')
        model,history = compile_and_run(model, model_fn, history_fn, X_train,  Y_train, X_validate,  Y_validate, nb_epoch, nlabels, loss=loss, verbose=verbose)

    ### 3) Evaluate model on test data
    model = load_model(model_fn)
    X_test=np.load(data["test_x_fn"]+'.npy')
    Y_test=np.load(data["test_y_fn"]+'.npy')
    if loss in categorical_functions :
        Y_test=to_categorical(Y_test)
    test_score = model.evaluate(X_test, Y_test, verbose=1)
    print('Test: Loss=', test_score[0], 'Metric=', test_score[1])
    #np.savetxt(report_dir+os.sep+'model_evaluate.csv', np.array(test_score) )

    ### 4) Produce prediction
    #predict(model_fn, validate_dir, data_dir, images_fn, images_to_predict=images_to_predict, category="validate", verbose=verbose)
    #predict(model_fn, train_dir, data_dir, images_fn, images_to_predict=images_to_predict, category="train", verbose=verbose)
    predict(model_fn, test_dir, data_dir, images_fn, loss, images_to_predict=images_to_predict, category="test", verbose=verbose)
    plot_loss(metric, history_fn, model_fn, report_dir)

    return 0
Example #37
    def test_adaline(self):
        loss, predict, update = adaline(self.transform)
        w, x, learning_rate = self.wxlr()

        y = +1
        error = -73
        self.assertAlmostEqual(loss(w, x, y), 0.5 * error * error)
        self.assertEqual(predict(w, x), +1)
        expected_update = w + learning_rate * error * np.array([4, 5, 20])
        self.np_almost_equal(update(w, x, y, learning_rate),
                             expected_update)

        y = -1
        error = -75
        self.assertAlmostEqual(loss(w, x, y), 0.5 * error * error)
        self.assertEqual(predict(w, x), +1)
        expected_update = w + learning_rate * error * np.array([4, 5, 20])
        self.np_almost_equal(update(w, x, y, learning_rate),
                             expected_update)
Example #38
def predict_by_all(auction_id):
	LR1 = predict.predict_LR(auction_id)
	LR2 = predict.predict_LR2(auction_id)
	KN = predict.predict_KN(auction_id)
	
	LDAone = PredictOne.predict(auction_id)
	LDAall = PredictAll.predict(auction_id)
	
	PredictList = [LR1[0],LR2[0],KN[0],LDAone[0],LDAall[0]]
	return predict(PredictList)
Example #39
def main():
    # print(sys.argv[1])
    if sys.argv[1] == "train":
        train()
    elif sys.argv[1] == "detect":
        try:
            sys.argv[2]
        except IndexError:
            print('use "python main.py detect <img_path>" to detect')
        else:
            path = sys.argv[2]
            img = loadimg(path)
            th_img = morphology(img)
            cv2.imshow('winname', th_img)
            # cv2.imshow('realimg', img)
            predict(img, th_img)
        # cv2.waitKey(0)
    else:
        print('\n\nuse "python main.py train" to train')
        print('use "python main.py detect <img_path>" to detect')
Example #40
 def gradient_loss(theta, design, target):
     # optimize around use cases, as speed is important
     if design.ndim == 1:
         error = predict(theta, design) - target
         loss = error * error
         b, w = split(theta)
         grad_b = 2.0 * error
         grad_w = 2.0 * error * design  # gradient of error^2 w.r.t. the weights
         grad = np.hstack((grad_b, grad_w))
         return grad, loss
     elif design.ndim == 2:
         num_samples = design.shape[0]
         error = predict(theta, design) - target
         loss = np.dot(error, error) / num_samples
         grad_b = 2.0 * np.sum(error) / num_samples          # bias gradient
         grad_w = 2.0 * np.dot(error, design) / num_samples  # weight gradient
         grad = np.hstack((grad_b, grad_w))
         return grad, loss
     else:
         assert design.ndim == 1 or design.ndim == 2
Example #41
def test_jsonload(newtraining):
	#Change TRAININGDATA temporarily
	#Proof of concept that you can run more interesting tests using simulated data
	TRAININGDATA = newtraining
	assert TRAININGDATA['google.com'] == {"Sergey Brin" : "*****@*****.**"}
	assert TRAININGDATA == {
		"google.com" : {
	        "Sergey Brin" : "*****@*****.**"}
	}
	##Now google.com is just Sergey. So we can predict google addresses now
	assert predict("Paul Irish", "google.com") == "*****@*****.**"
Example #42
 def loss_error(theta, design, target):
     '''Return avg. loss and error '''
     yhat = predict(theta, design)
     error = yhat - target
     b, w = split(theta)
     # assume num dimensions is 1 or 2
     num_samples = float(1 if design.ndim == 1 else design.shape[0])
     # use the fact that np.dot() for scalars is defined as multiplication
     loss = \
         (np.dot(error, error) / num_samples) + \
         regularizer_weight * np.dot(w, w)
     return loss, error
Example #43
def main(url_page, Total_comment, vectoriser, LRmodel):

    box_comment = comment_crawl(url_page, Total_comment)
    df = predict(vectoriser, LRmodel, box_comment['comment'])
    #print(df)
    pos = len(df[df['sentiment'] == 1])/len(df)
    neg = len(df[df['sentiment'] == -1])/len(df)

    box_comment['sentiment'] = df['sentiment']
    box_comment.to_csv('Results.csv', index=False)

    print(f'sentiment analysis of {url_page} over {Total_comment} comments:')
    print(f'positive: {pos*100}%, negative: {neg*100}%')
Example #44
    def test_svm(self):
        regularizer_weight = 0.1
        loss, predict, update = svm(regularizer_weight=regularizer_weight,
                                    transform=self.transform)
        w, x, learning_rate = self.wxlr()

        y = +1
        yhat = predict(w, x)
        self.assertEqual(yhat, +1)
        expected = 1.4
        self.assertAlmostEqual(loss(w, x, y), expected)
        w1 = update(w, x, y, learning_rate)
        expected = w - learning_rate * regularizer_weight * w
        self.np_almost_equal(w1, expected)

        y = -1
        yhat = predict(w, x)
        self.assertEqual(yhat, +1)
        expected = 76.4
        self.assertAlmostEqual(loss(w, x, y), expected)
        w2 = update(w1, x, y, learning_rate)
        expected = np.array([0.5801, 1.4602, 0.9403])
        self.np_almost_equal(w2, expected)
Example #45
def CHARACTERIZER(Theta1, Theta2, inFile):
    # PREPROCESSOR crawls a captcha image and saves it in CSV format => 'X.txt'
    PREPROCESSOR(inFile)

    ## =========== Part 2: Loading X and Parameters =============
    X = loadtxt('./CaptchaLibrary/CHARACTERIZER/X.txt')
    m = X.shape[0]

    #print('Saved Neural Network Parameters are all loaded...\n')
    ## ================= Part 3: Implement Predict =================
    predSTR = 'BCEFGHIJKLMNPRSTUVWXYZ'

    guessSTR = ''
    for i in range(m):
        pred = predict(Theta1, Theta2, X[i, :][newaxis])
        guessSTR = guessSTR + predSTR[pred]
    return guessSTR
Example #46
def test():

  data = sio.loadmat('pro_train.mat')
  X = data['X']
  y = data['y']

  for idx, data in enumerate(y):
    if data == 0:
      y[idx] = 10

  #theta = sio.loadmat('train_res.mat')
  theta = sio.loadmat('train_res2.mat')

  theta_1 = theta['Theta1']
  theta_2 = theta['Theta2']

  print('start to calc')

  res = predict(theta_1, theta_2, X)

  print('calculation finished')

  print('start to check')

  n_samples = len(y)
  corr = 0
  for i , data in enumerate(y):
    if res[i] == data:
      corr += 1


  #for idx, data in enumerate(res):
  #  if data == 10:
  #    res[idx] = 0

  #img_id = [i for i in xrange(1, len(res) + 1)]

  #data = np.column_stack((np.array(img_id), np.array(res)))

  #import time

  #with open('res%d.csv' %(int(time.clock() * 10000)), 'w') as f:
  #  f.write('ImageId,Label\n')
  #  np.savetxt(f, data, fmt='%i', delimiter=',')

  print('Training set accuracy:', float(corr) / n_samples * 100)
Example #47
def actual_prog(seq, forward, reverse) :
	query = Amplicon()
	query.setLabel(name)
	query.setSequence(seq)

	#check for non [ACTG] chars
	query.cleanSequence()

	forward = forward.upper()
	reverse = reverse.upper()

	query.setSequence(query.sequence.upper())
	query.setPrimerPair(forward,reverse)

	#check for non [ACTG] chars
	query.cleanSequence()

	#foo = html()
	#foo.cssUp()

	checking = query.checkHybridization(query.getSequence(),query.forward.sequence,query.reverse.sequence)
	query.setSequence(checking[1])


	ampliconList = []
	ampliconList.append(query)

	#query.checkHybridization()
	myPredict = predict()
	myPredict.writeHandleForR(ampliconList,1)
	myListOfEfficiencies = myPredict.predictGam()

	ampliconList[0].setEfficiency(myListOfEfficiencies[0])
	#print name
	effect = round(float("".join(myListOfEfficiencies)),2)
	print(str(effect) + " amplicons are synthesized from one template at each cycle of this PCR reaction")


	call(["rm", "gamResult.data"])
	call(["rm", "primerDataForR.dat"])
Example #48
    anchore_network = model_triplet['anchore_network']
    feature_size = anchore_network.get_shape()[1]
    # load prediction {which returns 0 for match and 1 for mis-match}
    model_bin = load_model_bin(model_binary_path, feature_size=feature_size, device='/cpu:0')

    binary_gen = BinaryGenerator(cheque_path, finicale_path, train_valid_split_percent=0.75)
    
    process_time_list = []
    labels_list = []
    predict_data = None
    for img_path_list in binary_gen.valid_data(batch_size=batch_size):
        start_time  = time.time()
        anchore_img_list, test_img_list, labels = img_path_list

        if predict_data is None:
            predict_data = np.round(predict(anchore_img_list, test_img_list, model_triplet, model_bin), 2)
            end_time  = time.time()
        else:
            r = np.round(predict(anchore_img_list, test_img_list, model_triplet, model_bin), 2)
            end_time  = time.time()
            predict_data = np.vstack((predict_data, r))

        process_time_list.append(end_time - start_time)
        labels_list += labels
    report = classification_report(labels_list, np.argmax(predict_data, axis=1))
    conf_mat = confusion_matrix(labels_list, np.argmax(predict_data, axis=1))
    print(report)
    print("---------------------------------------------------------------------------------")
    print(conf_mat)
    print("----------------------------------------------------------------------------------")
    print("process time taken:", np.mean(process_time_list))
Example #49
def ex2_reg():

    #%% Load Data
    #%  The first two columns contain the exam scores and the third column
    #%  contains the label.
    data = np.loadtxt('data/ex2data2.txt', delimiter=',')
    x = data[:, :2]
    y = data[:, 2]

    # %% ==================== Part 1: Plotting ====================
    #%  We start the exercise by first plotting the data to understand
    #%  the problem we are working with.
    print(
        'Plotting data with o indicating (y = 1) examples and x indicating (y = 0) examples.\n')
    plotData(x, y)

    plt.xlabel('Microchip Test 1')
    plt.ylabel('Microchip Test 2')
    plt.legend(['y == 1', 'y == 0'], bbox_to_anchor=(1.5, 1))
    plt.show()

    # %% ============ Part 2: Compute Cost and Gradient ============
    #% Add intercept term to x and X_test
    print(x.shape)
    X = mapFeature(x[:, 0], x[:, 1])

    [m, n] = X.shape

    # % Set regularization parameter lambda to 1
    _lambda = 1

    #% Initialize fitting parameters
    initial_theta = np.zeros(n)

    #% Compute and display initial cost and gradient
    # cost = costFunction(initial_theta, X, y)
    # [cost, grad] = costFunctionReg(initial_theta, X, y, _lambda)
    cost = costFunctionReg(initial_theta, X, y, _lambda)
    print('Cost at initial theta (zeros):\n{}'.format(cost))
    print('Gradient at initial theta (zeros):\n{}'.format(initial_theta))
    print(X.shape)

    # ============= Part 3: Optimizing using scipy.optimize
    # %  In this exercise, you will use a function (scipy.optimize.minimize)
    # %  to find the optimal parameters theta.
    fReg = lambda t: costFunctionReg(t, X, y, _lambda)

    # Using minimize()
    # options = {'maxiter': 400, 'disp': True}
    # try other methods `Powell`, `SLSQP`..etc
    # result = minimize(fReg, initial_theta, method='BFGS', options=options)
    # cost = result['fun']
    # theta = result['x']

    # print('\nCost at theta found by minimize(): {}'.format(cost))
    # print('theta: {}'.format(theta))

    # Using fmin_bfgs()
    options = {'full_output': True, 'retall': True}
    theta, cost, _, _, _, _, _, allvecs = fmin_bfgs(
        fReg, initial_theta, maxiter=400, **options)

    print('\nCost at theta found by fmin_bfgs(): {}'.format(cost))
    print('theta: {}'.format(theta))
    # visualizing the cost change
    costs = [fReg(allvecs[i]) for i in range(len(allvecs))]
    plt.plot(costs)
    plt.title('cost function $y$ per iteration $x$')
    plt.grid()
    plt.show()
    # % Print theta to screen
    print('Cost at theta found by minimize():\n{}\n'.format(cost))
    print('theta: \n')
    print('{}\n'.format(theta))

    # % Plot Boundary
    plotDecisionBoundary(theta, X, y)

    # % Show plot
    plt.xlabel('Microchip Test 1')
    plt.ylabel('Microchip Test 2')
    plt.legend(
        ['y == 1', 'y == 0', 'Decision Boundary'], bbox_to_anchor=(1.5, 1))
    plt.show()
    # % Compute accuracy on our training set
    p = predict(theta, X)
    print('Train Accuracy: {}\n'.format(np.mean(np.double(p == y)) * 100))
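Note: both ex2 scripts (this example and Example #58 below) end by calling a `predict(theta, X)` ported from the course's predict.m. A sketch of the standard definition, assuming the usual 0.5 threshold:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def predict(theta, X):
    # probability >= 0.5 maps to class 1, else 0
    return (sigmoid(X.dot(theta)) >= 0.5).astype(float)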
Example #50
def index(input_line):
    return {'result': predict(input_line, 10)}
Example #51
 def update(w, x):
     k_star, _ = predict(w, x)
     n[k_star] += 1
     new_w = np.copy(w)
     new_w[k_star] += (x - w[k_star]) / n[k_star]
     return new_w
Example #52
def test_three():
	assert predict("Steve Wozniak", "apple.com") == "*****@*****.**"
Example #53
def main():
    train(0.0001)
    # print predict([55.48216114069585,35.57070347228866])
    predict()
Example #54
from numpy import *
from plot import *
from util import *
from gradientDescent import *
from predict import *
from scipy.optimize import *
import numpy as np

data = loadtxt('data1.txt', delimiter=',')
X = data[:,0:2]
y = data[:,2]
plot(X,y)

theta = np.zeros(X.shape[1] + 1)
dummy = ones(X.shape[0])
processX = column_stack([dummy, X])
alpha = 1
thetaResult = fmin_bfgs(computeCost, theta, args=(processX, y, alpha), fprime=costGradient)
print(thetaResult)
print(predict(thetaResult, processX, y))
Example #55
 def test_predict(self):
     gradient, loss, predict, update = l1_norm(self.transform)
     self.assertAlmostEqual(predict(self.b, self.w, self.x),
                            93)
     self.assertAlmostEqual(predict(self.b, -self.w, self.x),
                            -91)
Example #56
 def loss(b, w, x, y):
     '''Return loss for sample (x,y) given weights w.'''
     return abs(predict(b, w, x) - y)
Example #57
 def subgradient(b, w, x, y):
     '''Return a subgradient of |error|'''
     error = predict(b, w, x) - y
     return -1 if error < 0 else 1
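Note: Examples #56 and #57 assume an affine `predict(b, w, x)`; a minimal sketch under that assumption:

import numpy as np

def predict(b, w, x):
    # scalar bias plus the dot product of weights and features
    return b + np.dot(w, x)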
Example #58
File: ex2.py Project: iamaziz/ml
def ex2():
    #%% Load Data
    #%  The first two columns contain the exam scores and the third column
    #%  contains the label.
    data = np.loadtxt('data/ex2data1.txt', delimiter=',')
    x = data[:, :2]
    y = data[:, 2]
    #%% ==================== Part 1: Plotting ====================
    #%  We start the exercise by first plotting the data to understand
    #%  the problem we are working with.
    print(
        'Plotting data with o indicating (y = 1) examples and x indicating (y = 0) examples.\n')
    plotData(x, y)

    plt.xlabel('Exam 1 Score')
    plt.ylabel('Exam 2 Score')
    plt.legend(['Admitted', 'Not admitted'], bbox_to_anchor=(1.5, 1))
    plt.show()
    #%% ============ Part 2: Compute Cost and Gradient ============
    #%  In this part of the exercise, you will implement the cost and gradient
    #%  for logistic regression. You need to complete the code in costFunction()

    #%  Setup the data matrix appropriately, and add ones for the intercept term
    [m, n] = x.shape

    #% Add intercept term to x and X_test
    ones = np.ones(m)
    X = np.array([ones, x[:, 0], x[:, 1]]).T

    #% Initialize fitting parameters
    initial_theta = np.zeros(n + 1)

    #% Compute and display initial cost and gradient
    cost = costFunction(initial_theta, X, y)
    print('Cost at initial theta (zeros):\n{}'.format(cost))
    # ============= Part 3: Optimizing using fmin() or minimize()
    print('Gradient at initial theta (zeros):\n{}'.format(initial_theta))
    # %  In this exercise, you will use a built-in function (scipy.optimize.fmin) to find the
    # %  optimal parameters theta.
    f = lambda t: costFunction(t, X, y)
    # %  Set options for fmin()
    fmin_opt = {'full_output': True, 'maxiter': 400, 'retall': True}
    # %  Run fmin to obtain the optimal theta
    theta, cost, iters, calls, warnflag, allvecs = fmin(
        f, initial_theta, **fmin_opt)
    print('Cost at theta found by fmin(): {}'.format(cost))
    print('theta: {}'.format(theta))

    # %  Set options for minimize()
    # mini_opt = {'maxiter': 400, 'disp': True}
    # %  Run minimize to obtain the optimal theta
    # results = minimize(f, initial_theta, method='Nelder-Mead', options=mini_opt)
    # cost = results['fun']
    # theta = results['x']
    # print('Cost at theta found by minimize(): {}'.format(cost))
    # print('theta: {}'.format(theta))

    cost_change = [costFunction(allvecs[i], X, y) for i in range(len(allvecs))]
    plt.plot(cost_change)
    plt.grid()
    plt.title('cost function $y$ per iteration $x$')
    plt.show()  # % Print theta to screen
    print('Cost at theta found by fmin:\n{}\n'.format(cost))
    print('theta: \n')
    print('{}\n'.format(theta))

    # % Plot Boundary
    plotDecisionBoundary(theta, X, y)

    # % Show plot
    plt.show()

    # %% ============== Part 4: Predict and Accuracies ==============
    # %  After learning the parameters, you'll want to use the model to predict the outcomes
    # %  on unseen data. In this part, you will use the logistic regression model
    # %  to predict the probability that a student with score 45 on exam 1 and
    # %  score 85 on exam 2 will be admitted.
    # %
    # %  Furthermore, you will compute the training and test set accuracies of
    # %  our model.
    # %
    # %  Your task is to complete the code in predict.m

    # %  Predict probability for a student with score 45 on exam 1
    # %  and score 85 on exam 2

    scores = np.array([1, 45, 85])
    prob = sigmoid(np.dot(scores, theta))
    print(
        'For a student with scores 45 and 85, we predict an admission probability of:\n{}\n\n'.format(prob))

    # % Compute accuracy on our training set
    p = predict(theta, X)

    print('Train Accuracy: {}\n'.format(np.mean(np.double(p == y)) * 100))
Example #59
def test_four():
	assert predict("Barack Obama", "whitehouse.gov") == "Unknown. No email address with that domain in our records."
Example #60
from sgd_train import sgd_optimization_mnist
from predict import *

if __name__ == '__main__':
    # train
    sgd_optimization_mnist()

    # predict
    predict()