Example #1
    def predict_number(self):
        all_pts = []
        for w in self.canvas.children:
            if isinstance(w, Line):
                wpts = np.array(w.points)
                xs = wpts[::2]
                ys = wpts[1::2]
                try:
                    tck, u = interpolate.splprep([xs, ys], s=0)
                except Exception as e:
                    print('warn', e)
                    continue
                u_new = np.arange(np.min(u), np.max(u), 0.01)
                out = interpolate.splev(u_new, tck)
                all_pts += out

        img = np.zeros((560 + 10, 560 + 10)).astype(np.uint8)
        # np.int was removed in NumPy 1.24+; the builtin int behaves the same here
        polyx = np.array(all_pts[::2]).astype(int)
        polyy = np.array(all_pts[1::2]).astype(int)
        R = 10
        from itertools import product
        # Stamp a filled disc of radius R around every interpolated point
        for xs, ys in zip(polyx, polyy):
            for x, y in zip(xs, ys):
                circle = [(x + i, y + j)
                          for (i, j) in product(range(-R, R + 1), repeat=2)
                          if i**2 + j**2 < R**2]
                for c in circle:
                    img[c[0], c[1]] = 128
        # scipy.misc.imresize requires SciPy < 1.3 (with Pillow installed)
        img = imresize(np.rot90(img), (28, 28),
                       interp='bilinear').astype('f') / np.max(img)
        plt.imshow(img, cmap='gray')
        plt.show()
        predict(img)
Example #2
    def predict_number(self):
        all_pts = []
        for w in self.canvas.children:
            if isinstance(w, Line):
                wpts = np.array(w.points)
                xs = wpts[::2]
                ys = wpts[1::2]
                try:
                    tck, u = interpolate.splprep([xs, ys], s=0)
                except Exception as e:
                    print('warn', e)
                    continue
                u_new = np.arange(np.min(u), np.max(u), 0.01)
                out = interpolate.splev(u_new, tck)
                all_pts += out

        img = np.zeros((560 + 10, 560 + 10)).astype(np.uint8)
        polyx = np.array(all_pts[::2]).astype(int)  # np.int is removed in NumPy 1.24+
        polyy = np.array(all_pts[1::2]).astype(int)
        R = 10
        from itertools import product
        for xs, ys in zip(polyx, polyy):
            for x, y in zip(xs, ys):
                circle = [(x + i, y + j)
                          for (i, j) in product(range(-R, R + 1), repeat=2)
                          if i**2 + j**2 < R**2]
                for c in circle:
                    img[c[0], c[1]] = 128
        # scipy.misc.imresize requires SciPy < 1.3 (with Pillow installed)
        img = imresize(np.rot90(img),
                       (28, 28), interp='bilinear').astype('f') / np.max(img)
        plt.imshow(img, cmap='gray')
        plt.show()
        predict(img)
Example #3
def plotting(itemlist):
    x = [row[0] for row in itemlist]
    y = [row[1] for row in itemlist]
    plt.axis((0, 800, 600, 0))
    plt.plot(x, y)
    plt.savefig('foo1.jpeg')
    try:
        testcrop.CropImage()
        predictor.predict()
    except IndexError:
        print("Draw Again!!!")
    plt.close()
Example #4
def plotting(itemlist, model, session, saver, save_path, hyperparams):
    x = [row[0] for row in itemlist]
    y = [row[1] for row in itemlist]
    plt.axis((0, 800, 600, 0))
    plt.plot(x, y)
    plt.savefig('foo1.jpeg')
    try:
        testcrop.CropImage()
        predictor.predict(model, session, saver, save_path, hyperparams)
    except IndexError:
        print("Draw Again!!!")
    plt.close()
Example #5
def home():
    if request.method == 'POST':
        req = request.form['input']
        res = predictor.predict(req)
        return render_template('submit.html', res=res)
    else:
        return render_template('submit.html')
Example #6
async def handle_docs_photo(message):
    chat_id = message.chat.id
    # user_name is needed in both branches below
    user_name = message.from_user.first_name

    if message.media_group_id is None:
        # Get user's variables
        user_id = message.from_user.id
        message_id = message.message_id
        text = WAITING_TEXT % user_name
        logging.info(f'{user_name, user_id} is knocking to our bot')
        await bot.send_message(chat_id, text)

        # Define input photo local path
        photo_name = './input/photo_%s_%s.jpg' % (user_id, message_id)
        await message.photo[-1].download(
            photo_name)  # extract photo for further processing

        # Photo processing
        photo_output, text = predict(photo_name)
        await bot.send_photo(chat_id, photo_output)
        output_text = []
        for i in text:
            output_text.append(CLASSES_DICT[i])
        output_text = '\n\n'.join(output_text)
        await bot.send_message(chat_id, output_text)

    else:
        text = NOT_TARGET_TEXT % user_name
        await message.reply(text)
Example #7
def main():
    ap = argparse.ArgumentParser(
        description='This script allow to predict using a pre-trained model')

    ap.add_argument(
        'image_path',
        default='/home/workspace/paind-project/flowers/test/1/image_06752.jpg')
    ap.add_argument('checkpoint',
                    default='/home/workspace/paind-project/checkpoint.pth')
    ap.add_argument('--top_k', dest='top_k', type=int, default=5)
    ap.add_argument('--category_names',
                    dest='category_names',
                    default='cat_to_name.json')
    ap.add_argument('--gpu', dest='gpu', action='store_true')

    args = ap.parse_args()

    if args.image_path and args.checkpoint:
        model = checkpoint.load(checkpoint_path=args.checkpoint, gpu=args.gpu)

        probs, classes = predictor.predict(image_path=args.image_path,
                                           model=model,
                                           top_k=args.top_k,
                                           gpu=args.gpu)

        with open(args.category_names, 'r') as json_file:
            cat_to_name = json.load(json_file)

        labels = list(cat_to_name.values())
        classes = [labels[x] for x in classes]

        for c, p in zip(classes, probs):
            print(c, p)
Example #8
def index():
    if "file_urls" not in session:
        session['file_urls'] = []
    if "result1" not in session:
        session['result1'] = []
    if "positive" not in session:
        session['positive'] = []
    full_result = session['result1']
    full_file = session['file_urls']
    full_positive = session['positive']
    if request.method == 'POST':
        file_obj = request.files
        for f in file_obj:
            file = request.files.get(f)
            filename = photos.save(file, name=file.filename)
            print(filename)
            result1, file_urls, positive = predict([filename])
            full_result = full_result + result1
            full_file = full_file + file_urls
            full_positive = full_positive + positive
        session['file_urls'] = full_file
        session['result1'] = full_result
        session['positive'] = full_positive
        print('COMPLETE')
        return "Uploading..."
    return render_template('index.html')
Example #9
def scorer():
    id_number = None
    form = NameForm()
    my_dict = None
    pred, diff, no_assumptions, assumptions, title = (None, None, None, None, None)

    if form.validate_on_submit():
        id_number = form.id_number.data
        form.id_number.data = ''
        pred, diff, no_assumptions, assumptions, title = predict(
            id_number, dict90, '../pipeline/data/')
        my_dict = assumptions

    attrs = ['bed', 'bath', 'feet', 'dog', 'cat', 'content',
             'getphotos', 'hasmap', 'housingtype', 'lat', 'long', 'laundry',
             'parking', 'price', 'smoking', 'wheelchair']

    return render_template('scorer.html', id_number=id_number, attrs=attrs,
                           my_dict=my_dict, form=form, pred=pred, diff=diff,
                           no_assumptions=no_assumptions, assumptions=assumptions,
                           title=title)
Example #10
def get_images():
    unet = skimage.io.imread(
        '/Users/arianrahbar/Dropbox/Unet/OutProbs/1_crop.png')
    mrcnn = cv2.imread('/Users/arianrahbar/Dropbox/Mrcnn/OutLabels/1_crop.png',
                       cv2.IMREAD_GRAYSCALE)
    unet = pred.predict(unet)
    return unet, mrcnn
Example #11
def send():
    if request.method == 'POST':
        url = request.form['url']
        predict = predictor.predict(url)
        return render_template('index.html', predict=predict)

    return render_template('form.html')
Example #12
def main(args):
    normal_class = args.normal_class
    anomal_classes = args.anomal_classes
    train_dataset, test_dataset = get_cifar_datasets(normal_class,
                                                     anomal_classes)
    print(f"train dataset length: {len(train_dataset)}")
    print(f"test dataset length: {len(test_dataset)}")

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('Using {} device'.format(device))

    feature_extractor_version = args.feature_extractor_version  # resnet18, 'resnet34', 'resnet50', resnet101, resnet 152
    feature_extractor = torch.hub.load('pytorch/vision:v0.9.0',
                                       feature_extractor_version,
                                       pretrained=True)
    feature_extractor.to(device)

    train_set, test_set = get_feature_space(device, feature_extractor,
                                            train_dataset, test_dataset)
    accuracy, tpr, fpr, recall, precision = predict(train_set, test_set)

    print(f"feature extractor: {feature_extractor_version}")
    print(f"normal class: {normal_class}")
    print(f"anomal classes: {anomal_classes}")
    print(f"accuracy: {accuracy}")
    print(f"tpr: {tpr}, fpr: {fpr}")
    print(f"recall: {recall}, precision: {precision}")
Example #13
def output():
    try:
        json = request.get_json()
        input_text = json["text"]
        return predict(input_text)
    except Exception as e:
        return f"An error Occured: {e}"
Example #14
def parse_files():
    bucket = gcs_client.get_bucket(BUCKET_NAME)
    blobs = bucket.list_blobs()

    Logger.log_writer("Reading bucket files....")

    for blob in blobs:
        Logger.log_writer("fileName:" + blob.name)

        if blob.name == "LTC.csv":
            Logger.log_writer("Downloading...")
            blob.download_to_filename(blob.name)
            Logger.log_writer("Downloaded! Predicting...")

            last_value, prediction, market_cap = predictor.predict(blob.name)

            Logger.log_writer(
                "Created a prediction for:{0} last_value:{1} prediction:{2} marketcap:{3}"
                .format(blob.name, last_value, prediction, market_cap))

            save_prediction(blob.name, last_value, prediction, market_cap)
Example #15
def onClick():
    global message, file, image, img, graph
    #os.system("script2.py "+ rain.get() +" " + temp.get()+ " " + Population_density.get())
    # filo = open("private 2.txt", "r")
    value = predictor.predict(city.get(), tempe.get(), rain.get(),
                              pop_den.get())
    # print(value)
    ########### MAKING GRAPH ###############################
    file = "C:\\Users\\acer\\Desktop\\programme_codes\\disease-outbreaks-predictor\\output.png"
    image = Image.open(file)
    # Image.ANTIALIAS was removed in Pillow 10; newer Pillow uses Image.LANCZOS
    image = image.resize((1450, 450), Image.ANTIALIAS)
    img = ImageTk.PhotoImage(image)

    graph = Label(lowerFrame, image=img)
    graph.grid(row=0, column=0, columnspan=14, pady=2, padx=40)

    out1 = str(int(value[0]))
    message = "The predicted disease count of next year is  " + out1
    answer["text"] = message
    temp_contri["text"] = "Current Temperature contribution is: " + str(
        value[1][0] / (sum(value[1])) * 100)[:5]
    rain_contri["text"] = "Current Rainfall contribution is: " + str(
        value[1][1] / (sum(value[1])) * 100)[:5]
    pop_contri["text"] = "Current  Population Density contribution is: " + str(
        value[1][2] / (sum(value[1])) * 100)[:5]
Example #16
def detect():
	redditURL = request.form['redditpost']
	print(redditURL)
	predicted_flair = str(predict(str(redditURL)))
	actualflair = str(FlairActual(str(redditURL)))
	print(predicted_flair, actualflair)
	return render_template('index.html', predicted_flair = predicted_flair, actualflair = actualflair)
Example #17
    def snapshot(self):
        ret, frame = self.vid.get_frame()

        if ret:
            cv2.imwrite("capture.jpg", cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
            result = predict()
            print(result)
            self.selectMusic(result)
Example #18
def get_tweets_forAgency(agency):
    for tweet in tweets:
        text = str(tweet[2])
        if "b'RT" not in text:
            sentiment = TextBlob(text).sentiment.polarity
            if sentiment <= 0:
                if agency == predict(text):
                    print(text)
Example #19
def predict():
    sex = request.args["sex"]
    age = request.args["age"]
    beh_id = int(request.args["beh_id"])
    zip_code = int(request.args["zip_code"])

    ans = predictor.predict(sex, age, beh_id, zip_code)
    return json.dumps(ans)
Example #20
def main():
    graph = Graph()
    graph.saveGraph()
    root = Tk()
    probabilityIncrease, probabilityDecrease = predictor.predict()
    gui = MyGUI(probabilityIncrease, probabilityDecrease, root)
    gui.appOpen()
    root.mainloop()
Example #21
def save():
    filename = "image.png"
    # global image1
    # image1 = PIL.ImageOps.invert(image1)
    image1.save(filename)
    predicted = predict()
    print(predicted)
    root.title(predicted)
Example #22
def compare_scans(request):
    last_two_scans = ImageModel.objects.all().order_by('-id')[:2]
    print(last_two_scans)
    file1 = last_two_scans[0].get_name()
    file2 = last_two_scans[1].get_name()
    path1 = "/home/prajwala/Videos/oct_analyzer" + settings.MEDIA_URL + file1
    path2 = "/home/prajwala/Videos/oct_analyzer" + settings.MEDIA_URL + file2
    results_path = "/home/prajwala/Videos/oct_analyzer" + settings.MEDIA_URL + "results/"
    first, name1 = file1.split("/")
    first, name2 = file2.split("/")
    result1 = predict(path1, results_path, name1)
    result2 = predict(path2, results_path, name2)
    context = {'file1': file1, 'file2': file2}
    results = {'result1': result1, 'result2': result2}

    plot_hist(result1, result2)
    return render(request, 'uploader/compare.html',
                  {'context': context, 'results': results})
Example #23
def home():
    if request.method == 'POST':
        req = request.json
        print(req)
        content = req['text']
        res = predictor.predict(content)
        return jsonify({'generatedText': res})
    else:
        return jsonify({'generatedText': 'hi'})
Example #24
def get_prediction(calendar, schedule):
	schedule = get_schedule(schedule)
	try:
		weekday = [ep for ep in schedule['event_plans'] if ep['event_plan'] == 'Weekday'][0]
		min_temp = weekday['min_temp_business_hours']
		temp = min_temp if min_temp !="" else "20"
	except:
		temp = "20"
	return predictor.predict(temp)
Example #25
def predict():
    review_text = request.form['review']
    cleaned = predictor.clean_str(review_text)
    rating = predictor.predict(review_text)

    return render_template('prediction.html',
                           review=review_text,
                           cleaned=cleaned,
                           rating=rating)
Example #26
def receiver():
    if request.method == 'POST':
        if 'imagefile' in request.files:
            img = request.files.get('imagefile', '')
            response = model.predict(img)
            return {'response': response}

    elif request.method == 'GET':
        return redirect('/')
Example #27
def handle_text(text):
    # predict
    prediction = predict(model, text)

    # save to firebase
    saveToFirebase(text, prediction)

    # return
    return prediction
Example #28
def sentiment_analyzer():
    '''
    Receives a company name and sends back a JSON response ['Positive', 'Negative']
    '''
    company = request.form['company']

    res = predict(company).resp()

    return jsonify(res)
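
A minimal client-side sketch of how this endpoint might be called, assuming the view is mounted on a local Flask app at a hypothetical /sentiment route (the snippet does not show the URL rule); only the 'company' form field and the JSON list response come from the code above:

import requests

# The route and port below are assumptions for illustration only.
resp = requests.post('http://localhost:5000/sentiment', data={'company': 'ACME'})
print(resp.json())  # e.g. ['Positive', 'Negative']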
Example #29
def partition(remaining, swift, container_name='videos', file_list=None):
    """ Naive partitioning algorithm for figuring out which workloads can go
    on each VM. This is done using the predict machine learning algorithm on
    each of the files using the index file written earlier during the ingest
    portion of the program.

    Note: I (the person writing the docstrings) did not create this
    algorithm, so am unsure exactly of what's going on here. The lines and
    comments were left mostly as I found them with some minor formatting
    changes. Contact Ruben Madera (https://github.com/Roastmaster) for more
    info.
    """
    if not file_list: 
        container_data = []
        for data in swift.get_container(container_name)[1]:
            container_data.append('{0}\t{1}'.format(data['name'], data['bytes']))
        container_data = [token.split('\t') for token in container_data]

        # Use a list comprehension to create a list of all the file names
        file_list = []
        try:
            file_list = [token[0] for token in container_data]
        except IndexError:
            print "IndexError: Container empty"
    
    # Where we store the partitioned list of videos.
    # Internal lists separate what is possible to transcode in time on one VM
    partitioned_video_list = []

    # Given a time-until-completion by Joe's look up table, we keep
    # decrementing "time_until_deadline" by these times until it reaches
    # zero, then, create a new list (representing a new vm), and repeat.
    tmp_t_u_d = remaining
    print "Time Remaining:", predictor.prettify_time(remaining)
    single_vm_capacity = []
    for video in file_list:
        single_vm_capacity.append(video)
        prediction_time = predictor.predict(video)
        if prediction_time > remaining:
            print "WARNING:  File is too big to be transcoded by VM in time."
            partitioned_video_list.append(single_vm_capacity)
            single_vm_capacity = []
            tmp_t_u_d -= prediction_time
            continue

        if tmp_t_u_d - prediction_time > 0:
            tmp_t_u_d -= prediction_time
            if video == file_list[-1]:
                partitioned_video_list.append(single_vm_capacity)

        else:
            tmp_t_u_d = remaining
            partitioned_video_list.append(single_vm_capacity)
            single_vm_capacity = []

    return partitioned_video_list
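
Since the docstring admits the partitioning is only loosely documented, here is a small usage trace of partition() under stated assumptions: a stubbed predictor in which every video costs 40 time units, a 100-unit deadline, and made-up file names. It assumes the stub is what the name `predictor` resolves to in partition()'s module (the snippet itself is Python 2 style):

class _StubPredictor(object):
    # Stand-in for the real predictor module: every video 'costs' 40 time units.
    @staticmethod
    def predict(video):
        return 40

    @staticmethod
    def prettify_time(t):
        return '%s units' % t

predictor = _StubPredictor()  # shadow the real module for this sketch only
groups = partition(100, swift=None,
                   file_list=['a.mp4', 'b.mp4', 'c.mp4', 'd.mp4', 'e.mp4'])
# groups == [['a.mp4', 'b.mp4', 'c.mp4'], ['d.mp4', 'e.mp4']]
# 'c.mp4', the video that exhausts the first 100-unit budget, stays in the first
# group; the budget then resets to 100 for the next list.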
Example #30
def recognize(img_path, rect_list):
    os.chdir(attention_root)
    boxes = []
    for rect in rect_list:
        box = (rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3])
        boxes.append(box)

    words = predictor.predict(img_path, boxes)
    os.chdir(cwd)
    return words
Example #31
def process_images(input, output):
    if os.path.isdir(output):
        shutil.rmtree(output)
    os.makedirs(output, exist_ok=True)
    for segmentation in os.listdir(input):
        os.makedirs(os.path.join(output, segmentation), exist_ok=True)
        for filename in os.listdir(os.path.join(input, segmentation)):
            img = imread(os.path.join(input, segmentation, filename))
            prediction = predict(img)
            imwrite(os.path.join(output, segmentation, filename), prediction)
Example #32
def resultCancer():
    if request.method == 'POST':
        gene = request.form["Gene"]
        variation = request.form['Variation']
        text = request.form['Text']

        wCancer.writerow([gene, variation, text])
        re = predictor.predict()

        return render_template("resultCancer.html", result=re)
Example #33
def main():
    s = time.time()
    train = pd.read_csv("train.csv")
    test = pd.read_csv("test.csv")
    clean(train)
    clean(test)

    print("Data processed after " + str(time.time() - s) + " sec")

    #-----------Predictor-------------------------------
    tfidf_bag = bag(train.tweet)
    predictor.predict(tfidf_bag, train.label)
    #---------------------------------------------------

    #-----------Clustering------------------------------
    hateful = train.copy(deep=True)
    get_hateful(hateful)
    kmeans_model, vectorizer = classifier.train(hateful, False)
    print("Clusters Found after " + str(time.time() - s) + " sec")
Example #34
def predict():
    #Symptoms are set as a CSV list:
    symptoms_csv = request.form['symptoms']
    symptoms = symptoms_csv.split(',')
    symptoms = symptoms[:-1]  #Remove empty entry at end
    #Age:
    age = int(request.form['age'])
    #Make prediction:
    diagnosed_disease = predictor.predict(symptoms, age)

    #Connect to database:
    dbConn = MySQLdb.connect('localhost', 'project', 'project', 'cse3002')
    dbCursor = dbConn.cursor()

    #Get recommended drugs for disease:
    dbCursor.execute('select drugs_list from drugs where disease_name=%s',
                     (diagnosed_disease, ))
    drugs_string = dbCursor.fetchone()[0]
    #Parse list:
    drugs_string = drugs_string.split(':')
    final_dstring = ""
    for i in range(len(drugs_string) - 1):
        d = drugs_string[i]
        drug = d.split(',')
        final_dstring = final_dstring + drug[0]
        if drug[1]:
            final_dstring = final_dstring + ' (Prescription Required)'
        final_dstring = final_dstring + ', '
    if final_dstring == '':
        final_dstring = 'None'

    #Get session ID:
    session_id = request.cookies.get('session_id')
    #Check if user is anonymous:
    if session_id != 'ANONYMOUS':
        #Get current date and time:
        diag_datetime = str(datetime.datetime.now()).split('.')[0]
        #Get username:
        dbCursor.execute('select username from login where session_id=%s',
                         (session_id, ))
        username = dbCursor.fetchone()[0]
        #Commit record to database:
        dbCursor.execute(
            'insert into diagnoses values(%s, %s, %s, %s)',
            (username, diag_datetime, diagnosed_disease, symptoms_csv[:-1]))
        dbConn.commit()

    #Close DB connection:
    dbConn.close()
    #Return results:
    return render_template("diagnosis.html",
                           disease_name=diagnosed_disease,
                           symptoms=symptoms,
                           age=age,
                           drugs=final_dstring)
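
The drug-parsing loop above implies that the drugs_list column stores colon-separated "name,prescription_flag" entries ending with a trailing colon; a small illustration with a made-up value (the real table contents are not shown):

# Hypothetical drugs_list cell for some disease:
drugs_string = 'Paracetamol,:Amoxicillin,1:'.split(':')
# -> ['Paracetamol,', 'Amoxicillin,1', '']
# The loop visits every element except the trailing empty one, keeps the part
# before the comma as the drug name, and appends ' (Prescription Required)'
# whenever the flag after the comma is non-empty, producing:
#   'Paracetamol, Amoxicillin (Prescription Required), '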
Example #35
def processor(r_queue, s_queue):
    while True:
        data = r_queue.get()
        ip,entry = data.split('\t')
        dic = transform(entry)
        #process data
        cls = predictor.predict(dic)
        result = map_pt[cls]
        data = ip+'\t'+result
        s_queue.put(data)
        print 'from\t'+ip+'\t\tpredict\t'+result
Example #36
def predict():
    rawImage = request.files['file']
    X = predictor.readAndNormalizeImg(rawImage)

    preds = predictor.predict(X)
    trueLabel = util.LABEL_DICT[preds[0]]

    data = {"prediction": trueLabel}
    response = jsonify(data)

    return response
Example #37
def classify():
    t = timer()

    """ choose here your c, gamma range """
    for c in frange(0.1, 1.5, 0.1):
        #c = 32;
        for gamma in frange(0.1, 1.5, 0.1):

            print "### combination ###"
            print c, gamma

            print "### build model ###"
            sys.stdout.flush()
            t.next()
            model_creator.build_model("tmps.arff", c, gamma)
            sys.stdout.flush()
            print t.next()
            print "### predict ###"
            t.next()
            predictor.predict("tmps_independent.arff")
            print t.next()
            sys.stdout.flush()
Example #38
def main(argv):

    date = "20180302"

    file = open("../../server/data/" + date, "r")
    lines = file.readlines()
    file.close()

    for i in range(len(lines)):
        line = lines[i]
        tokens = line.split(",")
        time = tokens[0]
        result = predictor.predict(date, time)
        process_result(tokens, time, result)
Example #39
def estimate():
    if request.method == 'POST':
        try:
            data = ast.literal_eval(request.data.decode("utf-8"))
            data = data["input"]
            vec, ans = predictor.predict(data)
            print(vec)
            rv = json.dumps({"vec": vec, "ans": str(ans)})
            return rv
        except Exception as ex:
            print(str(ex))
            return str(ex)
Example #40
def partition_workload(time_until_deadline, swiftclient, container_name, file_list = None):
    if not file_list: 
        container_data = []
        for data in swiftclient.get_container(container_name)[1]:
            container_data.append('{0}\t{1}'.format(data['name'], data['bytes']))
        container_data = [token.split('\t') for token in container_data]

        # use a list comprehension to create a list of all the filenames
        file_list = []
        try:
            file_list = [token[0] for token in container_data]
        except IndexError:
            print "IndexError: Container empty"

    
    # where we store the partitioned list of videos.
    # Internal lists separate what is possible to transcode in time on one VM
    partitioned_video_list = []

    # given a time-until-completion by Joe's look-up table, we keep decrementing "time_until_deadline" by
    # these times until it reaches zero, then create a new list (representing a new vm), and repeat.
    tmp_t_u_d = time_until_deadline
    print "Time Remaining:", predictor.prettify_time(time_until_deadline)
    single_vm_capacity = []
    for video in file_list:
        single_vm_capacity.append(video)
        prediction_time = predictor.predict(video)
        if (prediction_time > time_until_deadline):
            print "WARNING:  File is too big to be transcoded by VM in time."
            partitioned_video_list.append(single_vm_capacity)
            single_vm_capacity = []
            tmp_t_u_d -= prediction_time
            continue

        if (tmp_t_u_d - prediction_time > 0):
            tmp_t_u_d -= prediction_time
            if (video == file_list[-1]):
                partitioned_video_list.append(single_vm_capacity)

        else:
            tmp_t_u_d = time_until_deadline
            partitioned_video_list.append(single_vm_capacity)
            single_vm_capacity = []

    return partitioned_video_list
Example #41
def update():
	predictor.predict()
Example #42
def predict():
    source    = request.params.source
    sentences = predictor.predict(source)
    response.set_header('Access-Control-Allow-Origin', '*')
    return {"sentences": sentences}
Example #43
def question_tags():
    question = request.json['question']
    return predict(question)
Example #44
def main():
    poke_name = flask.request.args.get('name')
    res = predictor.predict(poke_name)
    return json.dumps(res, ensure_ascii=False, indent=2)