Code example #1
def index():
    form = QueryForm()
    if form.validate_on_submit():
        # Flash the submitted query back to the user.
        flash('Query received: {}'.format(form.query.data))
    return render_template('index.html', title='Home', form=form)
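
All of these examples validate a Flask-WTF QueryForm whose definition is not shown. A minimal sketch matching the usage in example #1 (a single free-text query field; the other examples use different field names) might look like this:

from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired

# Hypothetical form definition matching example #1; field names vary across
# the other examples (server_ip, food_type, campaign_number, ...).
class QueryForm(FlaskForm):
    query = StringField('Query', validators=[DataRequired()])
    submit = SubmitField('Submit')
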
Code example #2
def query():
    form = QueryForm()
    if form.validate_on_submit():
        # flash('Search phrase "{}" is going to be analyzed.'.format(form.query.data))
        # Store the query in the session so the redirect target can read it.
        session['query'] = form.query.data
        return redirect(url_for('statistics'))
    return render_template('query.html', title='Analyze Tweets', form=form)
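
Example #2 passes the submitted query to a later view through the session instead of the URL. The statistics view it redirects to is not part of the snippet; a minimal sketch, assuming it simply reads the stored value back out (the route name and template are guesses), could be:

from flask import redirect, render_template, session, url_for

# Hypothetical counterpart to example #2: read the query stored in the session.
@app.route('/statistics')
def statistics():
    query = session.get('query')
    if query is None:  # nothing has been submitted yet
        return redirect(url_for('query'))
    return render_template('statistics.html', title='Statistics', query=query)
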
Code example #3
def index():
    '''
    Handle the route for the index page.

    Serves the HTML and handles submissions of the Flask-WTF form
    used to input the server IP and port.
    '''
    form = QueryForm()
    if form.validate_on_submit():
        return redirect(f'/query/{form.server_ip.data}:{form.server_port.data}/')
    return render_template('form.html', form=form)
Code example #4
File: routes.py  Project: C3watts/microblog
def query():
    form = QueryForm()
    if form.validate_on_submit():
        campaign_number = form.campaign_number.data
        table_name = form.table_name.data
        start_time = form.start_time.data
        end_time = form.end_time.data
        url = 'http://localhost:5000/datatable?campaign_number=%s&table_name=%s&start_time=%s&end_time=%s' % (
            campaign_number, table_name, start_time, end_time)
        return redirect(url)
    return render_template('query.html', title='Input Query', form=form)
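
Example #4 interpolates the form values directly into the query string, which breaks as soon as a value contains a space, '&', or another reserved character, and it hard-codes the host and port. A safer variant (not from the original project) builds the query string with urllib.parse.urlencode; the helper name below is hypothetical:

from urllib.parse import urlencode

from flask import redirect

# Hypothetical rewrite of the redirect in example #4.
def build_datatable_redirect(campaign_number, table_name, start_time, end_time):
    # urlencode percent-escapes the values; the relative URL keeps the redirect
    # on the same host and port the app is served from.
    params = urlencode({
        'campaign_number': campaign_number,
        'table_name': table_name,
        'start_time': start_time,
        'end_time': end_time,
    })
    return redirect('/datatable?' + params)
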
Code example #5
File: routes.py  Project: rstur3545/truckStock
def query():
    # Example records:
    # selectedpart = [Partnum(partnumber='wr30x10093', location='v', quantity=5, description='none'),
    #                 Partnum(partnumber='rt30x10093', location='va', quantity=56, description='norne')]
    selectedpart = []
    form1 = QueryForm()
    if form1.validate_on_submit():
        partnumber = request.form['partnumber']
        location = request.form['location']
        if partnumber:
            selectedpart = Partnum.query.filter_by(partnumber=partnumber).all()
        elif location:
            selectedpart = Partnum.query.filter_by(location=location).all()

        return render_template('query.html', form1=form1, selectedpart=selectedpart, location=location)
    return render_template('query.html', form1=form1, selectedpart=selectedpart)
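
The Partnum model queried in example #5 is not shown. Judging from the commented-out sample records at the top of the function, a sketch of the assumed Flask-SQLAlchemy model would be:

# Hypothetical model inferred from the sample records in example #5;
# 'db' is the application's Flask-SQLAlchemy instance.
class Partnum(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    partnumber = db.Column(db.String(64), index=True)
    location = db.Column(db.String(64), index=True)
    quantity = db.Column(db.Integer)
    description = db.Column(db.String(140))
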
Code example #6
def facebook():
    form = QueryForm()
    if form.validate_on_submit():
        your_name = form.my_username.data
        user_requested = form.username.data
        chat_counts = get_chats_and_messages(user_requested)
        message_count = get_message_count(user_requested)
        chat_participation = get_chat_participation(user_requested)
        chat_count_dict = get_chat_count_dict()
        total_p = sum(chat_participation.values())
        my_part = get_chat_participation(your_name)
        return render_template('count.html',
                               title='Facebook Message Counter',
                               your_name=your_name,
                               message_count=message_count,
                               other_user=user_requested,
                               chat_counts=chat_counts,
                               chat_participation=chat_participation,
                               chat_count_dict=chat_count_dict,
                               total_p=total_p,
                               my_part=my_part)
        # redirect(url_for('index'))
    return render_template('facebook.html', title='Facebook Message Counter', form=form)
Code example #7
def index():
    form = QueryForm()

    # form submission actions
    if form.validate_on_submit():
        # query yelp api
        yelp_results = search(API_KEY, form.food_type.data, form.location.data)
        total = len(yelp_results['businesses'])

        # if no results, redirect to error page
        if total == 0:
            return render_template('sorry.html')
        else:
            rand_restaurant = random.randint(0, total - 1)
            return render_template(
                'results.html',
                yelp_results=yelp_results['businesses'][rand_restaurant])

    return render_template('index.html', form=form)
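
The search helper in example #7 wraps the Yelp Fusion business search endpoint and is not part of the snippet. A rough sketch using requests, assuming the helper simply returns the decoded JSON body, might be:

import requests

# Hypothetical implementation of the search() helper used in example #7.
def search(api_key, term, location, limit=20):
    response = requests.get(
        'https://api.yelp.com/v3/businesses/search',
        headers={'Authorization': 'Bearer ' + api_key},
        params={'term': term, 'location': location, 'limit': limit},
    )
    response.raise_for_status()
    return response.json()  # contains the 'businesses' list used above
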
Code example #8
def index():
    result = db.session.query(summoners).all()
    form = QueryForm()
    form.summoner.choices = [(r.account_id, r.summoner_name) for r in result]
    if form.validate_on_submit():
        account_id = form.summoner.data

        queue = form.queue.data
        begin_date = form.begin_date.data
        # begin date is an optional field and may not have a value
        if begin_date is None:
            time = 0
        else:
            time = datetime.datetime(year=int(begin_date.year), month=int(begin_date.month), day=int(begin_date.day))\
                .timestamp() * 1000

        stat = form.stat.data

        stat_total = stat_fetcher.get_stat_total(stat, account_id, time, queue)
        if stat in stat_fetcher.by_champion_stats:
            stat_by_champ = stat_fetcher.get_stat_by_champ(stat, account_id, time, queue)

            stat_by_champ_arr = []
            for r in stat_by_champ:
                stat_by_champ_arr.append({
                    'champion': champion_parser.get_champion_name(r.champion),
                    'stat': r.stat,
                    'amount': r.amount
                })
        else:
            stat_by_champ_arr = None

        # look up the human-readable labels (the second element of each choices tuple)
        stat_desc = [item[1] for item in form.stat.choices if item[0] == form.stat.data][0]
        summoner_name = [item[1] for item in form.summoner.choices if item[0] == account_id][0]
    else:
        stat_total = None
        stat_by_champ_arr = None
        stat_desc = None
        summoner_name = None
    return render_template('index.html', form=form, stat_total=stat_total, stat_by_champ=stat_by_champ_arr,
                           stat_desc=stat_desc, summoner_name=summoner_name)
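
Example #8 fills form.summoner.choices at request time, which only works if summoner is a SelectField. A sketch of the assumed form follows; the choice lists and labels are placeholders, since the snippet does not show them:

from flask_wtf import FlaskForm
from wtforms import DateField, SelectField, SubmitField
from wtforms.validators import Optional

# Hypothetical form matching the usage in example #8.
class QueryForm(FlaskForm):
    summoner = SelectField('Summoner', choices=[])   # filled per request from the database
    queue = SelectField('Queue', choices=[])         # queue identifiers
    begin_date = DateField('Begin date', validators=[Optional()])  # may be left empty
    stat = SelectField('Stat', choices=[])           # (value, human-readable label) pairs
    submit = SubmitField('Query')
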
Code example #9
File: queries.py  Project: lawr3nc/artwork_prov
def runQuery():
    form = QueryForm()
    results = []
    if form.validate_on_submit():
        querytype = form.querytype.data
        prov_docdb = ArtID.query.filter_by(artname=form.artname.data).first()
        if prov_docdb is None:
            flash('There is no provenance for this artwork')
        else:
            prov_doc = api.document.get(prov_docdb.docid)
            provn_doc = prov_doc.prov
            currentdir = os.getcwd()
            filename = form.artname.data.replace(" ", "")
            outputfile = currentdir + '/app/rdfqueryfiles/' + filename + '.rdf'
            provn_doc.serialize(outputfile, format='rdf', rdf_format='ttl')
            querystr = ""
            if querytype == 'location':
                querystr = location
            elif querytype == 'specialist':
                querystr = specialist
            elif querytype == 'hammerprice':
                querystr = hammerprice
            elif querytype == 'auctionhouse':
                querystr = auctionhouse
            elif querytype == 'gallery':
                querystr = gallery
            elif querytype == 'buyer':
                querystr = buyer
            g = rdflib.Graph()
            g.parse(outputfile, format='n3')
            qres = g.query(querystr)
            for row in qres:
                results.append(row)
    return render_template('query.html',
                           title='Queries',
                           form=form,
                           results=results)
Code example #10
File: routes.py  Project: antoniouaa/weather_app
def index():
    form = QueryForm()
    if form.validate_on_submit():
        return redirect(url_for("stats_fetch", location=form.location.data))
    return render_template("index.html", title="Home", form=form)
Code example #11
def index():
    print(app.root_path)
    form = QueryForm()
    if form.validate_on_submit():

        #search_term="Jimmy Hendrix"
        search_term = form.the_wik_search.data
        wik_page = wikipedia.search(search_term, results=1)
        try:
            p = wikipedia.page(wik_page[0])
        except wikipedia.exceptions.DisambiguationError as e:
            #print(e.options)
            p = wikipedia.page(e.options[0])

        paragraph_text = p.content  # Content of page.
        #paragraph_text=paragraph_text[:350]#
        wik_url = p.url
        #print(wik_url)

        #query="When was he born?"
        query = form.the_query.data

        #paragraph_text=form.the_document.data

        def is_whitespace(c):
            # Treat standard whitespace and the narrow no-break space as separators.
            return c in (" ", "\t", "\r", "\n") or ord(c) == 0x202F

        doc_tokens = []
        char_to_word_offset = []
        prev_is_whitespace = True
        for c in paragraph_text:
            if is_whitespace(c):
                prev_is_whitespace = True
            else:
                if prev_is_whitespace:
                    doc_tokens.append(c)
                else:
                    doc_tokens[-1] += c
                prev_is_whitespace = False
            char_to_word_offset.append(len(doc_tokens) - 1)

        doc_tokens = doc_tokens[:1000]

        # total_num_doc_tokens=len(doc_tokens)
        # cutup_doc_tokens=[]
        # start_token_inds=np.arange(0,total_num_doc_tokens-500,250).tolist()
        # num_batches=len(start_token_inds)

        #doc_tokens=doc_tokens[:500]

        #for batch_num in range(num_batches):
        # batch_num=0
        # start_tok_ind=start_token_inds[batch_num]
        # batch_doc_tokens=doc_tokens[start_tok_ind:start_tok_ind+1500]

        #eval_examples is a list of 10570 'SquadExample' objects
        eval_examples_routes = [
            run_squad.SquadExample(qas_id=0,
                                   question_text=query,
                                   doc_tokens=doc_tokens,
                                   orig_answer_text=None,
                                   start_position=None,
                                   end_position=None)
        ]

        eval_features_routes = run_squad.convert_examples_to_features(
            examples=eval_examples_routes,
            tokenizer=tokenizer,
            max_seq_length=400,  #384,
            doc_stride=300,  #128,
            max_query_length=64,
            is_training=False)

        #print(eval_features_routes)

        #all_input_ids, all_input_mask, and all_segment_ids are Tensors w/ size([100, 384])
        #all_example_index is just list w/ #s 0:99
        input_ids_routes = torch.tensor(
            [f.input_ids for f in eval_features_routes], dtype=torch.long)
        input_mask_routes = torch.tensor(
            [f.input_mask for f in eval_features_routes], dtype=torch.long)
        segment_ids_routes = torch.tensor(
            [f.segment_ids for f in eval_features_routes], dtype=torch.long)
        example_index_routes = torch.arange(input_ids_routes.size(0),
                                            dtype=torch.long)
        eval_data_routes = TensorDataset(input_ids_routes, input_mask_routes,
                                         segment_ids_routes,
                                         example_index_routes)

        model.eval()
        #input_ids_routes=all_input_ids_routes
        #input_mask_routes=all_input_mask_routes
        #segment_ids_routes=all_segment_ids_routes
        #example_indices_routes=all_example_index_routes
        input_ids_routes = input_ids_routes.to(device)
        input_mask_routes = input_mask_routes.to(device)
        segment_ids_routes = segment_ids_routes.to(device)

        #batch_start_logits and batch_end_logits are both size [bs,384]
        with torch.no_grad():
            batch_start_logits_routes, batch_end_logits_routes = model(
                input_ids_routes, segment_ids_routes, input_mask_routes)

        # THIS SECTION TRYING A NEW APPROACH
        RawResult = collections.namedtuple(
            "RawResult", ["unique_id", "start_logits", "end_logits"])
        _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "PrelimPrediction", [
                "feature_index", "start_index", "end_index", "start_logit",
                "end_logit"
            ])
        predict_batch_size = 8
        all_results = []
        for i, example_index in enumerate(example_index_routes):
            #start_logits and end_logits are both lists of len 384
            start_logits_routes = batch_start_logits_routes[i].detach().cpu().tolist()
            end_logits_routes = batch_end_logits_routes[i].detach().cpu().tolist()
            eval_feature_routes = eval_features_routes[example_index.item()]
            unique_id_routes = int(eval_feature_routes.unique_id)
            all_results.append(
                RawResult(unique_id=unique_id_routes,
                          start_logits=start_logits_routes,
                          end_logits=end_logits_routes))

        unique_id_to_result = {}
        for result in all_results:
            unique_id_to_result[result.unique_id] = result

        #n_best_size: the total number of n-best predictions to generate in the nbest_predictions.json
        n_best_size = 20
        max_answer_length = 30
        prelim_predictions = []
        for (feature_index, feature) in enumerate(eval_features_routes):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = run_squad._get_best_indexes(
                result.start_logits, n_best_size)
            end_indexes = run_squad._get_best_indexes(result.end_logits,
                                                      n_best_size)
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(
                            start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index]))

        #prelim_predictions is a list of PrelimPrediction's
        #example: PrelimPrediction(feature_index=0, start_index=30, end_index=31, start_logit=7.1346, end_logit=5.40855)
        prelim_predictions = sorted(prelim_predictions,
                                    key=lambda x:
                                    (x.start_logit + x.end_logit),
                                    reverse=True)

        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "NbestPrediction", ["text", "start_logit", "end_logit"])

        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = eval_features_routes[pred.feature_index]
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = " ".join(tok_tokens)
            # De-tokenize WordPieces that have been split off.
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")
            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)
            final_text = run_squad.get_final_text(tok_text,
                                                  orig_text,
                                                  do_lower_case=True,
                                                  verbose_logging=False)
            if final_text in seen_predictions:
                continue
            seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(text=final_text,
                                 start_logit=pred.start_logit,
                                 end_logit=pred.end_logit))

        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(
                _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))

        assert len(nbest) >= 1

        total_scores = []
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)

        probs = run_squad._compute_softmax(total_scores)

        #print(nbest[0].text)

        # END SECTION TRYING A NEW APPROACH

        the_answer = nbest[0].text

        # #tokenized input
        # document_tokens = tokenizer.tokenize(form.the_document.data)
        # query_tokens = tokenizer.tokenize(form.the_query.data)
        # all_tokens= ['[CLS]'] + query_tokens + ['[SEP]']  + document_tokens + ['[SEP]']
        # # Convert tokens to vocabulary indices
        # all_indices= tokenizer.convert_tokens_to_ids(all_tokens)
        # # Define sentence A and B indices associated to 1st and 2nd sentences
        # query_segids = [0 for i in range(len(query_tokens) +1 )]
        # document_segids = [1 for i in range(len(document_tokens) +2 )]
        # all_segids= query_segids + document_segids
        # assert len(all_segids) == len(all_indices)
        # # Convert inputs to PyTorch tensors
        # tokens_tensor = torch.tensor([all_indices])
        # segments_tensors = torch.tensor([all_segids])
        # input_mask = torch.tensor([1 for i in range(len(all_segids))])
        # input_mask=input_mask.view(1,-1)
        # # Predict all tokens
        # with torch.no_grad(): #https://discuss.pytorch.org/t/model-eval-vs-with-torch-no-grad/19615/8
        #     start_logits, end_logits = model(tokens_tensor, segments_tensors,input_mask)
        # start_ind=torch.argmax(start_logits).item()
        # end_ind=torch.argmax(end_logits).item()
        #the_answer=all_tokens[start_ind:end_ind+1]

        return render_template('index.html',
                               title='Home',
                               form=form,
                               wik_url=wik_url,
                               the_wik_search=form.the_wik_search.data,
                               the_query=form.the_query.data,
                               the_answer=the_answer)

        #flash('Your Query: {}'.format(
        #    form.the_query.data))
        #flash('The Document: {}'.format(
        #    form.the_document.data))
        #return redirect('/index')
    return render_template(
        'index.html',
        title='Home',
        form=form,
        wik_url="https://en.wikipedia.org/wiki/Janis_Joplin",
        the_wik_search=None,
        the_query=None,
        the_answer="January 19, 1943")
Code example #12
File: routes.py  Project: MarcoDaphne/grandpy_bot_p7
def index():
    form = QueryForm()
    if form.validate_on_submit():
        answer = Answer().answer_user_query(form.query.data)
        return jsonify(answer)
    return render_template('index.html', form=form)