Example #1
def autocomplete_ingredients(food_item):
	# Build a SQL LIKE pattern from the search term.
	search = '%' + str(food_item) + '%'

	query = """SELECT Food_Item.item_id FROM Food_Item WHERE Food_Item.name LIKE %s"""
	result = functions.get_result(query, [search])

	if result is None:
		return json.dumps([])

	# Collect the ingredient names of every matching item.
	all_ingr = []
	for row in result:
		ingr_list = functions.get_item_ingredients(row["item_id"])
		for ingredient in ingr_list:
			all_ingr.append(ingredient["name"])

	# Deduplicate the names before serializing.
	all_ingr_names = list(set(all_ingr))

	return json.dumps(all_ingr_names)
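
All of these database examples go through functions.get_result(query, params), whose implementation is not shown on this page. A minimal sketch of such a helper, assuming a MySQL-style database accessed through pymysql (the %s placeholder syntax suggests MySQLdb or pymysql) and dictionary-shaped rows, could look like the following; the connection parameters are placeholders:

# Hypothetical sketch of functions.get_result; the real helper is not shown.
# Connection parameters are placeholders; DictCursor yields rows as dicts,
# matching the row["item_id"] access above.
import pymysql

def get_result(query, params):
	conn = pymysql.connect(host='localhost', user='user', password='secret', db='food_db', cursorclass=pymysql.cursors.DictCursor)
	try:
		with conn.cursor() as cursor:
			cursor.execute(query, params)
			rows = cursor.fetchall()
		# Return None for an empty result set, matching the check in autocomplete_ingredients.
		return list(rows) if rows else None
	finally:
		conn.close()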
Example #2
def test():
	query = "SELECT Food_Item.name FROM Food_Item"
	results = functions.get_result(query, [])

	# Convert the row dicts into a plain list of name strings.
	new_results = [str(r['name']) for r in results]

	return render_template('test.html', food_items=json.dumps(new_results))
Example #3
def new_item():
	if not session.get('user_id'):
		flash(functions.get_flash_message("not_logged_in"))
		return redirect(url_for('login'))


	back_url = request.form['back_url']

	query = "SELECT Food_Item.name FROM Food_Item"
	results = functions.get_result(query, [])

	# Convert the row dicts into a plain list of name strings.
	new_results = [str(r['name']) for r in results]

	return render_template('newitem.html', food_items=json.dumps(new_results), back_url=back_url)
Example #4
# Split the solutions data into local solutions and bias terms.
solutions, bias = functions.divide_solutions_bias(solutions_data)

# List of per-experiment evaluation results.
evaluations_result = []

for num_experiment in range(1, 101):
    print(num_experiment)
    # Load the target data, drop the CSV header row, and convert the fields to floats.
    data = functions.read_csv(read_filename)
    del data[0]
    data = functions.transform_to_float(data)
    # Evolve the population for num_execute generations.
    for num in range(num_execute):
        data = next_generation_MGG_improve(data, solutions, bias, num_parents, num_children, num_elite_preservation, num_dimentions)
    # Write the new generation to a CSV file for this experiment.
    functions.write_csv(write_filename + '_%i' % num_experiment, data)

    # Evaluate the final population and record its result vector.
    evaluations = functions.get_evaluations_list(data, solutions, bias)
    evaluation_vector = functions.get_result(data, evaluations, num_experiment, functions.get_best_solution_index(bias), solutions)
    evaluations_result.append(evaluation_vector)
# Aggregate the per-experiment vectors and write the summary to file.
final_result = functions.get_final_result(evaluations_result)

functions.write_result(result_file, evaluations_result, final_result)
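
functions.divide_solutions_bias is not shown either. A plausible sketch, assuming each row of the solutions CSV holds a solution vector with its bias value in the last column (the column layout is an assumption):

# Hypothetical sketch of functions.divide_solutions_bias; the CSV column layout is an assumption.
def divide_solutions_bias(solutions_data):
    solutions = [row[:-1] for row in solutions_data]  # every column except the last
    bias = [row[-1] for row in solutions_data]        # last column holds the bias
    return solutions, bias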
Example #5
# Load the local-solutions file, drop the CSV header row, and convert the fields to floats.
solutions_data = functions.read_csv(solutions_file)
del solutions_data[0]
solutions_data = functions.transform_to_float(solutions_data)

# Split into local solutions and bias terms.
solutions, bias = functions.divide_solutions_bias(solutions_data)
solutions = np.array(solutions)
bias = np.array(bias)

# List of per-experiment evaluation results.
evaluations_result = []

for num_experiment in range(1, 101):
    print(num_experiment)
    # Generate a fresh random population for this experiment.
    random_matrix = make_random_matrix(5000, 100)

    # Evaluate and rank the population, then keep only the top 100 individuals.
    evaluations = functions.get_evaluations_list(random_matrix, solutions, bias)
    rankings = functions.get_ranking_list(evaluations)
    matrix100, evaluations100, rankings100 = take_top_100(
        random_matrix, evaluations, rankings, solutions, bias)

    evaluation_vector = functions.get_result(
        matrix100, evaluations100, num_experiment,
        functions.get_best_solution_index(bias), solutions)
    evaluations_result.append(evaluation_vector)

final_result = functions.get_final_result(evaluations_result)
functions.write_result(result_file, evaluations_result, final_result)
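
make_random_matrix is likewise undefined in this snippet. A minimal sketch, assuming it draws a population of uniform random values as a NumPy array (the [0, 1) value range is an assumption):

# Hypothetical sketch of make_random_matrix; the value range is an assumption.
def make_random_matrix(num_individuals, num_dimensions):
    # One row per individual, one column per dimension, uniform in [0, 1).
    return np.random.rand(num_individuals, num_dimensions)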
Example #6

# Use a desktop browser user agent so the request doesn't time out.
a = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
config = Config()
config.browser_user_agent = a
#-----------------------------------------------------------------------
page = 1
while page <= explore:
    print()
    print('Page {}...'.format(page))
    print('-' * 80)

    soup = BeautifulSoup(requests.get(url, headers=headers).content, 'html.parser')
    search_div = soup.find_all(class_='rc')  # find all divs that contain a search result
    titles, links, descriptions = functions.get_result(search_div, titles, links, descriptions)


    next_link = soup.select_one('a:contains("Next")')
    if not next_link:
        break

    url = 'https://google.com' + next_link['href']
    page += 1



#-----------------------------------------------------------------------

writer.writer(titles, links, descriptions, config)  # write the collected results to file
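
In this last example functions.get_result has a different signature: it receives the list of result divs plus the three accumulator lists and returns them extended. A minimal sketch, assuming Google's old result markup with an h3 title inside an anchor and a span of class st for the snippet (both selectors are assumptions):

# Hypothetical sketch of this get_result variant; the selectors are assumptions.
def get_result(search_div, titles, links, descriptions):
    for div in search_div:
        anchor = div.find('a')
        title = div.find('h3')
        snippet = div.find(class_='st')  # snippet span in Google's old markup
        if anchor and title:
            titles.append(title.get_text())
            links.append(anchor['href'])
            descriptions.append(snippet.get_text() if snippet else '')
    return titles, links, descriptions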