def news():
    """IMDB review sentiment check via pre-loaded count/tf-idf LR pipelines.

    GET renders the input form; POST classifies either a row of the test CSV
    (selected by index) or a user-supplied review.  Dead commented-out code
    removed; behavior otherwise unchanged.
    """
    menu = {'ho': 0, 'da': 0, 'ml': 10, 'se': 0, 'co': 0, 'cg': 0, 'cr': 0,
            'wc': 0, 'st': 0, 'cf': 0, 'ac': 1, 're': 0, 'cu': 0, 'nl': 0}
    if request.method == 'GET':
        return render_template('advanced/imdb.html', menu=menu, weather=get_weather())
    else:
        label = '직접 확인'
        test_data = []
        if request.form['optradio'] == 'index':
            index = int(request.form['index'] or 0)
            # TODO(review): hard-coded absolute path — move under static/data/
            df = pd.read_csv('F:/workspace/Flask/03_Module/static/data/imdb_test1.csv')
            test_data.append(df.iloc[index, 0])
            label = '긍정' if df.sentiment[index] else '부정'
        else:
            test_data.append(request.form['review'])
        # imdb_count_lr / imdb_tfidf_lr are module-level pre-loaded pipelines
        pred_cl = '긍정' if imdb_count_lr.predict(test_data)[0] else '부정'
        pred_tl = '긍정' if imdb_tfidf_lr.predict(test_data)[0] else '부정'
        result_dict = {'label': label, 'pred_cl': pred_cl, 'pred_tl': pred_tl}
        return render_template('advanced/imdb_res.html', menu=menu,
                               review=test_data[0], res=result_dict,
                               weather=get_weather())
def iris():
    """Iris classification demo: three pickled models vote on one test row."""
    if request.method == 'GET':
        return render_template('classification/iris.html', menu=menu, weather=get_weather())
    index = int(request.form['index'] or '0')
    df = pd.read_csv('static/data/iris_test.csv')
    scaler = joblib.load('static/model/iris_scaler.pkl')
    features = df.iloc[index, :-1].values.reshape(1, -1)
    scaled = scaler.transform(features)
    label = df.iloc[index, -1]
    species = ['Setosa', 'Versicolor', 'Virginica']
    result = {'index': index, 'label': f'{label} ({species[label]})'}
    for tag in ('lr', 'sv', 'rf'):
        model = joblib.load(f'static/model/iris_{tag}.pkl')
        pred = model.predict(scaled)[0]
        result[f'pred_{tag}'] = f'{pred} ({species[pred]})'
    org = dict(zip(df.columns[:-1], df.iloc[index, :-1]))
    return render_template('classification/iris_res.html', menu=menu,
                           res=result, org=org, weather=get_weather())
def imgopen():
    """Object detection via the ETRI Open API; draws boxes on the upload.

    Fix: the key file and the image were opened without being closed — both
    are now read with context managers, eliminating the file-descriptor leak.
    """
    if request.method == 'GET':
        return render_template('advanced/imgopen.html', menu=menu, weather=get_weather())
    else:
        f_img = request.files['image']
        with open('./static/keys/etri_ai_key.txt') as kfile:
            eai_key = kfile.read(100)
        openApiURL = "http://aiopen.etri.re.kr:8000/ObjectDetect"
        image_file = os.path.join(current_app.root_path, 'static/upload/') + f_img.filename
        f_img.save(image_file)
        _, image_type = os.path.splitext(image_file)
        image_type = 'jpg' if image_type == '.jfif' else image_type[1:]
        with open(image_file, 'rb') as file:
            image_contents = base64.b64encode(file.read()).decode('utf8')
        request_json = {
            "request_id": "reserved field",
            "access_key": eai_key,
            "argument": {"file": image_contents, "type": image_type}
        }
        http = urllib3.PoolManager()
        response = http.request(
            "POST", openApiURL,
            headers={"Content-Type": "application/json; charset=UTF-8"},
            body=json.dumps(request_json))
        result = json.loads(response.data)
        obj_list = result['return_object']['data']
        # annotate each detected object on the uploaded image
        image = Image.open(image_file)
        draw = ImageDraw.Draw(image)
        for obj in obj_list:
            name = obj['class']
            x, y = int(obj['x']), int(obj['y'])
            w, h = int(obj['width']), int(obj['height'])
            draw.text((x + 10, y + 10), name,
                      font=ImageFont.truetype('malgun.ttf', 20), fill=(255, 0, 0))
            draw.rectangle(((x, y), (x + w, y + h)), outline=(255, 0, 0), width=2)
        img_file = os.path.join(current_app.root_path, 'static/img/object.png')
        image.save(img_file)
        # mtime is passed to the template for browser cache-busting
        mtime = int(os.stat(img_file).st_mtime)
        return render_template('advanced/imgopen_res.html', menu=menu,
                               weather=get_weather(), mtime=mtime)
def titanic():
    """Titanic survival prediction: five pickled models on one test row.

    Fixes: removed a leftover debug `print`, consolidated the five
    load/predict pairs into a loop, and replaced the manual append loop
    with a comprehension (same 8 columns, same int casts).
    """
    if request.method == 'GET':
        return render_template('classification/titanic.html', menu=menu, weather=get_weather())
    else:
        index = int(request.form['index'] or '0')
        df = pd.read_csv('static/data/titanic_test.csv')
        scaler = joblib.load('static/model/titanic_scaler.pkl')
        # column 0 is the survival label; the remaining columns are features
        test_data = df.iloc[index, 1:].values.reshape(1, -1)
        test_scaled = scaler.transform(test_data)
        label = df.iloc[index, 0]
        titanic_dict = {'label': label}
        for tag in ('lr', 'sv', 'dt', 'rf', 'kn'):
            model = joblib.load(f'static/model/titanic_{tag}.pkl')
            titanic_dict[f'pred_{tag}'] = model.predict(test_scaled)[0]
        # these feature positions hold integer-valued data; cast for display
        tmp = df.iloc[index, 1:].values
        int_index_list = [0, 1, 3, 4, 6, 7]
        value_list = [int(tmp[i]) if i in int_index_list else tmp[i]
                      for i in range(8)]
        org = dict(zip(df.columns[1:], value_list))
        return render_template('classification/titanic_res.html', menu=menu,
                               weather=get_weather(), res=titanic_dict, org=org)
def pima():
    """Pima diabetes classification demo on one test row.

    Fix: tolerate an empty index field (`or '0'`) like the sibling handlers —
    the original raised ValueError on `int('')` when the field was blank.
    """
    if request.method == 'GET':
        return render_template('classification/pima.html', menu=menu, weather=get_weather())
    else:
        index = int(request.form['index'] or '0')
        df = pd.read_csv('static/data/pima_test.csv')
        scaler = joblib.load('static/model/pima_scaler.pkl')
        test_data = df.iloc[index, :-1].values.reshape(1, -1)
        test_scaled = scaler.transform(test_data)
        label = df.iloc[index, -1]
        result = {'index': index, 'label': label}
        for tag in ('lr', 'sv', 'rf'):
            clf = joblib.load(f'static/model/pima_{tag}.pkl')
            result[f'pred_{tag}'] = clf.predict(test_scaled)[0]
        org = dict(zip(df.columns[:-1], df.iloc[index, :-1]))
        return render_template('classification/pima_res.html', menu=menu,
                               res=result, org=org, weather=get_weather())
def image():
    """Classify an uploaded image with the pre-loaded ResNet model.

    Fix: removed the dead triple-quoted cv2 alternative — PIL performs the
    resize.  Behavior otherwise unchanged.
    """
    if request.method == 'GET':
        return render_template('advanced/image.html', menu=menu, weather=get_weather())
    else:
        f_img = request.files['image']
        file_img = os.path.join(current_app.root_path, 'static/upload/') + f_img.filename
        f_img.save(file_img)
        current_app.logger.debug(f"{f_img.filename}, {file_img}")
        # ResNet expects 224x224 RGB input
        img = np.array(Image.open(file_img).resize((224, 224)))
        yhat = resnet.predict(img.reshape(-1, 224, 224, 3))
        # decode_predictions yields (class_id, name, probability) tuples
        label = decode_predictions(yhat)[0][0]
        mtime = int(os.stat(file_img).st_mtime)
        return render_template('advanced/image_res.html', menu=menu, weather=get_weather(),
                               name=label[1], prob=np.round(label[2] * 100, 2),
                               filename=f_img.filename, mtime=mtime)
def iris():
    """Iris classification demo: five pickled models predict one test row."""
    if request.method == 'GET':
        return render_template('classification/iris.html', menu=menu, weather=get_weather())
    row_no = int(request.form['index'] or '0')
    df = pd.read_csv('static/data/iris_test.csv')
    scaler = joblib.load('static/model/iris_scaler.pkl')
    features = df.iloc[row_no, :-1].values.reshape(1, -1)
    scaled = scaler.transform(features)
    label = df.iloc[row_no, -1]
    species = ['Setosa', 'Versicolor', 'Virginica']
    iris_dict = {'label': f'{label} ({species[label]})'}
    for tag in ('lr', 'sv', 'dt', 'rf', 'kn'):
        model = joblib.load(f'static/model/iris_{tag}.pkl')
        pred = model.predict(scaled)[0]
        iris_dict[f'pred_{tag}'] = f'{pred} ({species[pred]})'
    return render_template('classification/iris_res.html', menu=menu,
                           weather=get_weather(), iris_dict=iris_dict)
def mnist():
    """Show three MNIST test digits and the SVM's predictions for them.

    Fix: close each matplotlib figure after saving — figures otherwise
    accumulate across requests and leak memory.
    """
    if request.method == 'GET':
        return render_template('advanced/mnist.html', menu=menu, weather=get_weather())
    else:
        index = int(request.form['index'] or '0')
        index_list = list(range(index, index + 3))
        df = pd.read_csv('static/data/mnist_test.csv')
        scaler = joblib.load('static/model/mnist_scaler.pkl')
        test_data = df.iloc[index:index + 3, :-1].values
        test_scaled = scaler.transform(test_data)
        label_list = df.iloc[index:index + 3, -1]
        svc = joblib.load('static/model/mnist_sv.pkl')
        pred_sv = svc.predict(test_scaled)
        # render each digit to static/img/mnist{1..3}.png
        img_file_wo_ext = os.path.join(current_app.root_path, 'static/img/mnist')
        for i in range(3):
            digit = test_data[i].reshape(28, 28)
            plt.figure(figsize=(4, 4))
            plt.xticks([]); plt.yticks([])
            img_file = img_file_wo_ext + str(i + 1) + '.png'
            plt.imshow(digit, cmap=plt.cm.binary, interpolation='nearest')
            plt.savefig(img_file)
            plt.close()  # release the figure (prevents a per-request leak)
        mtime = int(os.stat(img_file).st_mtime)
        result_dict = {'index': index_list, 'label': label_list, 'pred_sv': pred_sv}
        return render_template('advanced/mnist_res.html', menu=menu, mtime=mtime,
                               result=result_dict, weather=get_weather())
def naver():
    """Naver movie-review sentiment via four pre-loaded pipelines."""
    if request.method == 'GET':
        return render_template('advanced/naver.html', menu=menu, weather=get_weather())
    if request.form['option'] == 'index':
        idx = int(request.form['index'] or '0')
        df_test = pd.read_csv('static/data/naver/test.tsv', sep='\t')
        org_review = df_test.document[idx]
        label = '긍정' if df_test.label[idx] else '부정'
    else:
        org_review = request.form['review']
        label = '리뷰 직접 입력'
    # keep Korean characters only, tokenize with Okt, drop stopwords
    cleaned = re.sub("[^ㄱ-ㅎㅏ-ㅣ가-힣 ]", "", org_review)
    okt = Okt()
    stopwords = ['의', '가', '이', '은', '들', '는', '좀', '잘', '걍', '과', '도',
                 '를', '으로', '자', '에', '와', '한', '하다', '을']
    tokens = okt.morphs(cleaned, stem=True)
    test_data = [' '.join(w for w in tokens if w not in stopwords)]

    def _kor(flag):
        return '긍정' if flag else '부정'

    result_dict = {
        'label': label,
        'pred_cl': _kor(naver_count_lr.predict(test_data)[0]),
        'pred_cn': _kor(naver_count_nb.predict(test_data)[0]),
        'pred_tl': _kor(naver_tfidf_lr.predict(test_data)[0]),
        'pred_tn': _kor(naver_tfidf_nb.predict(test_data)[0]),
    }
    return render_template('advanced/naver_res.html', menu=menu, weather=get_weather(),
                           res=result_dict, review=org_review)
def imdb():
    """IMDB review sentiment: count/tf-idf LR pipelines loaded per request."""
    if request.method == 'GET':
        return render_template('advanced/imdb.html', menu=menu, weather=get_weather())
    if request.form['option'] == 'index':
        idx = int(request.form['index'] or '0')
        df_test = pd.read_csv('static/data/IMDB_test.csv')
        review_text = df_test.iloc[idx, 0]
        label = '긍정' if df_test.sentiment[idx] else '부정'
    else:
        review_text = request.form['review']
        label = '직접 확인'
    test_data = [review_text]
    count_lr = joblib.load('static/model/imdb_count_lr.pkl')
    tfidf_lr = joblib.load('static/model/imdb_tfidf_lr.pkl')
    result_dict = {
        'label': label,
        'pred_cl': '긍정' if count_lr.predict(test_data)[0] else '부정',
        'pred_tl': '긍정' if tfidf_lr.predict(test_data)[0] else '부정',
    }
    return render_template('advanced/imdb_res.html', menu=menu, review=test_data[0],
                           res=result_dict, weather=get_weather())
def news():
    """20-newsgroups topic classification on one row of the test CSV."""
    target_names = ['alt.atheism', 'comp.graphics', 'comp.os.ms-windows.misc',
                    'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
                    'comp.windows.x', 'misc.forsale', 'rec.autos',
                    'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey',
                    'sci.crypt', 'sci.electronics', 'sci.med', 'sci.space',
                    'soc.religion.christian', 'talk.politics.guns',
                    'talk.politics.mideast', 'talk.politics.misc',
                    'talk.religion.misc']
    if request.method == 'GET':
        return render_template('advanced/news.html', menu=menu, weather=get_weather())
    idx = int(request.form['index'] or '0')
    df = pd.read_csv('static/data/news/test.csv')
    label = f'{df.target[idx]} ({target_names[df.target[idx]]})'
    sample = [df.data[idx]]

    def _tagged(pred):
        # display as "<id> (<topic name>)"
        return f'{pred} ({target_names[pred]})'

    result_dict = {'index': idx, 'label': label}
    for key, stem in (('pred_c_lr', 'news_count_lr'),
                      ('pred_t_lr', 'news_tfidf_lr'),
                      ('pred_t_sv', 'news_tfidf_sv')):
        model = joblib.load(f'static/model/{stem}.pkl')
        result_dict[key] = _tagged(model.predict(sample)[0])
    return render_template('advanced/news_res.html', menu=menu, news=df.data[idx],
                           res=result_dict, weather=get_weather())
def translate():
    """Translate Korean text with both Papago and Kakao APIs.

    Fix: removed the large commented-out "카카오 번역2" dead-code block.
    Behavior otherwise unchanged (template keys 'text'/'n_text'/'kaka_text').
    """
    if request.method == 'GET':
        return render_template('nat_lang/nl.html', menu=menu, weather=get_weather())
    else:
        text = request.form['text']
        label = request.form['label']
        # --- Naver Papago translation ---
        with open('static/keys/papago_key.json') as nkey:
            json_obj = json.load(nkey)
        client_id = list(json_obj.keys())[0]
        client_secret = json_obj[client_id]
        n_url = "https://naveropenapi.apigw.ntruss.com/nmt/v1/translation"
        n_mapping = {'en': 'en', 'jp': 'ja', 'cn': 'zh-CN', 'fr': 'fr', 'es': 'es'}
        val = {"source": 'ko', "target": n_mapping[label], "text": text}
        headers = {"X-NCP-APIGW-API-KEY-ID": client_id,
                   "X-NCP-APIGW-API-KEY": client_secret}
        result = requests.post(n_url, data=val, headers=headers).json()
        n_text = result['message']['result']['translatedText']
        # --- Kakao translation ---
        with open('static/keys/kakaoaikey.txt') as kfile:
            kai_key = kfile.read(100)
        # Kakao's GET endpoint cannot take newlines in the query string
        text = text.replace('\n', '')
        text = text.replace('\r', '')
        k_url = f'https://dapi.kakao.com/v2/translation/translate?query={quote(text)}&src_lang=kr&target_lang={label}'
        result = requests.get(k_url, headers={
            "Authorization": "KakaoAK " + kai_key
        }).json()
        tr_text_list = result['translated_text'][0]
        k_translated_text = '\n'.join([tmp_text for tmp_text in tr_text_list])
        n_dt = {'text': text, 'n_text': n_text, 'kaka_text': k_translated_text}
        return render_template('nat_lang/nl_res.html', menu=menu,
                               weather=get_weather(), ndt=n_dt)
def imdb():
    """IMDB review sentiment via the module-level count/tf-idf LR pipelines."""
    if request.method == 'GET':
        return render_template('advanced/imdb.html', menu=menu, weather=get_weather())
    label = '직접 확인'
    if request.form['option'] == 'index':
        idx = int(request.form['index'] or '0')
        df = pd.read_csv('static/data/imdb_test.csv')
        sample = [df.review[idx]]
        label = '(긍정)' if df.sentiment[idx] else '(부정)'
    else:
        sample = [request.form['review']]
    imdb_dict = {
        'label': label,
        'pred_c_lr': imdb_count_lr.predict(sample)[0],
        'pred_t_lr': imdb_tfidf_lr.predict(sample)[0],
    }
    return render_template('advanced/imdb_res.html', menu=menu, imdb=sample[0],
                           ies=imdb_dict, weather=get_weather())
def stock():
    """Prophet price forecast for a selected KOSPI/KOSDAQ ticker."""
    menu = {'ho': 0, 'da': 1, 'ml': 0, 'se': 0, 'co': 0,
            'cg': 0, 'cr': 0, 'st': 1, 'wc': 0}
    if request.method == 'GET':
        return render_template('stock/stock.html', menu=menu, weather=get_weather(),
                               kospi=kospi_dict, kosdaq=kosdaq_dict)
    market = request.form['market']
    if market == 'KS':
        code = request.form['kospi_code']
        company = kospi_dict[code]
        code = code + '.KS'
    else:
        code = request.form['kosdaq_code']
        company = kosdaq_dict[code]
        code = code + '.KQ'
    learn_period = int(request.form['learn'])
    pred_period = int(request.form['pred'])
    # training window: learn_period years back, up to yesterday
    today = datetime.now()
    start_learn = today - timedelta(days=learn_period * 365)
    end_learn = today - timedelta(days=1)
    stock_data = pdr.DataReader(code, data_source='yahoo',
                                start=start_learn, end=end_learn)
    current_app.logger.debug(f"get stock data: {code}")
    # Prophet wants columns 'ds' (date) and 'y' (value)
    df = pd.DataFrame({'ds': stock_data.index, 'y': stock_data.Close})
    df = df.reset_index().drop(columns='Date')
    model = Prophet(daily_seasonality=True)
    model.fit(df)
    forecast = model.predict(model.make_future_dataframe(periods=pred_period))
    fig = model.plot(forecast)
    img_file = os.path.join(current_app.root_path, 'static/img/stock.png')
    fig.savefig(img_file)
    mtime = int(os.stat(img_file).st_mtime)
    return render_template('stock/stock_res.html', menu=menu, weather=get_weather(),
                           mtime=mtime, company=company, code=code)
def iris():
    """Predict one iris feature from the other three via linear regression."""
    menu = {'ho': 0, 'da': 0, 'ml': 10, 'se': 0, 'co': 0, 'cg': 0, 'cr': 0,
            'wc': 0, 'st': 0, 'cf': 0, 'ac': 0, 're': 1, 'cu': 0, 'nl': 0}
    if request.method == 'GET':
        return render_template('regression/iris.html', menu=menu, weather=get_weather())
    index = int(request.form['index'])
    feature_name = request.form['feature']
    column_dict = {'sl': 'Sepal length', 'sw': 'Sepal width',
                   'pl': 'Petal length', 'pw': 'Petal width',
                   'species': ['Setosa', 'Versicolor', 'Virginica']}
    column_list = list(column_dict.keys())
    # fit on the training split, leaving out the feature to be predicted
    df = pd.read_csv('static/data/iris_train.csv')
    df.columns = column_list
    lr = LinearRegression()
    lr.fit(df.drop(columns=feature_name, axis=1).values, df[feature_name].values)
    weight, bias = lr.coef_, lr.intercept_
    # evaluate on one row of the test split
    df_test = pd.read_csv('static/data/iris_test.csv')
    df_test.columns = column_list
    X_test = df_test.drop(columns=feature_name, axis=1).values[index]
    pred_value = np.dot(X_test, weight.T) + bias
    row = list(df_test.iloc[index, :-1].values)
    row.append(column_dict['species'][int(df_test.iloc[index, -1])])
    org = dict(zip(column_list, row))
    pred = dict(zip(column_list[:-1], [0, 0, 0, 0]))
    pred[feature_name] = np.round(pred_value, 2)
    return render_template('regression/iris_res.html', menu=menu, weather=get_weather(),
                           index=index, org=org, pred=pred,
                           feature=column_dict[feature_name])
def translate():
    """Translate Korean text via Papago and Kakao, shown side by side."""
    if request.method == 'GET':
        return render_template('natlang/translate.html', menu=menu, weather=get_weather())
    text = request.form['text']
    lang = request.form['lang']
    # --- Naver Papago ---
    with open('static/keys/papago_key.json') as nkey:
        key_obj = json.load(nkey)
    cid = list(key_obj.keys())[0]
    csecret = key_obj[cid]
    papago_target = {'en': 'en', 'jp': 'ja', 'cn': 'zh-CN',
                     'fr': 'fr', 'es': 'es'}[lang]
    result = requests.post(
        "https://naveropenapi.apigw.ntruss.com/nmt/v1/translation",
        data={"source": 'ko', "target": papago_target, "text": text},
        headers={"X-NCP-APIGW-API-KEY-ID": cid,
                 "X-NCP-APIGW-API-KEY": csecret}).json()
    naver_res = result['message']['result']['translatedText']
    # --- Kakao ---
    with open('static/keys/kakaoaikey.txt') as kfile:
        kai_key = kfile.read(100)
    # Kakao's GET endpoint cannot take newlines in the query string
    text = text.replace('\n', '').replace('\r', '')
    k_url = f'https://dapi.kakao.com/v2/translation/translate?query={quote(text)}&src_lang=kr&target_lang={lang}'
    result = requests.get(k_url, headers={
        "Authorization": "KakaoAK " + kai_key
    }).json()
    kakao_res = '\n'.join(result['translated_text'][0])
    result_dict = {'input': text, 'lang': lang,
                   'naver': naver_res, 'kakao': kakao_res}
    return render_template('natlang/translate_res.html', menu=menu,
                           weather=get_weather(), res=result_dict)
def titanic():
    """Titanic survival prediction: three pickled models on one test row.

    BUG FIX: the label is column 0 of titanic_test.csv (see `label` and
    `org` below, and the sibling handler that uses `iloc[index, 1:]`), so
    the features are columns 1:, not :-1 — the original fed the label to
    the scaler and dropped the last feature.
    """
    menu = {'ho': 0, 'da': 0, 'ml': 10, 'se': 0, 'co': 0, 'cg': 0, 'cr': 0,
            'wc': 0, 'st': 0, 'cf': 1, 'ac': 0, 're': 0, 'cu': 0, 'nl': 0}
    if request.method == 'GET':
        return render_template('classification/titanic.html', menu=menu, weather=get_weather())
    else:
        index = int(request.form['index'])
        df = pd.read_csv('static/data/titanic_test.csv')
        scaler = joblib.load('static/model/titanic_scaler.pkl')
        # features are everything after the label column
        test_data = df.iloc[index, 1:].values.reshape(1, -1)
        test_scaled = scaler.transform(test_data)
        label = df.iloc[index, 0]
        lrc = joblib.load('static/model/titanic_lr.pkl')
        svc = joblib.load('static/model/titanic_sv.pkl')
        rfc = joblib.load('static/model/titanic_rf.pkl')
        pred_lr = lrc.predict(test_scaled)
        pred_sv = svc.predict(test_scaled)
        pred_rf = rfc.predict(test_scaled)
        result = {
            'index': index, 'label': label,
            'pred_lr': pred_lr[0], 'pred_sv': pred_sv[0], 'pred_rf': pred_rf[0]
        }
        # these feature positions hold integer-valued data; cast for display
        tmp = df.iloc[index, 1:].values
        int_index_list = [0, 1, 3, 4, 6, 7]
        value_list = [int(tmp[i]) if i in int_index_list else tmp[i]
                      for i in range(8)]
        org = dict(zip(df.columns[1:], value_list))
        return render_template('classification/titanic_res.html', menu=menu,
                               res=result, org=org, weather=get_weather())
def detect():
    """ETRI object-detection: upload an image, draw boxes, report class names."""
    if request.method == 'GET':
        return render_template('advanced/detect.html', menu=menu, weather=get_weather())
    upload = request.files['image']
    img_path = os.path.join(current_app.root_path, 'static/upload/') + upload.filename
    upload.save(img_path)
    _, ext = os.path.splitext(upload.filename)
    image_type = 'jpg' if ext == '.jfif' else ext[1:]
    current_app.logger.debug(f"{upload.filename}, {image_type}")
    # 공공 인공지능 Open API - 객체 검출
    with open('static/keys/etri_ai_key.txt') as kfile:
        eai_key = kfile.read(100)
    with open(img_path, 'rb') as file:
        image_contents = base64.b64encode(file.read()).decode('utf8')
    payload = {
        "request_id": "reserved field",
        "access_key": eai_key,
        "argument": {"file": image_contents, "type": image_type},
    }
    response = urllib3.PoolManager().request(
        "POST", "http://aiopen.etri.re.kr:8000/ObjectDetect",
        headers={"Content-Type": "application/json; charset=UTF-8"},
        body=json.dumps(payload))
    if response.status != 200:
        # API failure: bounce back to the upload form
        return redirect(url_for('aclsf_bp.detect'))
    obj_list = json.loads(response.data)['return_object']['data']
    image = Image.open(img_path)
    draw = ImageDraw.Draw(image)
    object_list = []
    for obj in obj_list:
        name = obj['class']
        x, y = int(obj['x']), int(obj['y'])
        w, h = int(obj['width']), int(obj['height'])
        draw.text((x + 10, y + 10), name,
                  font=ImageFont.truetype('malgun.ttf', 20), fill=(255, 0, 0))
        draw.rectangle(((x, y), (x + w, y + h)), outline=(255, 0, 0), width=2)
        object_list.append(name)
    object_img = os.path.join(current_app.root_path, 'static/img/object.' + image_type)
    image.save(object_img)
    mtime = int(os.stat(object_img).st_mtime)
    return render_template('advanced/detect_res.html', menu=menu, weather=get_weather(),
                           object_list=', '.join(object_list),
                           filename='object.' + image_type, mtime=mtime)
def iris():
    """Iris classification on one test row; three models plus species name."""
    menu = {'ho': 0, 'da': 0, 'ml': 10, 'se': 0, 'co': 0, 'cg': 0, 'cr': 0,
            'wc': 0, 'st': 0, 'cf': 1, 'ac': 0, 're': 0, 'cu': 0, 'nl': 0}
    if request.method == 'GET':
        return render_template('classification/iris.html', menu=menu, weather=get_weather())
    index = int(request.form['index'])
    df = pd.read_csv('static/data/iris_test.csv')
    scaler = joblib.load('static/model/iris_scaler.pkl')
    scaled = scaler.transform(df.iloc[index, :-1].values.reshape(1, -1))
    label = df.iloc[index, -1]
    species = ['Setosa', 'Versicolor', 'Virginica']
    result = {'index': index, 'label': label}
    for tag in ('lr', 'sv', 'rf'):
        clf = joblib.load(f'static/model/iris_{tag}.pkl')
        result[f'pred_{tag}'] = clf.predict(scaled)[0]
    # species name is derived from the SVM prediction (as in the original)
    result['species'] = species[result['pred_sv']]
    org = dict(zip(df.columns[:], df.iloc[index, :]))
    return render_template('classification/iris_res.html', menu=menu,
                           res=result, org=org, weather=get_weather())
def digits():
    """Render five digit images and three models' predictions for them.

    Fixes: removed the dead `index_list = list(range(...))` assignment that
    was immediately overwritten, and close each matplotlib figure after
    saving (figures otherwise accumulate across requests).
    """
    if request.method == 'GET':
        return render_template('advanced/digits.html', menu=menu, weather=get_weather())
    else:
        index = int(request.form['index'] or '0')
        digits = load_digits()
        df = pd.read_csv('static/data/digits_test.csv')
        img_index_list = df['index'].values
        target_index_list = df['target'].values
        index_list = img_index_list[index:index + 5]
        # NOTE(review): scaler is fit on the test file itself each request —
        # confirm whether a training-time scaler should be loaded instead
        scaler = MinMaxScaler()
        scaled_test = scaler.fit_transform(
            df.drop(columns=['index', 'target'], axis=1))
        test_data = scaled_test[index:index + 5, :]
        label_list = target_index_list[index:index + 5]
        lrc = joblib.load('static/model/digits_lr.pkl')
        svc = joblib.load('static/model/digits_sv.pkl')
        rfc = joblib.load('static/model/digits_rf.pkl')
        pred_lr = lrc.predict(test_data)
        pred_sv = svc.predict(test_data)
        pred_rf = rfc.predict(test_data)
        # render each selected digit to static/img/digit{1..5}.png
        img_file_wo_ext = os.path.join(current_app.root_path, 'static/img/digit')
        for k, i in enumerate(index_list):
            plt.figure(figsize=(2, 2))
            plt.xticks([])
            plt.yticks([])
            img_file = img_file_wo_ext + str(k + 1) + '.png'
            plt.imshow(digits.images[i], cmap=plt.cm.binary,
                       interpolation='nearest')
            plt.savefig(img_file)
            plt.close()  # release the figure
        mtime = int(os.stat(img_file).st_mtime)
        result_dict = {'index': index_list, 'label': label_list,
                       'pred_lr': pred_lr, 'pred_sv': pred_sv, 'pred_rf': pred_rf}
        return render_template('advanced/digits_res.html', menu=menu, mtime=mtime,
                               result=result_dict, weather=get_weather())
def naver():
    """Naver movie-review sentiment — four pickled pipelines (count/tfidf × LR/NB)."""
    menu = {'ho': 0, 'da': 0, 'ml': 10, 'se': 0, 'co': 0, 'cg': 0, 'cr': 0,
            'wc': 0, 'st': 0, 'cf': 0, 'ac': 1, 're': 0, 'cu': 0, 'nl': 0}
    if request.method == 'GET':
        return render_template('advanced/naverR.html', menu=menu, weather=get_weather())
    if request.form['optradio'] == 'index':
        # row chosen from the held-out test set
        idx = int(request.form['index'] or 0)
        df = pd.read_csv('F:/workspace/machine-Learning/00.data/naverMovie/test.tsv', sep='\t')
        org_review = df.document[idx]
        state = '긍정' if df.label[idx] else '부정'
    else:
        # review typed in by the user
        org_review = request.form['review']
        state = '직접 입력'
    # keep Korean characters only, tokenize with Okt, drop stopwords
    cleaned = re.sub("[^ㄱ-ㅎㅏ-ㅣ가-힣 ]", "", org_review)
    tokens = Okt().morphs(cleaned, stem=True)
    stopwords = ['의', '가', '이', '은', '들', '는', '좀', '잘', '걍', '과',
                 '도', '를', '으로', '자', '에', '와', '한', '하다']
    test_data = [' '.join(w for w in tokens if w not in stopwords)]

    def _kor(flag):
        return '긍정' if flag else '부정'

    result_dict = {'state': state}
    for key in ('cl', 'cn', 'tn', 'tl'):
        pipeline = joblib.load(f'static/model/Npipeline_{key}.pkl')
        result_dict[f'pred_{key}'] = _kor(pipeline.predict(test_data)[0])
    return render_template('advanced/naverR_res.html', menu=menu, review=org_review,
                           res=result_dict, weather=get_weather())
def diabetes():
    """One-feature linear regression on the diabetes data, with a plot.

    Fixes: removed the commented-out duplicate `pred` line, and close the
    matplotlib figure after saving to avoid leaking figures across requests.
    """
    if request.method == 'GET':
        return render_template('regression/diabetes.html', menu=menu, weather=get_weather())
    else:
        index = int(request.form['index'] or '0')
        feature_name = request.form['feature']
        df = pd.read_csv('static/data/diabetes_train.csv')
        X = df[feature_name].values.reshape(-1, 1)
        y = df.target.values
        lr = LinearRegression()
        lr.fit(X, y)
        weight, bias = lr.coef_, lr.intercept_
        df_test = pd.read_csv('static/data/diabetes_test.csv')
        X_test = df_test[feature_name][index]
        y_test = df_test.target[index]
        pred = np.round(X_test * weight[0] + bias, 2)
        # plot the fitted line over the training scatter, plus the test point
        y_min = np.min(X) * weight[0] + bias
        y_max = np.max(X) * weight[0] + bias
        plt.figure()
        plt.scatter(X, y, label='train')
        plt.plot([np.min(X), np.max(X)], [y_min, y_max], 'r', lw=3)
        plt.scatter([X_test], [y_test], c='r', marker='*', s=100, label='test')
        plt.grid()
        plt.legend()
        plt.title(f'{feature_name}')
        img_file = os.path.join(current_app.root_path, 'static/img/diabetes.png')
        plt.savefig(img_file)
        plt.close()  # release the figure
        mtime = int(os.stat(img_file).st_mtime)
        result_dict = {'index': index, 'feature': feature_name,
                       'y': y_test, 'pred': pred}
        return render_template('regression/diabetes_res.html', menu=menu,
                               weather=get_weather(), mtime=mtime, res=result_dict)
def music_jquery():
    """Genie music chart rendered for the jQuery-based crawling view."""
    menu = {'ho': 0, 'da': 1, 'ml': 0, 'se': 0, 'co': 0, 'cg': 0,
            'cr': 1, 'wc': 0, 'cf': 0, 'ac': 0, 're': 0, 'cu': 0}
    return render_template('crawling/music_jquery.html', menu=menu,
                           weather=get_weather(), music_list=cu.genie())
def coffee():
    """Coffee-shop cartogram: upload a CSV, draw the chosen metric, list top-10."""
    if request.method == 'GET':
        return render_template('cartogram/coffee.html', menu=menu,
                               weather=get_weather_main())
    item = request.form['item']
    # save the uploaded CSV under static/upload/
    f = request.files['csv']
    filename = os.path.join(current_app.root_path, 'static/upload/') + f.filename
    f.save(filename)
    current_app.logger.info(f'(unknown) is saved.')
    # read the store counts as ints so labels render without decimals
    coffee_index = pd.read_csv(filename, dtype={'이디야 매장수': int,
                                                '스타벅스 매장수': int,
                                                '커피빈 매장수': int,
                                                '빽다방 매장수': int})
    # one colormap per selectable metric
    color_dict = {'커피지수': 'Reds', '이디야 매장수': 'Blues',
                  '스타벅스 매장수': 'Greens', '커피빈 매장수': 'Purples',
                  '빽다방 매장수': 'PuBu'}
    # draw the cartogram image; its mtime busts the browser cache
    img_file = os.path.join(current_app.root_path, 'static/img/coffee.png')
    dk.drawKorea(item, coffee_index, color_dict[item], img_file)
    mtime = int(os.stat(img_file).st_mtime)
    # top-10 districts by the chosen metric, rounded for display
    ranked = coffee_index.sort_values(by=item, ascending=False)[['ID', item]].reset_index()
    top10 = {ranked['ID'][i]: round(ranked[item][i], 2) for i in range(10)}
    return render_template('cartogram/coffee_res.html', menu=menu, weather=get_weather(),
                           mtime=mtime, item=item, top10=top10)
def park_gu(option):
    """Seoul district park choropleth plus per-park circle markers.

    Fixes: `option_dict` was defined twice with near-duplicate contents —
    merged into one mapping (display label, with spaces stripped to form the
    dataframe column name); the GeoJSON file is now opened with a context
    manager (was leaked); local `map` renamed to avoid shadowing the builtin.
    """
    menu = {'ho': 0, 'da': 1, 'ml': 0, 'se': 1, 'co': 0,
            'cg': 0, 'cr': 0, 'st': 0, 'wc': 0}
    park_new = pd.read_csv('./static/data/park_info.csv')
    park_gu = pd.read_csv('./static/data/park_gu.csv')
    park_gu.set_index('지역', inplace=True)
    with open('./static/data/skorea_municipalities_geo_simple.json',
              encoding='utf8') as fp:
        geo_str = json.load(fp)
    # display label per option; the data column is the label minus spaces
    option_dict = {'area': '공원면적', 'count': '공원수',
                   'area_ratio': '공원면적 비율', 'per_person': '인당 공원면적'}
    column_index = option_dict[option].replace(' ', '')
    m = folium.Map(location=[37.5502, 126.982], zoom_start=11,
                   tiles='Stamen Toner')
    m.choropleth(geo_data=geo_str, data=park_gu[column_index],
                 columns=[park_gu.index, park_gu[column_index]],
                 fill_color='PuRd', key_on='feature.id')
    # mark each individual park, sized by its 'size' column
    for i in park_new.index:
        folium.CircleMarker(
            [park_new.lat[i], park_new.lng[i]],
            radius=int(park_new['size'][i]),
            tooltip=f"{park_new['공원명'][i]}({int(park_new.area[i])}㎡)",
            color='green', fill_color='green').add_to(m)
    html_file = os.path.join(current_app.root_path, 'static/img/park_gu.html')
    m.save(html_file)
    mtime = int(os.stat(html_file).st_mtime)
    return render_template('seoul/park_gu.html', menu=menu, weather=get_weather(),
                           option=option, option_dict=option_dict, mtime=mtime)
def population(option):
    """Population-extinction cartogram for the chosen metric."""
    menu = {'ho': 0, 'da': 1, 'ml': 0, 'se': 0, 'co': 0,
            'cg': 1, 'cr': 0, 'st': 0, 'wc': 0}
    df_pop = pd.read_csv('./static/data/population.csv')
    column_dict = {'extinction': '소멸위기지역', 'g_extinction': '소멸비율'}
    color_dict = {'extinction': 'Blues', 'g_extinction': 'Greens'}
    # draw the cartogram image; its mtime busts the browser cache
    img_file = os.path.join(current_app.root_path, 'static/img/population.png')
    dk.drawKorea(column_dict[option], df_pop, color_dict[option], img_file)
    mtime = int(os.stat(img_file).st_mtime)
    return render_template('cartogram/population.html', menu=menu,
                           weather=get_weather(), option=option,
                           column_dict=column_dict, mtime=mtime)
def book():
    """Interpark bestseller list page."""
    menu = {'ho': 0, 'da': 1, 'ml': 0, 'se': 0, 'co': 0, 'cg': 0,
            'cr': 1, 'wc': 0, 'cf': 0, 'ac': 0, 're': 0, 'cu': 0}
    return render_template('crawling/book.html', menu=menu,
                           weather=get_weather(), book_list=cu.interpark())
def food():
    """Restaurant list for a place crawled from SikSin; POST redirects to GET."""
    menu = {'ho': 0, 'da': 1, 'ml': 0, 'se': 0, 'co': 0, 'cg': 0, 'cr': 1,
            'st': 0, 'wc': 0, 'cf': 0, 'ac': 0, 're': 0, 'cu': 0, 'nl': 0}
    if request.method != 'GET':
        # POST: normalize to a GET with the place as a query parameter
        place = request.form['place']
        return redirect(url_for('crawl_bp.food') + f'?place={place}')
    place = request.args.get('place', '발산역')
    rest_list = cu.siksin(place)
    return render_template('crawling/food.html', menu=menu, weather=get_weather(),
                           rest_list=rest_list, place=place)
def naver():
    """Naver movie-review sentiment via four module-level pipelines."""
    if request.method == 'GET':
        return render_template('advanced/naver.html', menu=menu, weather=get_weather())
    if request.form['option'] == 'index':
        idx = int(request.form['index'] or '0')
        df_test = pd.read_csv('static/data/naver/movie_test.tsv', sep='\t')
        org_review = df_test.document[idx]
        label = '(긍정)' if df_test.label[idx] else '(부정)'
    else:
        org_review = request.form['review']
        label = '직접 확인'
    # keep Korean characters only, tokenize with Okt, drop stopwords
    cleaned = re.sub("[^ㄱ-ㅎㅏ-ㅣ가-힣 ]", "", org_review)
    okt = Okt()
    stopwords = ['의', '가', '이', '은', '들', '는', '좀', '잘', '걍', '과', '도',
                 '를', '으로', '자', '에', '와', '한', '하다', '을']
    tokens = okt.morphs(cleaned, stem=True)
    test_data = [' '.join(w for w in tokens if w not in stopwords)]
    naver_dict = {
        'label': label,
        'pred_c_lr': naver_count_lr.predict(test_data)[0],
        'pred_c_nb': naver_count_nb.predict(test_data)[0],
        'pred_t_lr': naver_tfid_lr.predict(test_data)[0],
        'pred_t_nb': naver_tfid_nb.predict(test_data)[0],
    }
    return render_template('advanced/naver_res.html', menu=menu,
                           imdb=test_data[0], nav=naver_dict,
                           weather=get_weather())
def cancer():
    """Breast-cancer classification demo on one row of the test CSV."""
    menu = {'ho': 0, 'da': 0, 'ml': 10, 'se': 0, 'co': 0, 'cg': 0,
            'cr': 0, 'wc': 0, 'cf': 1, 'ac': 0, 're': 0, 'cu': 0}
    if request.method == 'GET':
        return render_template('classification/cancer.html', menu=menu, weather=get_weather())
    index = int(request.form['index'])
    df = pd.read_csv('static/data/cancer_test.csv')
    # NOTE(review): the scaler is fit on the test file itself each request —
    # sibling handlers load a pickled training-time scaler; confirm intended.
    scaler = MinMaxScaler()
    scaled = scaler.fit_transform(df.iloc[:, :-1])
    test_data = scaled[index, :].reshape(1, -1)
    label = df.iloc[index, -1]
    result = {'index': index, 'label': label}
    for tag in ('lr', 'sv', 'rf'):
        clf = joblib.load(f'static/model/cancer_{tag}.pkl')
        result[f'pred_{tag}'] = clf.predict(test_data)[0]
    org = dict(zip(df.columns[:-1], df.iloc[index, :-1]))
    return render_template('classification/cancer_res.html', menu=menu,
                           res=result, org=org, weather=get_weather())