def newDay(self, weekly, monthly, yearly, current_day):
    results = [0, 0, 0]

    # Retrain the weekly classifier every day: columns 0-6 are the past week,
    # column 7 is the target; inference uses the same window shifted by one day.
    week = cl.Classify()
    history = week.trainNetwork(self.epochs, weekly[:, :7], weekly[:, 7:8])
    results[0] = week.testNetwork(weekly[:, 1:8])

    # Retrain the monthly regressor every 15 days; otherwise reuse the cached result.
    if current_day % 15 == 1 or current_day == 0:
        month = rg.Regression()
        history = month.trainNetwork(self.epochs, monthly[:, :30], monthly[:, 30:31])
        self.results_monthly = month.testNetwork(monthly[:, 1:31])
        self.results_monthly = self.results_monthly.flatten()
    results[1] = self.results_monthly

    # Retrain the yearly regressor twice a year. Integer division keeps the
    # modulus an int; in Python 3, 365 / 2 is 182.5 and the branch would
    # almost never fire for integer day counts.
    if current_day % (365 // 2) == 1 or current_day == 0:
        year = rg.Regression()
        history = year.trainNetwork(self.epochs, yearly[:, :365], yearly[:, 365:366])
        self.results_yearly = year.testNetwork(yearly[:, 1:366])
        self.results_yearly = self.results_yearly.flatten()
    results[2] = self.results_yearly

    # Average the three horizons into a single forecast.
    self.results = np.average(results, axis=0)
    return self.results

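# The column windows above shift by one day between training and inference:
# the model trains on days 0..6 to predict day 7, then is queried on days 1..7
# to predict the next, unseen day. A minimal sketch of that windowing, assuming
# rows are samples and columns are consecutive days:
import numpy as np

days = np.arange(9).reshape(1, 9)             # hypothetical toy series, 9 days in one row
X_train, y_train = days[:, :7], days[:, 7:8]  # days 0-6 predict day 7
X_next = days[:, 1:8]                         # days 1-7 predict tomorrow
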
def __init__(self):
    """Initialize."""
    super().__init__()
    self.status = tk.StringVar(value='启动')  # UI status text: "Start"
    self.selection = tk.IntVar(value=0)
    self.clfy_ins = classify.Classify('.')
    self.content_list = []
    self.setup_ui()

def __init__(self, master=None, model_w2v=None, keys=None):
    super().__init__(master)
    self.answers = ['...', '...', '...', '...']
    self.Contextlength = 1
    self.texts = []
    self.w2v = functions.load_w2v(consts.W2V_PATH)
    print('Classify class initialization...')
    # one Classify model per index 1-5
    self.models = [
        classify.Classify(1),
        classify.Classify(2),
        classify.Classify(3),
        classify.Classify(4),
        classify.Classify(5)
    ]
    print('Done.')
    styles.create_styles(self)
    self.create_widgets()

def __init__(self):
    self.CONF = 0.7  # confidence threshold for accepting a prediction
    self.classifier = classify.Classify()
    self.preprocessor = preprocess.PreProcessor()
    self.class_names = self.classifier.get_class_names()
    self.cache = cache.Cache(max_size=10)
    self.faceCascade = cv2.CascadeClassifier(
        'haarcascade_frontalface_default.xml')

def model(self):
    """Main model, using supervised machine learning; processes the dataset."""
    logger.info('[*] Starting processing of dataset ...')
    cl = classify.Classify(logger)
    data = cl.get_dataset()
    logger.info('[*] Using K-nearest neighbour algorithm ...')
    self.knn_model = KNeighborsClassifier(n_neighbors=self.knn)
    self.train_and_test(data)
    return True

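# train_and_test is not shown in the example. A minimal sketch of what it
# might look like, assuming `data` is an (X, y) pair; the 80/20 split ratio is
# also an assumption, not taken from the source:
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

def train_and_test(self, data):
    # (method sketch; would live on the same class as model())
    X, y = data  # assumed layout
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    self.knn_model.fit(X_train, y_train)
    preds = self.knn_model.predict(X_test)
    logger.info('[*] Accuracy: %.3f', accuracy_score(y_test, preds))
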
def __init__(self, mode='ensemble'):
    self.test_data = None
    self.x_test = None
    self.y_test = None
    self.mode = mode
    self.cl_y_test_unfused = []
    self.cl_y_pred_unfused = []
    self.cl_y_test_fused = []
    self.cl_y_pred_fused = []
    self.npc_y_test = []
    self.npc_y_pred = []
    self.ensemble_pred = []
    self.session_started = False
    self.session_ended = False
    self.cpp = classifier_preprocess.ClassifierPreProcess()
    self.tpp = classifier_preprocess.NLTKPreprocessor()
    if self.mode == 'ensemble' or self.mode == 'classifier':
        self.classifier = classify.Classify()
    self.npc = npceditor_interface.NPCEditor()
    self.utterance_df = pd.read_csv(
        open(os.path.join("data", "utterance_data.csv"), 'rb'))
    # variables to keep track of the session
    self.blacklist = set()
    # status codes that PAL3 sends to this code
    self.pal3_status_codes = {
        '_START_SESSION_', '_INTRO_', '_IDLE_', '_TIME_OUT_',
        '_END_SESSION_', '_EMPTY_'
    }
    self.utterances_prompts = {}  # responses for the special cases
    self.user_logs = []
    self.suggestions = {}
    # group the canned (video, utterance) prompts by situation
    for i in range(len(self.utterance_df)):
        situation = self.utterance_df.iloc[i]['situation']
        video_name = self.utterance_df.iloc[i]['ID']
        utterance = self.utterance_df.iloc[i]['utterance']
        if situation in self.utterances_prompts:
            self.utterances_prompts[situation].append((video_name, utterance))
        else:
            self.utterances_prompts[situation] = [(video_name, utterance)]
    self.prepare_suggest_data()

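# Design note: the append-or-create branch above can be written more compactly
# with dict.setdefault — behavior-identical:
for i in range(len(self.utterance_df)):
    row = self.utterance_df.iloc[i]
    self.utterances_prompts.setdefault(row['situation'], []).append(
        (row['ID'], row['utterance']))
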
def classifyrunner(queue, request):
    try:
        import classify
        cl = classify.Classify()
        cl.do_dataset(queue, request)
    except Exception:
        import sys
        import traceback
        traceback.print_exc(file=sys.stdout)
        print("\n")
        import random
        # dump the failing request body and traceback for post-mortem debugging
        with open("/tmp/outgem" + argstr() +
                  str(random.randint(1000, 9999)) + ".txt", "w") as f:
            f.write(request.get_data(as_text=True))
            traceback.print_exc(file=f)
        return Response(json.dumps({"accuracy": None, "loss": None}),
                        mimetype='application/json')

def __init__(self, mon):
    threading.Thread.__init__(self)  # initialize the Thread base class
    self.daemon = True
    self.mon = mon  # monitor object for time requests
    self.classify = classify.Classify()
    # interval to consider, in seconds
    self.interval = rc.config.getint('checkbehavior', 'interval')
    # tolerance within which an activity event (here: water flow) has to occur;
    # a regular event may fire in a time slot of +/- (toleranceIntervals * interval) seconds
    self.toleranceIntervals = rc.config.getint('checkbehavior', 'toleranceIntervals')
    # number of recorded days, kept separately for workdays and free days
    self.observePeriod = rc.config.getint('checkbehavior', 'observePeriod')
    # number of days to learn before seheiah decides about an emergency case
    self.minObservedPeriod = rc.config.getint('checkbehavior', 'minObservedPeriod')
    # emergency counter
    self.emergency = 0
    # markers for actions
    self.markerCheckBehavior = False  # was the behavior checked in this interval?
    self.markerCheckDelete = False  # were old values deleted correctly?
    self.absence = absence.Absence()

def __init__(self):
    self.db = logdb.logDB()
    self.classify = classify.Classify()

import classify
import sys
import cv2
import preprocess
import time

classifier = classify.Classify()
preprocessor = preprocess.PreProcessor()
camera = cv2.VideoCapture(0)
i = 0
start = time.time()
prediction_out_dir = "prediction_output_images/"
print("start_time ", start)
while True:
    return_value, image = camera.read()
    # write the frame to disk so the preprocessor can align it from a file path
    cv2.imwrite(prediction_out_dir + 'opencv.png', image)
    bb = preprocessor.align(prediction_out_dir + 'opencv.png')
    if not bb.any():
        pass  # no face detected in this frame
    else:
        # draw the detected bounding box on the frame
        cv2.rectangle(image, (bb[0], bb[1]), (bb[2], bb[3]), (0, 255, 0), 5)
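        # (the example is truncated here) A hedged sketch of a possible
        # continuation — classifier.predict and its return value are
        # assumptions, not from the source:
        face = image[bb[1]:bb[3], bb[0]:bb[2]]  # crop the detected face
        label = classifier.predict(face)        # assumed Classify API
        cv2.putText(image, str(label), (bb[0], bb[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
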
import web
import app
import json
import classify as classify
import base64
from requests.auth import HTTPBasicAuth

# note: this rebinding shadows the classify module with a Classify instance
classify = classify.Classify()
users = (("user", "pass"), ("admin", "admin"))


class Api():
    def GET(self):
        try:
            result = {}
            result["status"] = 200
            result["message"] = {
                "Authorization": "Basic Auth",
                "image": "JPG File",
                "method": "POST"
            }
            return json.dumps(result)
        except Exception as error:
            result = {}
            result["status"] = "400"
            result["message"] = error.args[0]
            return json.dumps(result)

    def POST(self):
        try:
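            # (the example is truncated here) A hedged sketch of a typical
            # continuation — the 'image' field name and the classify.predict
            # call are assumptions, not from the source:
            data = web.input()                          # parsed POST form data (web.py)
            image_bytes = base64.b64decode(data.image)  # assumed base64-encoded JPG payload
            label = classify.predict(image_bytes)       # assumed Classify API
            return json.dumps({"status": 200, "message": label})
        except Exception as error:
            return json.dumps({"status": "400", "message": error.args[0]})
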
def do_classify():
    import classify
    cl = classify.Classify()
    return cl.do_classify(request)

def app():
    st.subheader('Select the Question')
    data = pd.read_csv('APP/Data/question_draft.CSV')
    question = st.selectbox("질문을 선택해 주십시오.", data['question'])  # "Please select a question."
    st.write("선택된 질문은 <" + question + "> 입니다.")  # "The selected question is <...>."
    c_btn = st.button("Classify", key='1')
    if c_btn:
        result = question.title()
        clssfy = classify.Classify()
        with st.spinner('분류 중 입니다...'):  # "Classifying..."
            category = clssfy.classification(result)
        selected_data = data[data['question'].str.contains(question)]
        draft1 = selected_data['draft1'].values[0]
        draft2 = selected_data['draft2'].values[0]
        draft3 = selected_data['draft3'].values[0]
        draft4 = selected_data['draft4'].values[0]
        if category == 0:  # motivation and aspirations
            st.success("지원 동기 및 포부 관련 질문입니다.")  # "This question concerns motivation and aspirations."
            st.subheader('Enter the Experience Automatically')
            st.markdown("- 동기 Motivation\n"
                        " - " + draft1 + "\n"
                        "- 비전 Vision\n"
                        " - " + draft2 + "\n"
                        "- 열정 Passion\n"
                        " - " + draft3 + "\n"
                        "- 역량 Competence\n"
                        " - " + draft4)
            # summary: competence + vision
            draft = draft3 + draft2
        elif category == 1:  # experience and competence
            st.success("경험, 역량 관련 질문입니다.")  # "This question concerns experience and competence."
            st.subheader('Enter the Experience Automatically')
            st.markdown("- 상황 Situation\n"
                        " - " + draft1 + "\n"
                        "- 위기 Crisis\n"
                        " - " + draft2 + "\n"
                        "- 행동 Action\n"
                        " - " + draft3 + "\n"
                        "- 결과 Result\n"
                        " - " + draft4)
            # summary: situation + result
            draft = draft1 + draft4
        elif category == 2:  # for test
            st.success("요약 테스트입니다.")  # "This is a summarization test."
            st.subheader('Test Summarization Automatically')
            st.markdown("- --\n"
                        " - " + draft1)
            draft2 = ""
            draft3 = ""
            draft4 = ""
            draft = draft1
        smmry = summary_v2.Summary()
        with st.spinner('요약 중 입니다...'):  # "Summarizing..."
            drft_smmry = smmry.summarization(draft)
        st.text("\"NEWS SUMMARY\" Dataset을 이용하여 Summary 하였습니다.")  # "Summarized using the NEWS SUMMARY dataset."
        st.success("완성된 요약문은 \"" + drft_smmry + "\" 입니다.")  # "The completed summary is ..."
        st.subheader('Draft')
        st.info(drft_smmry + "\n\n " + draft1 + "\n\n " + draft2 + "\n\n " +
                draft3 + "\n\n " + draft4 + "\n\n ")

    with open(random_word_path) as file:
        return file.read().splitlines()

# get the random word list
randomwordList = getRandomWordList()

# init the first word
if not os.path.isfile(os.path.join(working_directory, "latest.p")):
    pickle.dump("beach", open("latest.p", "wb"))
latestWord = pickle.load(open("latest.p", "rb"))

# init the flickr and keras helpers
flickrHelper = download_flickr.Flickr()
kerasHelper = classify.Classify()

# start processing
for i, word in enumerate(randomwordList):
    if word == latestWord:
        print('Processing word ' + word + ' ' + str(i + 1) + ' out of ' +
              str(len(randomwordList)))
        photo_urls_src = []
        photo_urls_org = []
        helperResponse = flickrHelper.loadImageData(word)
        for photo in helperResponse['photos']['photo']:
            if 'url_q' in photo.keys() and 'url_o' in photo.keys():
                photo_urls_src.append(photo['url_q'])
                photo_urls_org.append(photo['url_o'])

import model
import classify

# constants
path_to_topgenes = '../../../data/knownvariants/risk_genes.csv'
path_to_dataset = '../../../data/full_dataset/imputed_clean_full_dataset.csv.gz'
path_to_test = '../../../data/full_dataset/data_for_test.csv.gz'
path_to_risk_genes_list = '../../../data/knownvariants/genes_list.csv'
path_to_risk_genes_list_test = '../../../data/knownvariants/genes_list_test.csv'
path_to_new_features = '../../../data/full_dataset/full_data_new_features.csv'
#path_to_dataset = '../../../data/full_dataset/full_treated_dataset_no_top100.csv.gz'

# call the model
#run = model.model(path_to_dataset, path_to_topgenes)
#run.topGenes()

# call the classifier
classifiers = classify.Classify(path_to_new_features)
(frequencies, f1s) = classifiers.classify_it()
print(frequencies)

def classifyrunner2(queue, request):
    import classify
    cl = classify.Classify()
    cl.do_learntestclassify(queue, request)

def filenamerunner(queue, request):
    import classify
    cl = classify.Classify()
    return cl.do_filename(queue, request)

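# The *runner helpers above take a (queue, request) pair, which suggests they
# are launched on worker threads that report back through the queue. A hedged
# usage sketch — this wiring is an assumption, not shown in the source:
import queue
import threading

result_queue = queue.Queue()
worker = threading.Thread(target=classifyrunner, args=(result_queue, request))
worker.start()
worker.join()
result = result_queue.get()  # whatever do_dataset placed on the queue
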
def addTab(self, nb):
    page2 = classify.Classify(nb, self)
    nb.add(page2, text="Classify")

def do_learntest():
    import classify
    cl = classify.Classify()
    return cl.do_learntest(request)

# -*- coding:utf-8 -*-
from __future__ import print_function
import os
import sys
sys.path.append('lib')

from itertools import izip

import numpy as np

import classify
import stream as img_stream
import serial
from log import sys_logger as logger

model = classify.Classify(10)
streamFactory = img_stream.Stream()
imgGenerator = streamFactory.from_camera(0)
ser = serial.Serial('mock')
scene = 0
logger.info('launched successfully')

while True:
    # check serial
    if ser.is_ready():
        cmd, payload = ser.get_payload()
        # fit the model
        if cmd == 0:
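            # (the example is truncated here) A hedged sketch of a possible
            # continuation — the command numbering and the fit/predict/send
            # APIs are assumptions, not from the source:
            model.fit(payload)                # assumed: retrain on fresh samples
        elif cmd == 1:
            frame = next(imgGenerator)        # pull a frame from the camera stream
            scene = model.predict(frame)      # assumed: classify the current scene
            ser.send(scene)                   # assumed: report the result back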