import numpy as np


def extract_features_over_segment(sensors_data):
    # Assumption: the opening of this function is not shown in the excerpt;
    # the mean/var/min lines below are reconstructed to mirror the max line,
    # using helpers defined elsewhere in the module.
    means = mean(sensors_data)
    vars = var(sensors_data)
    mins = min(sensors_data)
    maxs = max(sensors_data)
    correlations = correlation(sensors_data)
    # fft_vals = fft(sensors_data)
    # entropies = entropy(fft_vals)
    # energies = energy(fft_vals)
    return np.hstack([means, vars, mins, maxs, correlations])


def extract_features(segments):
    return np.array(
        [extract_features_over_segment(segment) for segment in segments])


if __name__ == '__main__':
    import os

    import data_collection
    import preprocess

    np.set_printoptions(suppress=True)

    EXP_LOCATION = os.path.join('data', 'varunchicken1')
    collector = data_collection.DataCollection(EXP_LOCATION)
    collector.load()
    segments = collector.segment()
    segments = preprocess.preprocess_segments(segments[0:3])
    print(extract_features(segments).shape)
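# A minimal sketch of the per-sensor helpers the snippet above relies on.
# These are hypothetical implementations (the originals live elsewhere in
# the module), assuming each segment is an (n_samples, n_sensors) array.
import numpy as np


def mean(sensors_data):
    # One mean per sensor column.
    return np.mean(sensors_data, axis=0)


def var(sensors_data):
    # One variance per sensor column.
    return np.var(sensors_data, axis=0)


def min(sensors_data):
    # One minimum per sensor column (shadows the builtin, as the snippet's
    # call style suggests).
    return np.min(sensors_data, axis=0)


def max(sensors_data):
    # One maximum per sensor column.
    return np.max(sensors_data, axis=0)


def correlation(sensors_data):
    # Pairwise Pearson correlations between sensor columns, flattened to
    # the strict upper triangle so each pair appears exactly once.
    corr = np.corrcoef(sensors_data, rowvar=False)
    return corr[np.triu_indices_from(corr, k=1)]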
index1_1 = 0
index0_2 = 0
index1_2 = 0
index0_3 = 0
index1_3 = 0

distance0 = rc.RatioCalculation()
distance1 = rc.RatioCalculation()
distance2 = rc.RatioCalculation()

# Creating data collection / hit detection log objects
sheet_index = 0  # Sheet index in Google Sheets
hit_obj0 = dc.DataCollection()
hit0 = hit_obj0.run_once_hit(hit_obj0.hit_count)
time0 = hit_obj0.run_once_time(hit_obj0.time_count)
hit_obj1 = dc.DataCollection()
hit1 = hit_obj1.run_once_hit(hit_obj1.hit_count)
time1 = hit_obj1.run_once_time(hit_obj1.time_count)
hit_obj2 = dc.DataCollection()
hit2 = hit_obj2.run_once_hit(hit_obj2.hit_count)
time2 = hit_obj2.run_once_time(hit_obj2.time_count)

start_time = datetime.datetime.now().replace(microsecond=0)
print(start_time)

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
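# The three parallel distance/hit/time triples above could equivalently be
# built in a loop; a sketch under the same rc/dc imports:
#
#     distances = [rc.RatioCalculation() for _ in range(3)]
#     hit_objs = [dc.DataCollection() for _ in range(3)]
#     hits = [h.run_once_hit(h.hit_count) for h in hit_objs]
#     times = [h.run_once_time(h.time_count) for h in hit_objs]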
app.config["SESSION_PERMANENT"] = False app.config["SESSION_TYPE"] = "filesystem" Session(app) # Configure CS50 Library to use SQLite database db = SQL("sqlite:///user_info.db") # camera configuration camera = cv2.VideoCapture(0) exercise = None video_obj = VideoCamera() # object for data collection data_collection_obj = data_collection.DataCollection() # success_exercise = False @app.after_request def add_header(r): """ Add headers to both force latest IE rendering engine or Chrome Frame, and also to cache the rendered page for 10 minutes. """ r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate" r.headers["Pragma"] = "no-cache" r.headers["Expires"] = "0" r.headers['Cache-Control'] = 'public, max-age=0' return r
import sys
sys.path.insert(0, 'class_')
import nlu, tts
import random
import data_collection

# Initialize the TTS and the DataCollection classes
TTS = tts.Tts()
TTS.set_property_voice()
datacollection = data_collection.DataCollection()


class DM:
    """
    Dialogue management class: manages all slots and intents computed by
    the NLU module and returns an answer to the user.
    """

    def __init__(self, what_film, number_of_tikets, what_time, when,
                 location, gen_info, choice_gen):
        self.intent_class = nlu.IntentCalssifier()
        self.what_film = what_film
        self.number_of_tikets = number_of_tikets
        self.what_time = what_time
        self.when = when
        self.location = location
        self.gen_info = gen_info
        self.choice_gen = choice_gen
        self.slot_booking_film = [self.what_film, self.number_of_tikets,
                                  self.what_time, self.location, self.when]
        self.intents = self.intent_class.intents_class

    # Handles an intent the first time it is seen, i.e. when the user does
    # not have to repeat because the robot understood the answer.
    def manage_first_action(self, intent):
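# Hypothetical instantiation of the dialogue manager above: every slot
# starts empty and is filled as the NLU extracts values turn by turn (the
# None initial values are an assumption for illustration):
#
#     dm = DM(what_film=None, number_of_tikets=None, what_time=None,
#             when=None, location=None, gen_info=None, choice_gen=None)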
def __init__(self, focal, pp, model_path):
    self.focal = focal
    self.pp = pp
    self.data_collection = data_collection.DataCollection()
    self.loaded_model = load_model(model_path)
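# Hypothetical usage of the constructor above. In monocular visual-odometry
# code, focal is usually the camera focal length in pixels and pp the
# principal point; the class name, the intrinsics (KITTI camera values),
# and the model path below are illustrative assumptions:
#
#     vo = SomeOdometryClass(focal=718.856, pp=(607.1928, 185.2157),
#                            model_path='models/depth_model.h5')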
def collect_tweets():
    with open('config.json', 'r') as f:
        config = json.load(f)

    d = dc.DataCollection(config)
    d.collect_tweets('output', 'a')
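# The config.json loaded above presumably carries the Twitter API
# credentials that DataCollection needs; a hypothetical layout (every key
# here is an assumption, not taken from the original project):
#
#     {
#         "consumer_key": "...",
#         "consumer_secret": "...",
#         "access_token": "...",
#         "access_token_secret": "..."
#     }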