def on_modified(self, event):
    """Move every file in folder_to_track to folder_destination, renaming
    each one to a unique 'new_file<i>.txt' (self.i is a running counter).

    event -- the watchdog filesystem event that triggered this handler
             (unused; we rescan the whole tracked folder instead).
    """
    for filename in os.listdir(folder_to_track):
        # BUG FIX: new_name must be recomputed for every file, and the
        # original mixed up three spellings (file.exists / file_exsist /
        # os.pathisfile). Bump the counter until the target name is free.
        new_name = "new_file" + str(self.i) + ".txt"
        while os.path.isfile(folder_destination + "/" + new_name):
            self.i += 1
            new_name = "new_file" + str(self.i) + ".txt"
        src = folder_to_track + "/" + filename
        new_destination = folder_destination + "/" + new_name
        os.rename(src, new_destination)


# Raw strings: a plain "\Users..." is a SyntaxError because \U starts a
# unicode escape.  TODO confirm these Windows-style paths are intended.
folder_to_track = r"\Users\Åmar\Desktop\myFolder"
folder_destination = r"\Users\Åmar\Desktop\myFolder"

# BUG FIX: consistent variable names (Event_Handler/EventHandler mismatch),
# don't shadow the Observer class with the instance, schedule() is lowercase,
# and start()/stop()/join() must actually be called.
event_handler = Myhandler()
observer = Observer()
observer.schedule(event_handler, folder_to_track, recursive=True)
observer.start()
try:
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    # Original was truncated here; the conventional shutdown sequence:
    observer.stop()
observer.join()
def __init__(
    self,
    filename,
    seq_len=8 * 24,  # 7 days of history and 1 day to predict
    step=1,
    avg_size=None,  # no average
    avg_stride=None,
    norm_len=7 * 24,  # normalize wrt the 7 days of history
    fraction=0.95,
):
    """Build train/test datasets from *filename*.

    Looks for previously saved dataset/<basename>_{train,test}.json files;
    otherwise processes the raw file and splits it into train/test with
    *fraction* of the sequences going to the training set.

    NOTE(review): even when the saved files are found, they are not loaded —
    the data is reprocessed unconditionally (see the TODO below).
    """
    basename = os.path.splitext(os.path.basename(filename))[0]
    train_dataset = os.path.join("dataset", basename + "_train.json")
    test_dataset = os.path.join("dataset", basename + "_test.json")

    # BUG FIX: os.pathisfile -> os.path.isfile (the typo raised
    # AttributeError at runtime whenever the train file existed).
    if os.path.isfile(train_dataset) and os.path.isfile(test_dataset):
        print("Found dataset files")
    else:
        print("Dataset files not found. Creating datasets")

    dataset = self.process_data(filename, seq_len, step, avg_size, avg_stride, norm_len)
    train, test = self.split_dataset(dataset, fraction, shuffle=True)
    # TODO: save train and test
    self.train = CryptoData(train)
    self.test = CryptoData(test)
def dea_full_data_update(chk):
    """
    update deahk search database
    input:  chk --- whether to request full data update: chk == 1: yes
    output: <deposit_dir>/Deahk/<group>/<msid>_full_data_<year>.fits
    """
    tyear = int(float(time.strftime("%Y", time.gmtime())))

    cmd = 'ls ' + data_dir + 'Deahk_*/*_week_data.fits > ' + zspace
    os.system(cmd)
    data = mcf.read_data_file(zspace, remove=1)

    for ent in data:
        atemp = re.split('\/', ent)
        group = atemp[-2]
        btemp = re.split('_', atemp[-1])
        msid = btemp[0]
        print("MSID: " + str(msid) + ' in ' + group)

        [cols, tbdata] = ecf.read_fits_file(ent)
        # BUG FIX: the original bound this array to the name 'time',
        # shadowing the time module used above; renamed to ftime.
        ftime = tbdata['time']
        tdata = tbdata[msid]
        cols = ['time', msid]
#
#--- regular data update
#
        if chk == 0:
#
#--- normal daily data update
#
            ofits = deposit_dir + 'Deahk_save/' + group + '/' + msid + '_full_data_'
            ofits = ofits + str(tyear) + '.fits'
            # BUG FIX: os.pathisfile -> os.path.isfile
            if os.path.isfile(ofits):
                ltime = ecf.find_the_last_entry_time(ofits)
                # BUG FIX: ctime was left as a string but compared against
                # numeric times below; convert to seconds the same way the
                # full-update branch does.  TODO confirm against ltime units.
                ctime = Chandra.Time.DateTime(str(tyear + 1) + ':001:00:00:00').secs
                nchk = 0
#
#--- if the data is over the year boundray, fill up the last year and create a new one for the new year
#
            else:
                ofits = deposit_dir + 'Deahk_save/' + group + '/' + msid
                ofits = ofits + '_full_data_' + str(tyear - 1) + '.fits'
                nfits = deposit_dir + 'Deahk_save/' + group + '/' + msid
                nfits = nfits + '_full_data_' + str(tyear) + '.fits'
                ltime = ecf.find_the_last_entry_time(ofits)
                ctime = Chandra.Time.DateTime(str(tyear) + ':001:00:00:00').secs
                nchk = 1

            # BUG FIX: masks were wrapped in a one-element list
            # ([(a) & (b)]), which is invalid/deprecated fancy indexing;
            # use the boolean array directly.
            select = (ftime > ltime) & (ftime < ctime)
            stime = ftime[select]
            sdata = tdata[select]
            cdata = [stime, sdata]
            ecf.update_fits_file(ofits, cols, cdata)

            if nchk > 0:
                select = ftime >= ctime
                stime = ftime[select]
                sdata = tdata[select]
                cdata = [stime, sdata]
                ecf.create_fits_file(nfits, cols, cdata)
#
#--- start from beginning (year 1999)
#
        else:
            for year in range(1999, tyear + 1):
                tstart = str(year) + ':001:00:00:00'
                tstart = Chandra.Time.DateTime(tstart).secs
                tstop = str(year + 1) + ':001:00:00:00'
                tstop = Chandra.Time.DateTime(tstop).secs

                select = (ftime >= tstart) & (ftime < tstop)
                stime = ftime[select]
                sdata = tdata[select]
                cdata = [stime, sdata]

                out = deposit_dir + 'Deahk_save/' + group + '/'
                # BUG FIX: the mkdir command was built but never executed.
                if not os.path.isdir(out):
                    os.system('mkdir ' + out)
                out = out + msid + '_full_data_' + str(year) + '.fits'
                ecf.create_fits_file(out, cols, cdata)
except ImportError: print("\nInstall SpeechRecognition to use this feature." + "\nStarting text mode\n") if(args.gtts): try: from gtts import gTTS from pygame import mixer voice = "gTTS" except ImportError: import pyttsx print("\nInstall gTTS and pygame to use this feature." + "\nUsing pyttsx\n") else: import pyttsx kernel = amil.Kernel() if os.pathisfile("bot_brain.brn"): kernel.bootstrap(brainFile="bot_brain.brn") else: karnel.bootstrap(learnFiles="std-startup.xml", commands="load aiml b") # kernel.saveBrain("bot_brain.brn") # kernel now ready for use while True: if mode == "voice": respond = listen() else: response = raw_input("Talk to Sarah:") if response.lower()replace(" ","") in terminate: break Sarah_speech = kernel.respond(response) print "Sarah:" + Sarah_speech