def main():
    try:
        sWeather = fetchJSON("http://api.openweathermap.org/data/2.5/weather?q=" +
                             config['location'] + "&units=metric&APPID=" + config['api'])
        jWeather = json.loads(sWeather)
        data = processData(jWeather)
        saveSQLite(data)
    except urllib2.HTTPError:
        print "Failed to connect to OpenWeatherMap"

    if sensor_config['attiny_light_meter']:
        try:
            bus = smbus.SMBus(I2C_BUS_NUMBER)
            # Two consecutive one-byte reads: high byte first, then low byte
            light_level = bus.read_byte(LIGHT_SENSOR_ADDR) * 256 + bus.read_byte(LIGHT_SENSOR_ADDR)
            sensor_handler = sensor.sensor()
            sensor_handler.save_value(7, light_level)
        except IOError:
            print "Failed to fetch light level"
    elif sensor_config['BH1750FVI_light_meter']:
        try:
            light_level = readLight()
            sensor_handler = sensor.sensor()
            sensor_handler.save_value(8, light_level)
        except IOError:
            print "Failed to fetch light level"
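# The snippet above calls a fetchJSON() helper that is not shown. A minimal
# hedged sketch of what it might look like with urllib2 (the real helper may
# add timeouts, retries, or error handling):
def fetchJSON(url):
    """Fetch a URL and return the response body as a string."""
    response = urllib2.urlopen(url)
    try:
        return response.read()
    finally:
        response.close()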
def main():
    sensor_handler = sensor.sensor()
    global db_connection
    db_connection = sensor_handler.get_db_connection()
    c = db_connection.cursor()
    c.execute('DROP TABLE IF EXISTS sensor_values')
    c.execute('CREATE TABLE IF NOT EXISTS sensor_values(`Date` integer, `Sensor` integer, `Value` real)')
    source_connection = sqlite3.connect(os.path.dirname(os.path.realpath(__file__)) + '/data.db')
    cursor_source = source_connection.cursor()
    result = cursor_source.execute('SELECT * FROM readouts_external')
    for row in result:
        sensor_handler.insert_no_commit(row[0], 0, row[1])  # Temperature
        sensor_handler.insert_no_commit(row[0], 1, row[2])  # Humidity
    result = cursor_source.execute('SELECT * FROM external_data')
    for row in result:
        sensor_handler.insert_no_commit(row[0], 2, row[1])  # Real pressure
        sensor_handler.insert_no_commit(row[0], 4, row[2])  # Wind speed
        sensor_handler.insert_no_commit(row[0], 5, row[3])  # Wind direction
    c.execute('CREATE INDEX IF NOT EXISTS SENSOR_A ON sensor_values(`Date`, `Sensor`)')
    db_connection.commit()
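# Many snippets in this collection lean on the same sensor.sensor wrapper
# (get_db_connection, save_value, insert_no_commit, get_last_value). Its
# implementation is not shown anywhere; this is a hedged sketch of the
# interface inferred from usage, assuming a SQLite table shaped like the
# sensor_values table created above (the db filename is a guess):
import os
import sqlite3
import time

class sensor(object):
    def __init__(self):
        db_path = os.path.dirname(os.path.realpath(__file__)) + '/sensors.db'
        self.connection = sqlite3.connect(db_path)

    def get_db_connection(self):
        return self.connection

    def insert_no_commit(self, date, sensor_id, value):
        # Queue one readout; the caller commits in bulk.
        self.connection.execute(
            'INSERT INTO sensor_values(`Date`, `Sensor`, `Value`) VALUES (?, ?, ?)',
            (date, sensor_id, value))

    def save_value(self, sensor_id, value):
        # Insert one timestamped readout and commit immediately.
        self.insert_no_commit(int(time.time()), sensor_id, value)
        self.connection.commit()

    def get_last_value(self, sensor_id):
        row = self.connection.execute(
            'SELECT `Value` FROM sensor_values WHERE `Sensor` = ? '
            'ORDER BY `Date` DESC LIMIT 1', (sensor_id,)).fetchone()
        return row[0] if row else None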
def __init__(self):
    """Read the config file, build the map, and set up the sensor and the
    movement/probability tables."""
    self.config = read_config()
    self.buildMap()
    self.runMDP()
    self.sensor = sensor(self.config)
    self.moveDict = self.sensor.moveSensor.moveDict
    self.probDict = dict()
    self.probDict[0] = self.config["prob_move_forward"]
    self.probDict[1] = self.config["prob_move_backward"]
    self.probDict[2] = self.config["prob_move_left"]
    self.probDict[3] = self.config["prob_move_right"]
    # Count the traversable cells (everything not marked "W" or "P")
    self.count = 0
    row, col = self.config["map_size"]
    for x in xrange(row):
        for y in xrange(col):
            if not (self.grid[x][y] == "W" or self.grid[x][y] == "P"):
                self.count += 1
    # Start with a uniform belief over the traversable cells
    avg = 1.0 / float(self.count)
    self.probGrid = [[avg if not (cell == "W" or cell == "P") else 0
                      for cell in line] for line in self.grid]
    util.print_2d_floats(self.probGrid)
    self.getToGoal()
def main(argv):
    # If True, data will print to shell as well as write to file
    # If False, data will only write to file
    verb = False
    if len(sys.argv) == 2:
        if sys.argv[1] in ('-v', '-verb', '-verbose'):
            verb = True
        else:
            print("Invalid argument. Valid argument(s): -v[erbose]")

    # Initialize file path for readings
    fpath = "/home/pi/MSD-P21422-BSF/Readings/"

    # Initialize data variables
    in_temp_f, in_temp_c, out_temp_f, out_temp_c, in_hum, out_hum, co2 = 0, 0, 0, 0, 0, 0, 0
    today, now = '0', '0'
    dbx = 0
    db_connect = False

    # If connected to the internet, establish the Dropbox connection
    if check_connection():
        dbx, db_connect = db_access()

    while True:
        today, now = cur_date_time(today, now, verb)
        file_name = date.strftime(date.today(), '%Y%m%d.csv')
        full_path = fpath + file_name
        in_temp_f, in_temp_c, out_temp_f, out_temp_c, in_hum, out_hum, co2 = \
            sensor.sensor(in_temp_f, in_temp_c, out_temp_f, out_temp_c,
                          in_hum, out_hum, co2, verb)
        heat_stat, hum_stat, fan_stat, light_stat = relay.relay(in_temp_f, in_hum, co2, verb)
        write_to_csv(in_temp_f, in_temp_c, out_temp_f, out_temp_c, in_hum, out_hum,
                     co2, today, now, heat_stat, hum_stat, fan_stat, light_stat, full_path)
        # Check the internet connection
        if check_connection():
            # If connected to the internet but not to Dropbox, establish the connection
            if not db_connect:
                dbx, db_connect = db_access()
            # If connected to both, upload the file
            if db_connect:
                print("Uploading the file...")
                upload('/' + file_name, full_path, dbx)
                print("Upload successful")
        # TO-DO: if the connection is down for more than a day,
        # add a case to upload files that were missed
        # Sleep in seconds: 60 = 1 minute, 300 = 5 minutes, 1800 = 30 minutes
        time.sleep(599.0)
def start(self):
    if self.interval != 0:
        logger.debug('Sensor Thread init - ' + self.name)
        self.obj = sensor.sensor(self.no, self.name, self.interval)
        #self.obj.setName(self.name)
        logger.debug('Starting Sensor Thread - ' + self.name)
        self.obj.start()
        self.type_setup()
def __init__(self):
    threading.Thread.__init__(self)
    thread = threading.Thread(target=self.run)
    thread.daemon = True
    for i in range(len(self.sensors_list)):
        self.sensors.append(
            sensor(self.sensors_list[i], self.sensors_pins[i],
                   self.sensors_thresholds[i]))
    thread.start()
def main():
    global threads
    nsensors = 2
    for i in xrange(nsensors):
        threads.append(sensor(i, 'batt'))
        threads.append(sensor(i, 'light'))
        threads.append(sensor(i, 'temp'))
    map(lambda x: x.start(), threads)
    myscreen = curses.initscr()
    myscreen.border(0)
    try:
        while True:
            myscreen.refresh()
            for i, j in enumerate(threads):
                myscreen.addstr(i + 2, 2, str(j))
            sleep(1)
    finally:
        # Restore the terminal when the loop is interrupted
        curses.endwin()
def main():
    sensor_handler = sensor.sensor()
    FORMAT = '%(asctime)-15s %(message)s'
    logging.basicConfig(filename=os.path.dirname(os.path.realpath(__file__)) + '/dht22.log',
                        level=logging.DEBUG, format=FORMAT)
    logger = logging.getLogger('dht22')
    print "DHT22 Sensor:"
    readout = None
    counter = 0
    try:
        pi = pigpio.pi()
    except ValueError as e:
        print "Failed to connect to PIGPIO (%s)" % e
        logger.error('Failed to connect to PIGPIO (%s)', e)
    try:
        sensor_data = DHT22.sensor(pi, 17)
    except ValueError as e:
        print "Failed to connect to DHT22 (%s)" % e
        logger.error('Failed to connect to DHT22 (%s)', e)
    while readout is None and counter < 5:
        counter += 1
        # Get data from sensor
        sensor_data.trigger()
        time.sleep(0.2)
        humidity = sensor_data.humidity()
        temperature = sensor_data.temperature()
        if humidity is not None and temperature is not None and 0 <= humidity <= 100:
            readout = [humidity, temperature]
            saveSQLite(readout)
            sensor_handler.save_value(0, temperature)
            sensor_handler.save_value(1, humidity)
            print "Humidity: " + str(humidity)
            print "Temperature: " + str(temperature)
        else:
            time.sleep(5)
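# saveSQLite() is called above but not defined in the snippet. A hedged,
# minimal sketch of what it might do, assuming a readouts table keyed by a
# Unix timestamp (the schema here is hypothetical):
import os
import sqlite3
import time

def saveSQLite(readout):
    humidity, temperature = readout
    connection = sqlite3.connect(os.path.dirname(os.path.realpath(__file__)) + '/data.db')
    with connection:  # commits on success
        connection.execute(
            'INSERT INTO readouts(`Date`, `Temperature`, `Humidity`) VALUES (?, ?, ?)',
            (int(time.time()), temperature, humidity))
    connection.close()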
def STA_generation(amount, radius, RTS_enable, CWmin, CWmax, system_AP):
    import math
    import random
    STA_list = []
    for i in range(1, amount + 1):
        # Draw a point uniformly over the disk: a uniform angle plus a
        # sqrt-distributed radius (the sqrt compensates for area growing with r)
        alpha = random.random() * 2 * math.pi
        r = math.sqrt(random.random()) * radius
        x = r * math.cos(alpha)
        y = r * math.sin(alpha)
        STA_list.append(
            sensor.sensor(i, CWmin, CWmax, [x, y], RTS_enable, False, system_AP))
    return STA_list
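# Why sqrt(random()) above: for a uniform scatter over a disk the CDF of the
# radius is (r/R)^2, so r = R*sqrt(u) with u ~ U(0,1). A standalone check of
# that claim (illustration only, independent of the sensor class):
import math
import random

def mean_radius(samples=100000, R=1.0):
    total = 0.0
    for _ in range(samples):
        total += R * math.sqrt(random.random())
    return total / samples

# E[r] for a uniform disk is 2R/3, so mean_radius() should land near 0.6667.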
def main():
    s = sensor.sensor()
    dist = s.get_distance()
    if dist is not None:
        level = tank_height - dist
        print "level=" + str(level)
        logger.log_level(level)
    else:
        print "Error in monitor. Failed to get level"
def main():
    sensor_handler = sensor.sensor()
    temperature = sensor_handler.get_last_value(0)
    humidity = sensor_handler.get_last_value(1)
    pressure = sensor_handler.get_last_value(2)
    callScript = ("curl -d 'pressure=" + str(pressure) +
                  "&humidity=" + str(humidity) +
                  "&temp=" + str(temperature) +
                  "&" + config['coords'] + "'"
                  " --user '" + config['user'] + ":" + config['password'] + "'"
                  " http://openweathermap.org/data/post")
    p = subprocess.Popen(callScript, shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def __init__(self):
    Thread.__init__(self)
    self.send_dt = 0.1
    self.is_to_break = False
    self.s = sensor()
    self.sock = bt.BluetoothSocket(bt.RFCOMM)
    # The last line of bt_address.txt ends up as the host address
    with open('bt_address.txt', 'r') as f:
        for line in f:
            self.hostaddr = line.rstrip()
def long_press(self):
    if button.is_blocked:
        return
    button.is_blocked = True
    try:
        button.indicator.set_ratio_period(1.)
        s = sensor()
        s.calib()
    except Exception:
        pass
    button.indicator.set_ratio_period(0.0)
    button.is_blocked = False
def main():
    sensor_handler = sensor.sensor()
    temperature = sensor_handler.get_last_value(0)
    humidity = sensor_handler.get_last_value(1)
    pressure = sensor_handler.get_last_value(2)
    encodedAttributes = {
        'key': config['api'],
        'field1': temperature,
        'field2': humidity,
        'field3': pressure
    }
    req = urllib2.Request(config['url'] + "?" + urllib.urlencode(encodedAttributes))
    response = urllib2.urlopen(req)
    print "Posted!"
def processData(json):
    out = {}
    sensor_handler = sensor.sensor()
    bmp_180_sensor = BMP085.BMP085()
    # Sea-level pressure comes back in Pa; convert to hPa (divide by 100)
    sealevel_pressure = round(bmp_180_sensor.read_sealevel_pressure(35) / 100, 1)
    out['pressure'] = sealevel_pressure
    out['wind-direction'] = json["wind"]["deg"]
    out['wind-speed'] = json["wind"]["speed"]
    # FIXME this is a hack, whole process should be rewritten
    sensor_handler.save_value(2, sealevel_pressure)
    sensor_handler.save_value(3, json["main"]["pressure"])
    sensor_handler.save_value(4, json["wind"]["speed"])
    sensor_handler.save_value(5, json["wind"]["deg"])
    sensor_handler.save_value(6, bmp_180_sensor.read_temperature())  # Save internal temperature readout
    return out
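# processData() expects the OpenWeatherMap "current weather" JSON shape used
# above (json["wind"]["deg"], json["wind"]["speed"], json["main"]["pressure"]).
# A hedged usage example with a hand-built payload (the values are made up):
sample = {
    "wind": {"deg": 220, "speed": 4.1},
    "main": {"pressure": 1013},
}
# data = processData(sample)  # requires the BMP085 sensor to be attached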
def __init__(self, room_id, numRows, *attrs):
    self.room_mod = None
    self.sensors = {}
    if attrs:
        attrs = attrs[0]
        for i, sensorID in enumerate(attrs['info']):
            sensor_attributes = {
                'id': sensorID,
                'info': attrs['info'][sensorID]
            }
            self.sensors[i] = sensor('thermostat', numRows, sensor_attributes)
        self.id = attrs['id']
    else:
        self.id = np.int64(str(uuid.uuid4().int)[:7])
        num_thermostat = rd.randint(1, 2)
        num_co2 = rd.randint(1, 3)
        self.add_sensors(num_thermostat, num_co2, numRows)
def main(self, n, node_id):
    self.node_id = node_id
    # Initialize and start the listener thread
    listener_thread = listener.listener()
    listener_thread.set_node_id(node_id)
    listener_thread.set_node_count(n)
    listener_thread.start()
    # Initialize and start the protocol handler thread
    aodv_thread = aodv.aodv()
    aodv_thread.set_node_id(node_id)
    aodv_thread.set_node_count(n)
    aodv_thread.start()
    # Initialize and start the sensor simulation thread
    sensor_thread = sensor.sensor()
    sensor_thread.set_node_id(node_id)
    sensor_thread.daemon = True
    sensor_thread.start()
def main():
    api = tweetpony.API(consumer_key=config['consumer_key'],
                        consumer_secret=config['consumer_secret'],
                        access_token=config['access_token'],
                        access_token_secret=config['access_token_secret'])
    user = api.user
    sensor_handler = sensor.sensor()
    temperature = sensor_handler.get_last_value(0)
    humidity = sensor_handler.get_last_value(1)
    pressure = sensor_handler.get_last_value(2)
    try:
        # Polish: "Hello Dobra, right now we have <T>C and <H>% humidity"
        api.update_status(status=u'Witaj Dobra, mamy właśnie ' +
                          unicode(str(temperature)) + u'C i ' +
                          unicode(str(humidity)) + u'% wilgotności')
    except tweetpony.APIError as err:
        print "Oops, something went wrong! Twitter returned error #%i and said: %s" % (
            err.code, err.description)
    else:
        print "Yay! Your tweet has been sent!"
def __init__(self, name, pin0=18, pin1=23, pin2=24, pin3=25, simulation=True):
    # GPIO: 18 23 24 25
    # pin : 12 16 18 22
    self.logger = logging.getLogger('myQ.quadcptr')
    self.name = name
    self.simulation = simulation
    self.version = 1
    self.motor = [motor('M' + str(i), 0) for i in xrange(4)]
    self.motor[0] = motor('M0', pin0, kv=1000, WMin=0, WMax=100, simulation=self.simulation)
    self.motor[1] = motor('M1', pin1, kv=1000, WMin=0, WMax=100, simulation=self.simulation)
    self.motor[2] = motor('M2', pin2, kv=1000, WMin=0, WMax=100, simulation=self.simulation)
    self.motor[3] = motor('M3', pin3, kv=1000, WMin=0, WMax=100, simulation=self.simulation)
    self.sensor = sensor(simulation=self.simulation)
    self.pidR = pid()
    self.pidP = pid()
    self.pidY = pid()
    self.pidR_rate = pid()
    self.pidP_rate = pid()
    self.pidY_rate = pid()
    self.ip = '192.168.1.1'
    self.netscan = netscan(self.ip)
    self.webserver = webserver(self)
    self.display = display(self)
    self.rc = rc(self.display.screen)
    self.imulog = False
    self.savelog = False
    self.calibIMU = False
    self.debuglev = 0
    self.netscanning = False
    # For quadcopter physics calculations - not used yet
    self.prop = prop(9, 4.7, 1)
    self.voltage = 12  # [V]
    self.mass = 2  # [Kg]
    self.barLenght = 0.23  # [mm]
    self.barMass = 0.15  # [kg]
    self.datalog = ''
    self.initLog()
def run(args):
    # initialize parameters of interest
    # Method:
    # 0: linear policy
    # 1: RBF policy
    # 2: MLP policy
    process_index = args[3]
    folder_name = args[4]
    np.random.seed(process_index + 100)
    method = 0
    RBF_components = 20
    MLP_neurons = 50
    vel_var = .001
    num_targets = min(6, max(2, np.random.poisson(3)))
    num_targets = np.random.randint(2, 10)
    print("Starting Thread:" + str(process_index))
    # Initialize all the parameters
    params = {0: {}, 1: {}, 2: {}}
    if method == 0:
        params[0]["weight2"] = np.random.normal(0, .3, [2, num_states_layer2])
        params[0]["weight"] = np.array(
            [[7.18777985, -13.68815256, 1.69010242, -5.62483187, -4.30451483, 10.09592853],
             [13.33104057, 13.60537864, 3.46939294, 0.8446329, -14.79733566, -4.78599648]])
    elif method == 1:
        featurizer = sklearn.pipeline.FeatureUnion(
            [("rbf1", RBFSampler(gamma=rbf_var, n_components=RBF_components, random_state=1))])
        featurizer.fit(np.array(list_of_states))  # Use this featurizer for normalization
        params[1]["weight"] = np.random.normal(0, 1, [2, RBF_components])
    elif method == 2:
        params[2]["weigh1"] = np.random.normal(0, 1, [MLP_neurons, num_states])
        params[2]["bias1"] = np.random.normal(0, 1, [MLP_neurons, 1])
        params[2]["weigh2"] = np.random.normal(0, 1, [2, MLP_neurons])
        params[2]["bias2"] = np.random.normal(0, 1, [2, 1])
    return_saver = []
    error_saver = []
    episode_counter = 0
    weight_saver1 = []
    weight_saver2 = []
    weight_saver2_1 = []
    weight_saver2_2 = []
    # Training parameters
    avg_reward = []
    avg_error = []
    var_reward = []
    training = True
    result_folder = base_path + folder_name + "/"
    reward_file = open(result_folder + "reward_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
    error_file = open(result_folder + "error_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
    error_file_median = open(result_folder + "error_median_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
    var_file = open(result_folder + "var_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
    var_error_file = open(result_folder + "var_error_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
    weight_file = open(result_folder + "weight_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
    # Flatten the initial weights and store the values
    if method == 0:
        weight = params[0]['weight']
        flatted_weights = list(weight[0, :]) + list(weight[1, :])
        weight_file.write("\t".join(str(x) for x in flatted_weights) + "\n")
    elif method == 1:
        weight = params[1]['weight']
        flatted_weights = list(weight[0, :]) + list(weight[1, :])
        weight_file.write("\t".join(str(x) for x in flatted_weights) + "\n")
    elif method == 2:
        pass
    init_max_target = 3
    num_targets = init_max_target
    while episode_counter < N_max:
        # Gradually allow more targets as training progresses
        if episode_counter % 1000 == 0 and episode_counter > 0:
            init_max_target += 1
            init_max_target = min(20, init_max_target)
        if episode_counter % 100 == 0 and episode_counter > 0:
            num_targets = np.random.randint(3, init_max_target + 1)
        sigma = gen_learning_rate(episode_counter, sigma_max, .1, 5000)
        sigma = sigma_max
        discounted_return = np.array([])
        discount_vector = np.array([])
        scen = scenario(1, 1)
        bearing_var = 1E-2  # variance of bearing measurement
        # Target information
        x = 10000 * np.random.random([num_targets]) - 5000  # initial x-location
        y = 10000 * np.random.random([num_targets]) - 5000  # initial y-location
        xdot = 10 * np.random.random([num_targets]) - 5  # initial xdot-value
        ydot = 10 * np.random.random([num_targets]) - 5  # initial ydot-value
        init_target_state = []
        init_for_smc = []
        for target_counter in range(0, num_targets):
            # initialize target state
            init_target_state.append([x[target_counter], y[target_counter],
                                      xdot[target_counter], ydot[target_counter]])
            # init state for the tracker (the tracker doesn't know the initial state)
            init_for_smc.append([x[target_counter] + np.random.normal(0, 5),
                                 y[target_counter] + np.random.normal(0, 5),
                                 np.random.normal(0, 5), np.random.normal(0, 5)])
        init_covariance = np.diag([MAX_UNCERTAINTY, MAX_UNCERTAINTY,
                                   MAX_UNCERTAINTY, MAX_UNCERTAINTY])  # initial covariance of state estimation
        t = []
        for i in range(0, num_targets):
            # constant-velocity model for target motion
            t.append(target(init_target_state[i][0:2], init_target_state[i][2],
                            init_target_state[i][3], vel_var, vel_var, "CONS_V"))
        A, B = t[0].constant_velocity(1E-10)  # Get motion model
        x_var = t[0].x_var
        y_var = t[0].y_var
        tracker_object = []
        for i in range(0, num_targets):
            tracker_object.append(EKF_tracker(init_for_smc[i], np.array(init_covariance),
                                              A, B, x_var, y_var, bearing_var))  # create tracker object
        # Initialize sensor object
        if method == 0:
            s = sensor("POLICY_COMM_LINEAR")  # create sensor object (stochastic policy)
        elif method == 1:
            s = sensor("POLICY_COMM_RBF")
        elif method == 2:
            s = sensor("POLICY_COMM_MLP")
        measure = measurement(bearing_var)  # create measurement object
        m = []
        x_est, y_est, x_vel_est, y_vel_est = [], [], [], []
        x_truth, y_truth, x_vel_truth, y_vel_truth = [], [], [], []
        uncertainty = []
        vel_error = []
        pos_error = []
        iteration = []
        innovation = []
        for i in range(0, num_targets):
            x_truth.append([])
            y_truth.append([])
            x_vel_truth.append([])
            y_vel_truth.append([])
            uncertainty.append([])
            vel_error.append([])
            x_est.append([])
            y_est.append([])
            x_vel_est.append([])
            y_vel_est.append([])
            pos_error.append([])
            innovation.append([])
        reward = []
        episode_condition = True
        n = 0
        violation = 0
        # Store required information
        episode_state = []
        episode_state_out_layer = []
        episode_MLP_state = []
        episode_actions = []
        avg_uncertainty = []
        max_uncertainty = []
        while episode_condition:
            temp_m = []
            input_state_temp = []
            for i in range(0, num_targets):
                t[i].update_location()
                temp_m.append(measure.generate_bearing(t[i].current_location, s.current_location))
            m.append(temp_m)
            temp_reward = []
            target_actions = []
            for i in range(0, num_targets):
                tracker_object[i].update_states(s.current_location, m[-1][i])
                normalized_innovation = (tracker_object[i].innovation_list[-1]) / tracker_object[i].innovation_var[-1]
                current_state = list(tracker_object[i].x_k_k.reshape(len(tracker_object[i].x_k_k))) + list(s.current_location)
                # State normalization: map locations and velocities to [-1, 1]
                x_slope = 2.0 / (scen.x_max - scen.x_min)
                y_slope = 2.0 / (scen.y_max - scen.y_min)
                x_slope_sensor = 2.0 / 40000
                y_slope_sensor = 2.0 / 40000
                vel_slope = 2.0 / (scen.vel_max - scen.vel_min)
                current_state[0] = -1 + x_slope * (current_state[0] - scen.x_min)
                current_state[1] = -1 + y_slope * (current_state[1] - scen.y_min)
                current_state[2] = -1 + vel_slope * (current_state[2] - scen.vel_min)
                current_state[3] = -1 + vel_slope * (current_state[3] - scen.vel_min)
                current_state[4] = -1 + x_slope * (current_state[4] - scen.x_min)
                current_state[5] = -1 + y_slope * (current_state[5] - scen.y_min)
                # Refactor states based on the usage
                if method == 0 or method == 2:
                    input_state = current_state
                    input_state_temp.append(input_state)  # store input-states
                elif method == 1:
                    # Generate states for the RBF input
                    input_state = featurizer.transform(np.array(current_state).reshape(1, len(current_state)))
                    input_state = list(input_state[0])
                target_actions.append(s.generate_action(params, input_state, .01))
                estimate = tracker_object[i].x_k_k
                episode_state.append(input_state)  # FIXME: needs to get modified
                if method == 2:
                    episode_MLP_state.append(extra_information)  # FIXME: needs to get modified
                truth = t[i].current_location
                x_est[i].append(estimate[0])
                y_est[i].append(estimate[1])
                x_vel_est[i].append(estimate[2])
                y_vel_est[i].append(estimate[3])
                x_truth[i].append(truth[0])
                y_truth[i].append(truth[1])
                x_vel_truth[i].append(t[i].current_velocity[0])
                y_vel_truth[i].append(t[i].current_velocity[1])
                vel_error[i].append(np.linalg.norm(estimate[2:4] - np.array([t[i].current_velocity[0], t[i].current_velocity[1]]).reshape(2, 1)))
                pos_error[i].append(np.linalg.norm(estimate[0:2] - np.array(truth).reshape(2, 1)))
                innovation[i].append(normalized_innovation[0])
                unormalized_uncertainty = np.sum(tracker_object[i].p_k_k.diagonal())
                uncertainty[i].append((1.0 / MAX_UNCERTAINTY) * unormalized_uncertainty)
            # Reward: 1 when the windowed average uncertainty over all targets
            # is dropping (or is already small)
            this_uncertainty = [uncertainty[x][-1] for x in range(0, num_targets)]
            avg_uncertainty.append(np.mean(this_uncertainty))
            max_uncertainty.append(np.max(this_uncertainty))
            if len(avg_uncertainty) < window_size + window_lag:
                reward.append(0)
            else:
                current_avg = np.mean(avg_uncertainty[-window_size:])
                prev_avg = np.mean(avg_uncertainty[-(window_size + window_lag):-window_lag])
                if current_avg < prev_avg or avg_uncertainty[-1] < .1:
                    reward.append(1)
                else:
                    reward.append(0)
            #if sum(reward)>1100 and num_targets>2: sys.exit(1)
            # Create a feature-vector from the generated target actions and update
            # the sensor location based on all individual actions.
            # index_matrix: an n_s x T matrix holding the derivative of the
            # output-layer state with respect to the internal-layer action space
            normalized_state, index_matrix1, index_matrix2, slope = s.update_location_decentralized(target_actions, sigma, params)
            backpropagated_to_internal_1 = index_matrix1.dot(np.array(input_state_temp))  # 8 by 6
            backpropagated_to_internal_2 = index_matrix2.dot(np.array(input_state_temp))  # 8 by 6
            episode_state_out_layer.append(normalized_state)
            # each entry is a T x 6 matrix, with T the number of targets
            episode_state.append([backpropagated_to_internal_1, backpropagated_to_internal_2])
            # Update return
            discount_vector = gamma * np.array(discount_vector)
            discounted_return += (1.0 * reward[-1]) * discount_vector
            new_return = 1.0 * reward[-1]
            list_discounted_return = list(discounted_return)
            list_discounted_return.append(new_return)
            discounted_return = np.array(list_discounted_return)
            list_discount_vector = list(discount_vector)
            list_discount_vector.append(1)
            discount_vector = np.array(list_discount_vector)
            iteration.append(n)
            if n > episode_length:
                break
            n += 1
        # Based on the return from the episode, update the parameters of the policy model
        prev_params = dict(params)
        condition = True
        for i in range(0, num_targets):
            if np.mean(pos_error[i]) > 10000:
                condition = False
                break
        if not condition:
            continue  # garbage trajectory: skip without counting the episode
        if episode_condition and training:
            normalized_discounted_return = discounted_return
            episode_actions = s.sensor_actions
            rate = gen_learning_rate(episode_counter, learning_rate, 1E-12, 20000)
            internal_rate = gen_learning_rate(episode_counter, 3 * 1E-5, 1E-15, 20000)
            total_adjustment = np.zeros(np.shape(weight))
            for e in range(0, len(episode_actions)):
                # calculate the gradient
                out_state = np.array(episode_state_out_layer[e]).reshape(len(episode_state_out_layer[e]), 1)
                backpropagated_terms = episode_state[e]
                if method == 0:
                    # 1 by n_s: derivative of F with respect to the output state-vector
                    deriv_with_out_state = (episode_actions[e].reshape(2, 1) - params[0]['weight2'].dot(out_state)).transpose().dot(params[0]['weight2'])
                    internal_gradiant1 = deriv_with_out_state.dot(backpropagated_terms[0])  # 1 by 6
                    internal_gradiant2 = deriv_with_out_state.dot(backpropagated_terms[1])  # 1 by 6
                    internal_gradiant = np.concatenate([internal_gradiant1, internal_gradiant2])
                    gradiant_out_layer = ((episode_actions[e].reshape(2, 1) - params[0]['weight2'].dot(out_state)).dot(out_state.transpose())) / sigma ** 2
                elif method == 1:
                    gradiant = ((episode_actions[e].reshape(2, 1) - params[1]['weight'].dot(state)).dot(state.transpose())) / sigma ** 2
                elif method == 2:
                    # Gradient for MLP
                    pass
                if np.max(np.abs(gradiant_out_layer)) > 1E2 or np.max(np.abs(internal_gradiant)) > 1E2:
                    continue  # clip large gradients
                if method == 0:
                    adjustment_term_out_layer = gradiant_out_layer * normalized_discounted_return[e]  # an unbiased sample of return
                    adjustment_term_internal_layer = internal_gradiant * normalized_discounted_return[e]
                    params[0]['weight2'] += rate * adjustment_term_out_layer
                    params[0]['weight'] += internal_rate * adjustment_term_internal_layer
                elif method == 1:
                    adjustment_term = gradiant * normalized_discounted_return[e]  # an unbiased sample of return
                    params[1]['weight'] += rate * adjustment_term
                elif method == 2:
                    # Gradient for MLP
                    pass
            episode_counter += 1
            flatted_weights1 = list(params[0]['weight'][0, :]) + list(params[0]['weight'][1, :])
            flatted_weights2 = list(params[0]['weight2'][0, :]) + list(params[0]['weight2'][1, :])
            temp1 = [str(x) for x in flatted_weights1]
            temp2 = [str(x) for x in flatted_weights2]
            weight_file.write("\t".join(temp1) + "$$$" + "\t".join(temp2) + "\n")
            weight_saver1.append(params[0]['weight'][0][0])
            weight_saver2.append(params[0]['weight'][1][0])
            weight_saver2_1.append(params[0]['weight2'][0][0])
            weight_saver2_2.append(params[0]['weight2'][1][0])
        else:
            # garbage trajectory: no update
            pass
        return_saver.append(sum(reward))
        error_saver.append(np.mean(pos_error))
        if episode_counter % 100 == 0 and episode_counter > 0:
            print(episode_counter, np.mean(return_saver), sigma)
            reward_file.write(str(np.mean(sorted(return_saver, reverse=True)[0:int(.95 * len(return_saver))])) + "\n")
            error_file.write(str(np.mean(sorted(error_saver)[0:int(.95 * len(error_saver))])) + "\n")
            error_file_median.write(str(np.median(sorted(error_saver)[0:int(.95 * len(error_saver))])) + "\n")
            var_error_file.write(str(np.var(sorted(error_saver)[0:int(.95 * len(error_saver))])) + "\n")
            var_file.write(str(np.var(sorted(return_saver, reverse=True)[0:int(.95 * len(return_saver))])) + "\n")
            avg_reward.append(np.mean(sorted(return_saver)[0:int(.95 * len(return_saver))]))
            avg_error.append(np.mean(sorted(error_saver)[0:int(.95 * len(error_saver))]))
            var_reward.append(np.var(return_saver))
            # Flush by closing and reopening the output files in append mode
            reward_file.close()
            var_file.close()
            error_file.close()
            error_file_median.close()
            var_error_file.close()
            weight_file.close()
            reward_file = open(result_folder + "reward_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
            error_file = open(result_folder + "error_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
            var_file = open(result_folder + "var_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
            var_error_file = open(result_folder + "var_error_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
            weight_file = open(result_folder + "weight_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
            error_file_median = open(result_folder + "error_median_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
            return_saver = []
            error_saver = []
        num_episodes.append(n)
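# The discounted_return / discount_vector bookkeeping above maintains, for
# every step k of the episode, G_k = sum_{j>=k} gamma^(j-k) * r_j without a
# backward pass at episode end. A standalone illustration of the same update:
import numpy as np

def incremental_returns(rewards, gamma):
    discounted_return = np.array([])
    discount_vector = np.array([])
    for r in rewards:
        # Age every existing discount factor by one step, credit r to all
        # earlier steps, then open a new entry for the current step.
        discount_vector = gamma * discount_vector
        discounted_return = discounted_return + r * discount_vector
        discounted_return = np.append(discounted_return, r)
        discount_vector = np.append(discount_vector, 1.0)
    return discounted_return

# incremental_returns([1, 0, 1], 0.9) -> [1.81, 0.9, 1.0],
# i.e. G_0 = 1 + 0.9*0 + 0.81*1, G_1 = 0 + 0.9*1, G_2 = 1.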
def run(self):
    '''
    This method is responsible for:
    1. Instantiating the sensor objects
    2. Identifying whether it is running 'local' or not
    3. Identifying whether the generated data will be saved to 'influxDB'
    4. Generating the sensor data
    '''
    try:
        logger.info('Emulator run triggered')
        ### Instantiate the sensors: create the list that will host the sensor objects
        listSensors = []
        for i in range(0, self.numSensors):
            listSensors.append(s.sensor())
        ### If not running locally, create the client that connects to the MQTT broker
        if not self.local:
            broker = config['mosquitto']['broker_address']
            port = int(config['mosquitto']['broker_port'])
            topic_structure = config['mosquitto']['topic_structure']
            mqtt_broker_client = paho.Client("sensor_Emulator")
            mqtt_broker_client.connect(broker, port)
        ### Control whether the generated data will be loaded into the local influxDB
        if self.store_db:
            db_name = 'sensor_data'
            client = InfluxDBClient('localhost', 8086, 'root', 'root', db_name)
            ### Drop the database if it exists (the idea is to always start with an empty DB)
            client.drop_database(db_name)
            client.create_database(db_name)
        ### Generate the data and publish / print
        while True:
            timestamp = datetime.fromtimestamp(time.time())
            if self.store_db:
                ### InfluxDB works with UTC; adjust the timestamp so it displays
                ### correctly on Grafana (currently UTC+1, hence the -1 hour)
                timestamp = timestamp - timedelta(hours=1, minutes=0)
            ### For each sensor: generate the data, create the message, publish
            ### it, and (if store_db is True) load the data into influxDB
            for i in range(0, self.numSensors):
                data = listSensors[i].generateData()
                if self.local:
                    message = "sensor_" + str(i + 1) + "|" + str(
                        timestamp.strftime('%Y-%m-%d|%H:%M:%S')) + "|" + str(data)
                    print(message)
                else:
                    message = str(timestamp.strftime('%Y-%m-%d|%H:%M:%S')) + "|" + str(data)
                    topic = topic_structure.replace("[SENSOR_ID]", str(i + 1))
                    mqtt_broker_client.publish(topic, message)
                    logger.info("topic|" + str(topic) + "|message|" + message)
                if self.store_db:
                    table_name = "sensor_data"
                    date_format = '%Y-%m-%dT%H:%M:%S%Z'
                    json_body = [{
                        "measurement": table_name,
                        "time": str(timestamp.strftime(date_format) + "Z"),
                        "tags": {"sensorId": str(i + 1)},
                        "fields": {"value": data, "tic": 1}
                    }]
                    client.write_points(json_body)
                    logger.info("data inserted on db: " + db_name + " table: " + table_name)
                    logger.info("data inserted: " + str(json_body))
            ### The frequency at which data is generated is controlled here
            time.sleep(self.frequency_in_seconds)
    except Exception as e:
        logger.exception(e)
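# A hedged companion sketch: consuming the emulator's messages with paho-mqtt.
# The broker address, port, and topic pattern are assumptions; substitute the
# values from the 'mosquitto' section of the config used above.
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    # Messages arrive as "YYYY-MM-DD|HH:MM:SS|<value>" (or with a sensor_N
    # prefix in local mode)
    print(msg.topic + " -> " + msg.payload.decode())

subscriber = mqtt.Client("sensor_consumer")
subscriber.on_message = on_message
subscriber.connect("localhost", 1883)   # assumed broker address/port
subscriber.subscribe("sensors/#")       # assumed topic pattern
subscriber.loop_forever()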
#! /usr/bin/python2.7
import time
import datetime
from sensor import sensor
from MysqlConnect import MySqlConnect

sens = sensor()
#sens.initGPIO()
database = MySqlConnect()
database.getConnect()
data = {}
while True:
    dt = str(datetime.datetime.today()).split('.')[0]
    sens.readTemperature()
    data['id'] = sens.getID()
    data['name'] = str(sens.getName())
    data['address'] = 9  # dt # str(sens.getAddress())
    data['temperature'] = sens.getTemperature()
    if sens.getTemperature() < 30:
        data['type'] = 'normal'
    else:
        data['type'] = 'danger'
    data['lat'] = 58.328315
    data['lng'] = 56.245774
def stopSensors(self):
    for port in self.sensorMap:
        sense = sensor.sensor(port)
        sense.update(0)
        logger.debug('Sensor ' + sense.port + ' stopping')
    return self.getAllData()
def main():
    s = sensor.sensor()
    logger.log_level(s.get_distance())
#!/usr/bin/python3
from sensor import sensor
import time
from motors import Actuation
from mapping import Mapping

##################
### PARAMETERS ###
##################
CAMERA_NUM = 2  # 1 for PiCamera, 2 for USB Camera

###############
### GLOBALS ###
###############
cam = sensor(CAMERA_NUM)
ctrl = Actuation()
mapping = Mapping()


def main():
    cam.start()
    while True:
        if cam.stopped:
            break
        cards, num = cam.get_cards()
        if num != 0:
            print(num)
            print("Card: {} {}".format(cards[0].best_rank_match,
                                       cards[0].best_suit_match))
        time.sleep(0.2)
def getAllData(self):
    sensorData = []
    for port in self.sensorMap:
        sense = sensor.sensor(port)
        sensorData.append(sense.collect())
    return sensorData
from loggingQ import setupLogger
from sensor import sensor
import argparse
import curses

logger = setupLogger('myQ', True, 'sensor_test.log')

calibIMU = False
parser = argparse.ArgumentParser()
parser.add_argument('-c', dest='calibIMU', action='store_true',
                    help='Calibrate IMU')
parser.add_argument('-i', dest='imulog', action='store_true',
                    help='save IMU data log: myQ_sensor.csv')
args = parser.parse_args()
calibIMU = args.calibIMU
imuLog = args.imulog

mySensor = sensor(imulog=imuLog, simulation=False)
if calibIMU:
    mySensor.calibrate()
mySensor.start()

screen = curses.initscr()
# turn off input echoing
curses.noecho()
# respond to keys immediately (don't wait for enter)
curses.cbreak()
# map arrow keys to special values
screen.keypad(True)
# timeout in millis
initEsc = args.initEsc
gpio = args.gpio

logger = setupLogger("myQ")

print("gpio: " + str(gpio))
print("initEsc: " + str(initEsc))

# where 18 is GPIO18 = pin 12
# GPIO23 = pin 16
# GPIO24 = pin 18
# GPIO25 = pin 22
mymotor = motor("m1", gpio, simulation=False)

mySensor = sensor()
mySensor.start()

print("***Press ENTER to start")
res = raw_input()

mymotor.start()
# TODO the next lines INITIALIZE the ESC to a desired MAX PWM.
# Run this at least once for each ESC, in order to obtain the same
# behaviour in all the ESCs; use arg -i to enable it.
if initEsc:
    print("***Disconnect ESC power")
    print("***then press ENTER")
    res = raw_input()
    mymotor.setW(100)
lg = piLog()
lg.openLog(piServer.serverLog, moduleLocalName, configXML.verbose)
lg.log(clock_milliseconds(), "info",
       "starting " + moduleLocalName + " - PID : " + str(os.getpid()))
lg.log(clock_milliseconds(), "info", "port " + moduleLocalName + " : ")
lg.log(clock_milliseconds(), "info", "confXML : ")
# Log the configuration values to the debug file
for element in [attr for attr in dir(configXML())
                if not callable(getattr(configXML, attr)) and not attr.startswith("__")]:
    lg.log(clock_milliseconds(), "info", element + " : " + getattr(configXML, element))
# Start the server
lg.log(clock_milliseconds(), "info", "starting serveur")
piServ = piServer(moduleLocalName, configXML.verbose, "0.0.0.0",
                  piServer.serverAcqSensorUSB)
ssor = sensor.sensor(moduleLocalName, configXML.verbose, piServer.serverLog)
readIsDone = 0
while True:
    # Listen for a client trying to connect
    piServ.listen()
    # Read the sensors every 5 seconds
    time.sleep(0.01)
    epoch_time = int(time.time())
    if epoch_time % 5 == 0:
        if readIsDone == 0:
            ssor.readSensor(0)
# -*- coding: utf-8 -*-
from sensor import sensor
import curses

mySensor = sensor()
mySensor.start()

screen = curses.initscr()
# turn off input echoing
curses.noecho()
# respond to keys immediately (don't wait for enter)
curses.cbreak()
# map arrow keys to special values
screen.keypad(True)

try:
    cycling = True
    while cycling:
        s = '|roll: ' + str(mySensor.roll)
        s += '|pitch: ' + str(mySensor.pitch)
        s += '|yaw: ' + str(mySensor.yaw)
        screen.clear()
        screen.addstr(1, 1, 'Press any button to stop')
        screen.addstr(2, 2, s)
        # timeout in millis
        screen.timeout(500)
        # getch returns -1 if timeout
        res = screen.getch()
from sensor import sensor

### NEED TO BE CHANGED REGULARLY ###
start_date = "2021-02-01"
end_date = "2021-02-07"

### LESS LIKELY TO CHANGE ###
pm25_y_label = "PM 2.5 (medijana)"  # Serbian: "PM 2.5 (median)"
# Serbian: "{sensor_name} for the week from {start_date} to {end_date}"
chart_title_format = "{sensor_name} za nedelju od {start_date} do {end_date}"
all_sensors = [
    sensor(name="Devet Jugovica", id="esp8266-8000692"),
    sensor(name="Starine Novaka", id="esp8266-8023432")
]
delta_in_days = 1
days = ["PON", "UTO", "SRE", "CET", "PET", "SUB", "NED"]  # Serbian day abbreviations, Mon-Sun
output_folder = "out"
output_image_format = "png"
output_txt_format = "txt"
PM10_INDEX = 7
PM25_INDEX = 8
url_pattern = "https://api-rrd.madavi.de/data_csv/csv-files/{date_part}/data-{sensor_id}-{date_part}.csv"
filename_pattern = "data-{date_part}.csv"
csv_folder = "data"
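# How the patterns above expand (illustration only; the download code itself
# is not part of this snippet):
date_part = "2021-02-01"
sensor_id = "esp8266-8000692"
url = url_pattern.format(date_part=date_part, sensor_id=sensor_id)
filename = filename_pattern.format(date_part=date_part)
# url      -> https://api-rrd.madavi.de/data_csv/csv-files/2021-02-01/data-esp8266-8000692-2021-02-01.csv
# filename -> data-2021-02-01.csv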
def __init__(self):
    self.ToF = sensor()
n._set_loco(nn, l.speed, l.reverse, l.opts)

l = logger(console=True)
l._add_trace('controller')
l._add_trace('serial')
try:
    dev = sys.argv[1]
except IndexError:
    dev = None
d = dispatcher()
n = nce_controller(dev)
usb_listener.make_listeners()
for i in range(12):
    sensor(str(i + 1))
curloco = 0
loco_infos = {}
while True:
    cmd = input('command: ').strip()
    if cmd == '':
        break
    try:
        cmd0 = cmd[0]
        rest = cmd[1:].strip()
        if cmd0 == 'p':
            n.ping()
        elif cmd0 == 'l':
            curloco = int(rest)
            get_loco(curloco)
init_covariance = np.diag([MAX_UNCERTAINTY, MAX_UNCERTAINTY,
                           MAX_UNCERTAINTY, MAX_UNCERTAINTY])  # initial covariance of state estimation
t = target(init_target_state[0:2], init_target_state[2], init_target_state[3],
           .1, .1, "CONS_V")  # constant-velocity model for target motion
A, B = t.constant_velocity(1E-10)  # Get motion model
x_var = t.x_var
y_var = t.y_var
tracker_object = EKF_tracker(init_for_smc, init_covariance, A, B,
                             x_var, y_var, bearing_var)  # create tracker object
s = sensor("POLICY_COMM")  # create sensor object (stochastic policy)
#s = sensor("CONS_A")
measure = measurement(bearing_var)  # create measurement object
m = []
x_est = []
y_est = []
x_vel_est = []
y_vel_est = []
x_truth = []
y_truth = []
x_vel_truth = []
y_vel_truth = []
uncertainty = []
vel_error = []
def __init__(self):
    self.logger = logging.getLogger('sensing.sensing')
    self.tmpSensor = sensor()
    self.tmpJsonFileDataMgr = jsonFileDataMgr()
def run(args):
    # initialize parameters of interest
    # Method:
    # 0: linear policy
    # 1: RBF policy
    # 2: MLP policy
    method = args[0]
    RBF_components = args[1]
    MLP_neurons = args[2]
    process_index = args[3]
    folder_name = args[4]
    vel_var = args[5]
    np.random.seed(process_index)
    print("Starting Thread:" + str(process_index))
    # Initialize all the parameters
    params = {0: {}, 1: {}, 2: {}}
    if method == 0:
        params[0]["weight"] = np.random.normal(0, .3, [2, num_states])
    elif method == 1:
        featurizer = sklearn.pipeline.FeatureUnion(
            [("rbf1", RBFSampler(gamma=rbf_var, n_components=RBF_components, random_state=1))])
        featurizer.fit(np.array(list_of_states))  # Use this featurizer for normalization
        params[1]["weight"] = np.random.normal(0, 1, [2, RBF_components])
    elif method == 2:
        params[2]["weigh1"] = np.random.normal(0, 1, [MLP_neurons, num_states])
        params[2]["bias1"] = np.random.normal(0, 1, [MLP_neurons, 1])
        params[2]["weigh2"] = np.random.normal(0, 1, [2, MLP_neurons])
        params[2]["bias2"] = np.random.normal(0, 1, [2, 1])
    return_saver = []
    error_saver = []
    episode_counter = 0
    weight_saver1 = []
    weight_saver2 = []
    # Training parameters
    avg_reward = []
    avg_error = []
    var_reward = []
    training = True
    result_folder = base_path + folder_name + "/"
    reward_file = open(result_folder + "reward_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
    error_file = open(result_folder + "error_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
    error_file_median = open(result_folder + "error_median_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
    var_file = open(result_folder + "var_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
    var_error_file = open(result_folder + "var_error_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
    weight_file = open(result_folder + "weight_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
    # Flatten the initial weights and store the values
    if method == 0:
        weight = params[0]['weight']
        flatted_weights = list(weight[0, :]) + list(weight[1, :])
        weight_file.write("\t".join(str(x) for x in flatted_weights) + "\n")
    elif method == 1:
        weight = params[1]['weight']
        flatted_weights = list(weight[0, :]) + list(weight[1, :])
        weight_file.write("\t".join(str(x) for x in flatted_weights) + "\n")
    elif method == 2:
        pass
    sigma = sigma_max
    while episode_counter < N_max:
        # Anneal the exploration noise every 1500 episodes
        if episode_counter % 1500 == 0 and episode_counter > 0:
            sigma -= .15
            sigma = max(.1, sigma)
        discounted_return = np.array([])
        discount_vector = np.array([])
        scen = scenario(1, 1)
        bearing_var = 1E-2  # variance of bearing measurement
        # Target information
        x = 10000 * random.random() - 5000  # initial x-location
        y = 10000 * random.random() - 5000  # initial y-location
        xdot = 10 * random.random() - 5  # initial xdot-value
        ydot = 10 * random.random() - 5  # initial ydot-value
        init_target_state = [x, y, xdot, ydot]  # initialize target state
        # init state for the tracker (the tracker doesn't know the initial state)
        init_for_smc = [x + np.random.normal(0, 5), y + np.random.normal(0, 5),
                        np.random.normal(0, 5), np.random.normal(0, 5)]
        init_sensor_state = [10000 * random.random() - 5000,
                             10000 * random.random() - 5000, 3, -2]  # initial sensor-state
        temp_loc = np.array(init_target_state[0:2]).reshape(2, 1)
        init_location_estimate = temp_loc + 0 * np.random.normal(np.zeros([2, 1]), 10)
        init_location_estimate = [init_location_estimate[0][0], init_location_estimate[1][0]]
        init_velocity_estimate = [6 * random.random() - 3, 6 * random.random() - 3]
        init_velocity_estimate = [init_target_state[2], init_target_state[3]]
        init_estimate = init_location_estimate + init_velocity_estimate
        init_covariance = np.diag([MAX_UNCERTAINTY, MAX_UNCERTAINTY,
                                   MAX_UNCERTAINTY, MAX_UNCERTAINTY])  # initial covariance of state estimation
        t = target(init_target_state[0:2], init_target_state[2], init_target_state[3],
                   vel_var, vel_var, "CONS_V")  # constant-velocity model for target motion
        A, B = t.constant_velocity(1E-10)  # Get motion model
        x_var = t.x_var
        y_var = t.y_var
        tracker_object = EKF_tracker(init_for_smc, init_covariance, A, B,
                                     x_var, y_var, bearing_var)  # create tracker object
        # Initialize sensor object
        if method == 0:
            s = sensor("POLICY_COMM_LINEAR")  # create sensor object (stochastic policy)
        elif method == 1:
            s = sensor("POLICY_COMM_RBF")
        elif method == 2:
            s = sensor("POLICY_COMM_MLP")
        measure = measurement(bearing_var)  # create measurement object
        m = []
        x_est, y_est, x_vel_est, y_vel_est = [], [], [], []
        x_truth, y_truth, x_vel_truth, y_vel_truth = [], [], [], []
        uncertainty = []
        vel_error = []
        pos_error = []
        iteration = []
        innovation = []
        reward = []
        episode_condition = True
        n = 0
        violation = 0
        # Store required information
        episode_state = []
        episode_MLP_state = []
        episode_actions = []
        while episode_condition:
            t.update_location()
            m.append(measure.generate_bearing(t.current_location, s.current_location))
            tracker_object.update_states(s.current_location, m[-1])
            normalized_innovation = (tracker_object.innovation_list[-1]) / tracker_object.innovation_var[-1]
            current_state = list(tracker_object.x_k_k.reshape(len(tracker_object.x_k_k))) + list(s.current_location)
            # State normalization: map locations and velocities to [-1, 1]
            x_slope = 2.0 / (scen.x_max - scen.x_min)
            y_slope = 2.0 / (scen.y_max - scen.y_min)
            x_slope_sensor = 2.0 / 40000
            y_slope_sensor = 2.0 / 40000
            vel_slope = 2.0 / (scen.vel_max - scen.vel_min)
            current_state[0] = -1 + x_slope * (current_state[0] - scen.x_min)
            current_state[1] = -1 + y_slope * (current_state[1] - scen.y_min)
            current_state[2] = -1 + vel_slope * (current_state[2] - scen.vel_min)
            current_state[3] = -1 + vel_slope * (current_state[3] - scen.vel_min)
            current_state[4] = -1 + x_slope * (current_state[4] - scen.x_min)
            current_state[5] = -1 + y_slope * (current_state[5] - scen.y_min)
            # Refactor states based on the usage
            if method == 0 or method == 2:
                input_state = current_state
            elif method == 1:
                # Generate states for the RBF input
                input_state = featurizer.transform(np.array(current_state).reshape(1, len(current_state)))
                input_state = list(input_state[0])
            extra_information = s.update_location_new(params, input_state, sigma)
            estimate = tracker_object.x_k_k
            episode_state.append(input_state)
            if method == 2:
                episode_MLP_state.append(extra_information)  # Output of the first layer for gradient calculation
            truth = t.current_location
            x_est.append(estimate[0])
            y_est.append(estimate[1])
            x_vel_est.append(estimate[2])
            y_vel_est.append(estimate[3])
            x_truth.append(truth[0])
            y_truth.append(truth[1])
            x_vel_truth.append(t.current_velocity[0])
            y_vel_truth.append(t.current_velocity[1])
            vel_error.append(np.linalg.norm(estimate[2:4] - np.array([t.current_velocity[0], t.current_velocity[1]]).reshape(2, 1)))
            pos_error.append(np.linalg.norm(estimate[0:2] - np.array(truth).reshape(2, 1)))
            innovation.append(normalized_innovation[0])
            unormalized_uncertainty = np.sum(tracker_object.p_k_k.diagonal())
            uncertainty.append((1.0 / MAX_UNCERTAINTY) * unormalized_uncertainty)
            # Reward: 1 when the windowed average uncertainty is dropping
            # (or is already small)
            if len(uncertainty) < window_size + window_lag:
                reward.append(0)
            else:
                current_avg = np.mean(uncertainty[-window_size:])
                prev_avg = np.mean(uncertainty[-(window_size + window_lag):-window_lag])
                if current_avg < prev_avg or uncertainty[-1] < .1:
                    reward.append(1)
                else:
                    reward.append(0)
            # Update return
            discount_vector = gamma * np.array(discount_vector)
            discounted_return += (1.0 * reward[-1]) * discount_vector
            new_return = 1.0 * reward[-1]
            list_discounted_return = list(discounted_return)
            list_discounted_return.append(new_return)
            discounted_return = np.array(list_discounted_return)
            list_discount_vector = list(discount_vector)
            list_discount_vector.append(1)
            discount_vector = np.array(list_discount_vector)
            iteration.append(n)
            if n > episode_length:
                break
            n += 1
        # Based on the return from the episode, update the parameters of the policy model
        prev_params = dict(params)
        condition = True
        if np.mean(pos_error) > 10000:
            continue  # garbage trajectory: skip without counting the episode
        if episode_condition and training:
            normalized_discounted_return = discounted_return
            episode_actions = s.sensor_actions
            rate = gen_learning_rate(episode_counter, learning_rate, 1E-8, 10000)
            total_adjustment = np.zeros(np.shape(weight))
            for e in range(0, len(episode_actions)):
                # calculate the gradient
                state = np.array(episode_state[e]).reshape(len(episode_state[e]), 1)
                if method == 0:
                    gradiant = ((episode_actions[e].reshape(2, 1) - params[0]['weight'].dot(state)).dot(state.transpose())) / sigma ** 2
                elif method == 1:
                    gradiant = ((episode_actions[e].reshape(2, 1) - params[1]['weight'].dot(state)).dot(state.transpose())) / sigma ** 2
                elif method == 2:
                    # Gradient for MLP
                    pass
                if np.max(np.abs(gradiant)) > 1E2:
                    continue  # clip large gradients
                if method == 0:
                    adjustment_term = gradiant * normalized_discounted_return[e]  # an unbiased sample of return
                    params[0]['weight'] += rate * adjustment_term
                elif method == 1:
                    adjustment_term = gradiant * normalized_discounted_return[e]  # an unbiased sample of return
                    params[1]['weight'] += rate * adjustment_term
                elif method == 2:
                    # Gradient for MLP
                    pass
            episode_counter += 1
        else:
            # garbage trajectory: no update
            pass
        return_saver.append(sum(reward))
        error_saver.append(np.mean(pos_error))
        if episode_counter % 100 == 0 and episode_counter > 0:
            print(episode_counter, np.mean(return_saver), sigma)
            reward_file.write(str(np.mean(sorted(return_saver)[0:int(.95 * len(return_saver))])) + "\n")
            error_file.write(str(np.mean(sorted(error_saver)[0:int(.95 * len(error_saver))])) + "\n")
            error_file_median.write(str(np.median(sorted(error_saver)[0:int(.95 * len(error_saver))])) + "\n")
            var_error_file.write(str(np.var(sorted(error_saver)[0:int(.95 * len(error_saver))])) + "\n")
            var_file.write(str(np.var(sorted(return_saver)[0:int(.95 * len(return_saver))])) + "\n")
            avg_reward.append(np.mean(sorted(return_saver)[0:int(.95 * len(return_saver))]))
            avg_error.append(np.mean(sorted(error_saver)[0:int(.95 * len(error_saver))]))
            var_reward.append(np.var(return_saver))
            # Flush by closing and reopening the output files in append mode
            reward_file.close()
            var_file.close()
            error_file.close()
            error_file_median.close()
            var_error_file.close()
            weight_file.close()
            reward_file = open(result_folder + "reward_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
            error_file = open(result_folder + "error_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
            var_file = open(result_folder + "var_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
            var_error_file = open(result_folder + "var_error_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
            weight_file = open(result_folder + "weight_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
            error_file_median = open(result_folder + "error_median_noise:" + str(vel_var) + "_" + str(process_index) + "_linear_6states.txt", "a")
            return_saver = []
            error_saver = []
        num_episodes.append(n)
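# gen_learning_rate() is used above but not defined in the snippet. A hedged
# sketch of a schedule consistent with how it is called (a start value, a
# floor, and a decay horizon); the actual decay law in the original may differ:
def gen_learning_rate(episode, start_rate, min_rate, horizon):
    """Linearly decay start_rate toward min_rate over `horizon` episodes."""
    fraction = min(float(episode) / horizon, 1.0)
    return max(min_rate, start_rate * (1.0 - fraction) + min_rate * fraction)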
def getData(self, port):
    if port in self.sensorMap:
        sense = sensor.sensor(self.sensorMap.get(port))
        return sense.collect()
    return None
import sys
sys.path.append("../common")
sys.path.append("../sensor")
import sensor
import message
import time

pm = message.print_messager()
foo = sensor.sensor("Test sensor", .01, pm)
foo.init()
foo.start()
time.sleep(1)
foo.stop()
def startSensors(self):
    for port in self.sensorMap:
        sense = sensor.sensor(port)
        sense.run()