def __init__(self, **kwargs):
    super().__init__(**kwargs)
    self.pattern_name = "Music"
    CONFIGS['n_pixels'] = self.strip_length
    # init visualizer
    self.vis = Visualizer(CONFIGS)
    # dict used to set the visualizer effect
    self.effect_dict = dict(
        spectrum=self.vis.visualize_spectrum,
        energy=self.vis.visualize_energy,
        scroll=self.vis.visualize_scroll,
    )
    # name of the effect to be used
    self.effect = 'spectrum'
    self.modifiers = dict(visualizer=self.effect)
    # attributes for the mic
    self.p = None
    self.stream = None
    self.frames_per_buffer = int(CONFIGS['mic_rate'] / CONFIGS['fps'])
    self.setup()
def __init__(self, config, host='192.168.137.183', port=8000):
    super(ClientThread, self).__init__()
    self.config = config
    self.data = []
    self.host = host
    self.port = port
    self.path = config.root + config.test_path + 'data' + self.host + 'temp.pkl'
    self.device = torch.device("cpu")
    parser = argparse.ArgumentParser(description='Wifi Indoor Positioning System')
    args = parser.parse_args()
    args.feat_dim = config.n_address
    args.dropout = 0.0
    self.model = DNN_8(args)
    self.model.load_state_dict(
        torch.load(
            "./checkpoints/address_128_8_layer_jitter_0.05_valsplit_0.01/models/model.t7",
            map_location=self.device))
    self.model.to(self.device)
    self.model.eval()
    print(self.model)
    self.do_run = True
    self.data = []
    self.v = Visualizer()
    self.x = None
    self.y = None
    self.alpha = 0.5
def upload_to_kibana(iters):
    '''
    Set up a connection to Kibana and upload the data.
    :param iters: tweets in each partition
    :return: None
    '''
    res = list(iters)
    if not len(res):
        return
    Visualizer.upload(res)
def __init__(self, actor, critic, episode_num, sim_world, visualization_speed):
    self.actor = actor
    self.critic = critic
    self.episode_num = episode_num
    self.sim_world = sim_world
    self.visualizer = Visualizer(self.sim_world.get_board(),
                                 self.sim_world.get_player(),
                                 visualization_speed)
def __init__(self):
    self.visualizer = Visualizer()
    self.render = False
    self.food = []
    self.poison = []
    self.generation = 1
    self.bounds = WORLD_BOUNDS  # tuple, e.g. (x, y)
    self.agent = Agent()
    # self.pool = None if NUM_CORES < 2 else multiprocessing.Pool(NUM_CORES)
    self.init()
def __init__(self, config, host='192.168.137.183', port=8000):
    super(ClientThread, self).__init__()
    self.config = config
    self.data = []
    self.host = host
    self.port = port
    self.path = config.root + config.test_path + 'data' + self.host + 'temp.pkl'
    self.device = torch.device("cpu")
    self.model = torch.load(config.root + config.model_path,
                            map_location=torch.device('cpu')).to(self.device)
    self.model.eval()
    self.do_run = True
    self.data = []
    self.v = Visualizer()
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', type=str,
                        default='./visualization/data/itr_2322.pkl')  # defaultIS.h5/snapshots/iter0000480
    parser.add_argument('--vid', type=str,
                        default='./visualization/videos/contworldvid.mp4')
    parser.add_argument('--deterministic', action='store_true', default=False)
    parser.add_argument('--heuristic', action='store_true', default=False)
    parser.add_argument('--evaluate', action='store_true', default=False)
    parser.add_argument('--n_trajs', type=int, default=48)
    parser.add_argument('--n_steps', type=int, default=1000)
    parser.add_argument('--same_con_pol', action='store_true', default=False)
    args = parser.parse_args()

    with open('./visualization/data/params.json', 'r') as df:
        train_args = json.load(df)

    env = MAContWorld(n_rovers=train_args['n_rovers'],
                      n_areas_of_int=train_args['n_areas_of_int'],
                      n_coop=train_args['n_coop'],
                      n_crater=train_args['n_crater'],
                      n_sensors=train_args['n_sensors'],
                      scout_reward=train_args['scout_reward'],
                      crater_reward=train_args['crater_reward'],
                      control_penalty=-.5,
                      reward_mech='Local',
                      encounter_reward=train_args['encounter_reward'],
                      n_obstacles=3,
                      addid=True,
                      _speed_features=True,
                      obstacle_loc=None)
    env.reset()

    if train_args['buffer_size'] > 1:
        env = ObservationBuffer(env, train_args['buffer_size'])

    hpolicy = None  # heuristic policy

    if args.evaluate:
        minion = Evaluator(env, train_args, args.n_steps, args.n_trajs,
                           args.deterministic,
                           'heuristic' if args.heuristic else 'rllab')
        evr = minion(args.filename, same_con_pol=args.same_con_pol, hpolicy=hpolicy)
        from tabulate import tabulate
        print(tabulate(evr, headers='keys'))
    else:
        minion = Visualizer(env, train_args, args.n_steps, args.n_trajs,
                            args.deterministic,
                            'heuristic' if args.heuristic else 'rllab')
        rew, info = minion(args.filename, vid=args.vid, hpolicy=hpolicy)
        pprint.pprint(rew)
        pprint.pprint(info)
def visualize(self, knn_accuracy, knn_k, kmeans_accuracy, kmeans_k):
    visualize = Visualizer()
    if knn_accuracy != [] and kmeans_accuracy != []:
        visualize.kmeans_knn(knn_accuracy, knn_k, kmeans_accuracy, kmeans_k)
    elif kmeans_accuracy != []:
        visualize.kmeans(kmeans_accuracy, kmeans_k)
    elif knn_accuracy != []:
        visualize.knn(knn_accuracy, knn_k)
class Graph:
    def __init__(self):
        self.graph = defaultdict(list)  # default_factory() of type list
        self.v = Visualizer()
        self.result = ''

    def add_edge(self, u, v):
        self.graph[u].append(v)
        self.graph[v].append(u)  # undirected graph is bidirectional
        self.v.add_edge(u, v)

    def visualize(self):
        self.v.visualize()

    def print(self, starting_node):
        print("DFS from vertex {} = {}".format(starting_node, self.result))

    # The function to do DFS traversal. It uses the recursive DFSUtil().
    def DFS(self, v):
        # e.g. 6 vertices labelled 0 to 5 => bool array with 6 elements
        visited_arr = [False] * (max(self.graph) + 1)
        # recursive helper that takes the index of a node and the visited array
        self.DFSUtil(v, visited_arr)

    # Recursive function for DFS with backtracking
    def DFSUtil(self, v, visited):
        visited[v] = True
        # add v to the result
        self.result += str(v) + " "
        # traverse vertices adjacent to this vertex
        for i in self.graph[v]:
            if not visited[i]:
                self.DFSUtil(i, visited)
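# A minimal usage sketch (added for illustration, not from the original
# source); assumes the project's Visualizer implements add_edge()/visualize().
if __name__ == '__main__':
    g = Graph()
    for u, v in [(0, 1), (0, 2), (1, 2), (2, 3)]:
        g.add_edge(u, v)   # each edge is mirrored into the Visualizer
    g.DFS(2)               # traverse starting from vertex 2
    g.print(2)             # DFS from vertex 2 = 2 0 1 3
    g.visualize()          # render the graph via the project's Visualizer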
class SegmentPlotCallback(keras.callbacks.Callback):
    def __init__(self, configuration, data_loader):
        self.configuration = configuration
        self.visualization_parent_path = configuration.VISUALIZATION_PARENT_PATH
        self.visualizer = Visualizer(configuration)
        self.data_loader = data_loader
        self.train_path = os.path.join(self.visualization_parent_path,
                                       'train_sample_across_epochs')
        self.test_path = os.path.join(self.visualization_parent_path,
                                      'test_sample_across_epochs')
        self.validation_path = os.path.join(self.visualization_parent_path,
                                            'validation_sample_across_epochs')
        # change the following numbers to choose other segments for visualization
        self.train_segment_index = configuration.TRAIN_SEGMENT_INDEX
        self.test_segment_index = configuration.TEST_SEGMENT_INDEX
        self.validation_segment_index = configuration.VALIDATION_SEGMENT_INDEX

        if not os.path.exists(self.train_path):
            os.makedirs(self.train_path)
        if not os.path.exists(self.test_path):
            os.makedirs(self.test_path)
        if not os.path.exists(self.validation_path):
            os.makedirs(self.validation_path)

    def on_epoch_end(self, epoch, logs={}):
        self.visualizer.visualize_prediction_segments(
            self.model,
            np.expand_dims(self.data_loader.train_segments[self.train_segment_index], 0),
            path=self.train_path,
            initial_segment=self.train_segment_index,
            epoch_id='epoch_' + str(epoch),
            layer_filter_list=['mclnn'],
            first_mclnn_only=True)
        self.visualizer.visualize_prediction_segments(
            self.model,
            np.expand_dims(self.data_loader.test_segments[self.test_segment_index], 0),
            path=self.test_path,
            initial_segment=self.test_segment_index,
            epoch_id='epoch_' + str(epoch),
            layer_filter_list=['mclnn'],
            first_mclnn_only=True)
        self.visualizer.visualize_prediction_segments(
            self.model,
            np.expand_dims(self.data_loader.validation_segments[self.validation_segment_index], 0),
            path=self.validation_path,
            initial_segment=self.validation_segment_index,
            epoch_id='epoch_' + str(epoch),
            layer_filter_list=['mclnn'],
            first_mclnn_only=True)
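# A hedged registration sketch (added): Keras callbacks are passed to
# Model.fit via the callbacks list, and on_epoch_end then fires after every
# epoch. `train_segments`/`train_labels` and the epoch count are placeholders,
# not names from the original project.
segment_plot_cb = SegmentPlotCallback(configuration, data_loader)
model.fit(train_segments, train_labels,
          epochs=20,
          callbacks=[segment_plot_cb])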
class Graph:
    def __init__(self):
        self.graph = defaultdict(list)  # default_factory() of type list
        self.v = Visualizer()
        self.result = ""

    def add_edge(self, u, v):
        self.graph[u].append(v)
        self.graph[v].append(u)  # undirected graph is bidirectional
        self.v.add_edge(u, v)

    # No BFS util and no recursion required; the traversal is purely iterative:
    # the children of each node are enqueued and visited in FIFO order.
    def BFS(self, starting_node):
        # assumes vertices are labelled 0..len(graph)-1
        visited_arr = [False] * len(self.graph)
        queue = list()
        # enqueue the start node
        queue.append(starting_node)
        visited_arr[starting_node] = True
        while queue:
            # list.pop(0) is O(n); collections.deque.popleft() would be the idiomatic queue
            node = queue.pop(0)
            self.result += str(node) + " "
            for i in self.graph[node]:
                if not visited_arr[i]:
                    queue.append(i)
                    visited_arr[i] = True

    def print(self, starting_node):
        print("BFS from vertex {} = {}".format(starting_node, self.result))

    def visualize(self):
        self.v.visualize()
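# A minimal usage sketch (added for illustration, not from the original
# source); note the BFS above assumes vertices labelled 0..len(graph)-1.
if __name__ == '__main__':
    g = Graph()
    for u, v in [(0, 1), (0, 2), (1, 3), (2, 3)]:
        g.add_edge(u, v)
    g.BFS(0)     # level-order traversal from vertex 0
    g.print(0)   # BFS from vertex 0 = 0 1 2 3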
def __init__(self, configuration, data_loader):
    self.configuration = configuration
    self.visualization_parent_path = configuration.VISUALIZATION_PARENT_PATH
    self.visualizer = Visualizer(configuration)
    self.data_loader = data_loader
    self.train_path = os.path.join(self.visualization_parent_path,
                                   'train_sample_across_epochs')
    self.test_path = os.path.join(self.visualization_parent_path,
                                  'test_sample_across_epochs')
    self.validation_path = os.path.join(self.visualization_parent_path,
                                        'validation_sample_across_epochs')
    # change the following numbers to choose other segments for visualization
    self.train_segment_index = configuration.TRAIN_SEGMENT_INDEX
    self.test_segment_index = configuration.TEST_SEGMENT_INDEX
    self.validation_segment_index = configuration.VALIDATION_SEGMENT_INDEX

    if not os.path.exists(self.train_path):
        os.makedirs(self.train_path)
    if not os.path.exists(self.test_path):
        os.makedirs(self.test_path)
    if not os.path.exists(self.validation_path):
        os.makedirs(self.validation_path)
def __init__(self, **kwargs):
    super().__init__(**kwargs)
    self.pattern_name = "Music"
    CONFIGS['n_pixels'] = self.strip_length
    # init visualizer
    self.vis = Visualizer(CONFIGS)
    # dict used to set the visualizer effect
    self.effect_dict = dict(
        spectrum=self.vis.visualize_spectrum,
        energy=self.vis.visualize_energy,
        scroll=self.vis.visualize_scroll,
    )
    # name of the effect to be used
    self.effect = Modifier('visualizer', "spectrum",
                           options=list(self.effect_dict.keys()),
                           on_change=self.on_change)
    self._rate = Modifier('Delay', 0, minimum=0, maximum=0)
    self.modifiers = dict(effect=self.effect)
    # attributes for the mic
    self.p = None
    self.stream = None
    self.frames_per_buffer = int(CONFIGS['mic_rate'] / CONFIGS['fps'])
    try:
        # do not initialize if the pattern is temp
        if kwargs['handler'] is not None:
            self.setup()
    except OSError:
        music_logger.warning("Could not initialize the audio stream")
class Music(Default):
    """ Music reactive leds """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.pattern_name = "Music"
        CONFIGS['n_pixels'] = self.strip_length
        # init visualizer
        self.vis = Visualizer(CONFIGS)
        # dict used to set the visualizer effect
        self.effect_dict = dict(
            spectrum=self.vis.visualize_spectrum,
            energy=self.vis.visualize_energy,
            scroll=self.vis.visualize_scroll,
        )
        # name of the effect to be used
        self.effect = Modifier('visualizer', "spectrum",
                               options=list(self.effect_dict.keys()),
                               on_change=self.on_change)
        self._rate = Modifier('Delay', 0, minimum=0, maximum=0)
        self.modifiers = dict(effect=self.effect)
        # attributes for the mic
        self.p = None
        self.stream = None
        self.frames_per_buffer = int(CONFIGS['mic_rate'] / CONFIGS['fps'])
        try:
            # do not initialize if the pattern is temp
            if kwargs['handler'] is not None:
                self.setup()
        except OSError:
            music_logger.warning("Could not initialize the audio stream")

    def setup(self):
        """ Set up the audio stream """
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(format=pyaudio.paInt16,
                                  channels=1,
                                  rate=CONFIGS['mic_rate'],
                                  input=True,
                                  frames_per_buffer=self.frames_per_buffer)
        music_logger.info(f"Audio stream initialized with {CONFIGS['fps']} fps")

    def on_change(self, value):
        """ Set the effect to a certain value and change the visualization effect in the vis class """
        try:
            ef = self.effect_dict[value]
            self.vis.visualization_effect = ef
        except KeyError as e:
            music_logger.warning(f"Error for key {value}\n{e}")

    @property
    def rate(self):
        """ Rate should always be zero here """
        return self._rate

    @rate.setter
    def rate(self, value):
        """
        Cannot change the value of rate since music must be real time
        :param value:
        :return:
        """
        pass

    def read_audio(self):
        """ Read audio and return it """
        try:
            # np.frombuffer replaces the deprecated np.fromstring for binary data
            y = np.frombuffer(self.stream.read(self.frames_per_buffer,
                                               exception_on_overflow=False),
                              dtype=np.int16)
            y = y.astype(np.float32)
            return y
        except IOError:
            music_logger.warning('Audio buffer has overflowed')
        except AttributeError:
            music_logger.error("Could not read from audio buffer, do you have a microphone?")
            self.close()

    def fill(self):
        """ Read from the audio stream and set pixels """
        # read audio input, can also be None when the mic has not started yet
        output = self.read_audio()
        try:
            # use visualization
            pixels, _ = self.vis.audio_to_rgb(output)
            # truncate values and cast to integer
            pixels = np.clip(pixels, 0, 255).astype(int)
            # optional gamma correction
            pixels = _gamma[pixels]
            r, g, b = pixels
            for idx in range(len(r)):
                self.pixels[idx]['color'] = (r[idx], g[idx], b[idx], 255)
        except TypeError:
            pass

    def close(self):
        """ Call the super method and close the audio stream """
        super(Music, self).close()
        try:
            # try to stop the stream if any
            self.stream.stop_stream()
            self.stream.close()
        except AttributeError:
            # if there is no recognized microphone then the close operation will fail on the stream
            pass
        finally:
            # terminate the PyAudio instance and log
            self.p.terminate()
            music_logger.info("Audio stream stopped")
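# The Music class above indexes a module-level `_gamma` lookup table that is
# not shown in the snippet. A plausible 256-entry table (added sketch; the
# exponent 2.2 is an assumed gamma, not a value from the original project):
import numpy as np

_gamma = np.array([round((i / 255.0) ** 2.2 * 255.0) for i in range(256)],
                  dtype=int)  # maps linear 0-255 values to gamma-corrected ones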
parser.add_argument('--tail', type=str, default='', help='specific name')
parser.add_argument('--visualize', action='store_true', default=False,
                    help='Visualize the loss curve in visdom windows')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
print(args)

np.random.seed(args.seed)
torch.manual_seed(args.seed)

if args.visualize:
    vis = Visualizer(env='{}_{}_{}'.format(args.dataset, args.encoder, args.tail))

# ================== Model & Log Save Folder =================== #
log = None
# Save model and meta-data. Always saves in a new folder.
if args.save_folder:
    if args.restore:
        pass
    else:
        exp_counter = 0
        save_folder = os.path.join(
            args.save_folder,
            '{}_{}_{}_exp{}'.format(args.dataset, args.encoder, args.tail, exp_counter))
        while os.path.isdir(save_folder):
            exp_counter += 1
def makeDiagram(n_quadrotors, n_balls, use_visualizer=False,
                trajectory_u=None, trajectory_x=None, trajectory_K=None):
    builder = DiagramBuilder()

    # Setup quadrotor plants and controllers
    quadrotor_plants = []
    quadrotor_controllers = []
    for i in range(n_quadrotors):
        new_quad = Quadrotor2D(n_quadrotors=n_quadrotors - 1, n_balls=n_balls)
        new_quad.set_name('quad_' + str(i))
        plant = builder.AddSystem(new_quad)
        quadrotor_plants.append(plant)

    # Setup ball plants
    ball_plants = []
    for i in range(n_balls):
        new_ball = Ball2D(n_quadrotors=n_quadrotors, n_balls=n_balls - 1)
        new_ball.set_name('ball_' + str(i))
        plant = builder.AddSystem(new_ball)
        ball_plants.append(plant)

    # Connect all plants so that each object (quadrotor or ball) has access
    # to all other object states as inputs
    for i in range(n_quadrotors):
        for j in range(n_quadrotors):
            if i == j:
                continue
            k = j if j < i else j - 1
            builder.Connect(quadrotor_plants[j].get_output_port(0),
                            quadrotor_plants[i].GetInputPort('quad_' + str(k)))
        for j in range(n_balls):
            builder.Connect(ball_plants[j].get_output_port(0),
                            quadrotor_plants[i].GetInputPort('ball_' + str(j)))
    for i in range(n_balls):
        for j in range(n_quadrotors):
            builder.Connect(quadrotor_plants[j].get_output_port(0),
                            ball_plants[i].GetInputPort('quad_' + str(j)))
        for j in range(n_balls):
            if i == j:
                continue
            k = j if j < i else j - 1
            builder.Connect(ball_plants[j].get_output_port(0),
                            ball_plants[i].GetInputPort('ball_' + str(k)))

    # Setup visualization
    if use_visualizer:
        visualizer = builder.AddSystem(
            Visualizer(n_quadrotors=n_quadrotors, n_balls=n_balls))
        visualizer.set_name('visualizer')
        for i in range(n_quadrotors):
            builder.Connect(quadrotor_plants[i].get_output_port(0),
                            visualizer.get_input_port(i))
        for i in range(n_balls):
            builder.Connect(ball_plants[i].get_output_port(0),
                            visualizer.get_input_port(n_quadrotors + i))

    # Setup trajectory source
    if trajectory_x is not None and trajectory_u is not None and trajectory_K is not None:
        demulti_u = builder.AddSystem(Demultiplexer(2 * n_quadrotors, 2))
        demulti_u.set_name('feedforward input')
        demulti_x = builder.AddSystem(Demultiplexer(6 * n_quadrotors, 6))
        demulti_x.set_name('reference trajectory')
        demulti_K = builder.AddSystem(Demultiplexer(12 * n_quadrotors, 12))
        demulti_K.set_name('time-varying K')

        for i in range(n_quadrotors):
            ltv_lqr = builder.AddSystem(LTVController(6, 2))
            ltv_lqr.set_name('LTV LQR ' + str(i))
            builder.Connect(demulti_x.get_output_port(i), ltv_lqr.get_input_port(0))
            builder.Connect(quadrotor_plants[i].get_output_port(0), ltv_lqr.get_input_port(1))
            builder.Connect(demulti_u.get_output_port(i), ltv_lqr.get_input_port(2))
            builder.Connect(demulti_K.get_output_port(i), ltv_lqr.get_input_port(3))
            builder.Connect(ltv_lqr.get_output_port(0), quadrotor_plants[i].get_input_port(0))

        source_u = builder.AddSystem(TrajectorySource(trajectory_u))
        source_u.set_name('source feedforward input trajectory')
        source_x = builder.AddSystem(TrajectorySource(trajectory_x))
        source_x.set_name('source reference trajectory')
        demulti_source_x = builder.AddSystem(Demultiplexer([6 * n_quadrotors, 4 * n_balls]))
        demulti_source_x.set_name('quad and ball trajectories')
        source_K = builder.AddSystem(TrajectorySource(trajectory_K))
        source_K.set_name('source time-varying K')

        builder.Connect(source_u.get_output_port(0), demulti_u.get_input_port(0))
        builder.Connect(source_x.get_output_port(0), demulti_source_x.get_input_port(0))
        builder.Connect(demulti_source_x.get_output_port(0), demulti_x.get_input_port(0))
        builder.Connect(source_K.get_output_port(0), demulti_K.get_input_port(0))
    else:
        demulti_u = builder.AddSystem(Demultiplexer(2 * n_quadrotors, 2))
        demulti_u.set_name('quadrotor input')
        for i in range(n_quadrotors):
            builder.Connect(demulti_u.get_output_port(i),
                            quadrotor_plants[i].get_input_port(0))
        builder.ExportInput(demulti_u.get_input_port(0))

    diagram = builder.Build()
    return diagram
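# A hedged usage sketch (added): without trajectories the diagram exports the
# quadrotor input, which must be fixed before stepping the simulation. The
# pydrake Simulator calls below are standard, and the port width follows the
# Demultiplexer wiring above (2 inputs per quadrotor).
from pydrake.systems.analysis import Simulator

diagram = makeDiagram(n_quadrotors=2, n_balls=1, use_visualizer=True)
simulator = Simulator(diagram)
context = simulator.get_mutable_context()
diagram.get_input_port(0).FixValue(context, [0.0] * 4)  # 2 * n_quadrotors inputs
simulator.AdvanceTo(1.0)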
# spin up ssl-vision data polling to update gamestate
vision.start_updating(VISION_LOOP_SLEEP)
if not VISION_ONLY:
    # spin up comms to send commands to robots
    home_comms.start_sending(COMMS_SEND_LOOP_SLEEP)
    # home_comms.start_receiving(COMMS_RECEIVE_LOOP_SLEEP)
    if CONTROL_BOTH_TEAMS:
        away_comms.start_sending(COMMS_SEND_LOOP_SLEEP)
        # away_comms.start_receiving(COMMS_RECEIVE_LOOP_SLEEP)
    refbox.start_updating()
# spin up strategy threads to control the robots
home_strategy.start_controlling(HOME_STRATEGY, CONTROL_LOOP_SLEEP)
if CONTROL_BOTH_TEAMS:
    away_strategy.start_controlling(AWAY_STRATEGY, CONTROL_LOOP_SLEEP)
# initialize visualizer to show robots on screen
visualizer = Visualizer(gamestate, home_strategy, away_strategy)
# start the game - now everything should be going
gamestate.start_game(GAME_LOOP_SLEEP)

# Prepare to be interrupted by user
exit_signal_received = False

def exit_gracefully(signum, frame):
    global exit_signal_received
    if exit_signal_received:
        return
    else:
        exit_signal_received = True
        print('Exiting Everything')
        # clean up all threads
        vision.stop_updating()
def run():
    # ======================================= Initialization ======================================= #
    all_folds_target_label = np.asarray([])
    all_folds_majority_vote_cm = np.zeros((Config.NB_CLASSES, Config.NB_CLASSES),
                                          dtype=int)
    all_folds_majority_vote_label = np.asarray([])
    all_folds_probability_vote_cm = np.zeros((Config.NB_CLASSES, Config.NB_CLASSES),
                                             dtype=int)
    all_folds_probability_vote_label = np.asarray([])

    segment_size = sum(Config.LAYERS_ORDER_LIST) * 2 + Config.EXTRA_FRAMES
    print('Segment without middle frame:' + str(segment_size))

    is_visualization_called_flag = False  # visualization is done for the first fold only using this variable

    # list of paths to the n-fold indices of the Training/Testing/Validation splits;
    # the number of paths should be e.g. 30 for 3x10, where 3 is for the splits and
    # 10 for the 10 folds. Every 3 files are for one run that trains and validates
    # on 9 folds and tests on the remaining fold.
    folds_index_file_list = glob.glob(os.path.join(Config.INDEX_PATH, "Fold*.hdf5"))
    if len(folds_index_file_list) == 0:
        print('Index path is not found = ' + Config.INDEX_PATH)
        return
    folds_index_file_list.sort()
    cross_val_index_list = np.arange(0,
                                     Config.SPLIT_COUNT * Config.CROSS_VALIDATION_FOLDS_COUNT,
                                     Config.SPLIT_COUNT)

    # ======================================= Start cross-validation ======================================= #
    for j in range(cross_val_index_list[Config.INITIAL_FOLD_ID],
                   len(folds_index_file_list), Config.SPLIT_COUNT):
        test_index_path = folds_index_file_list[j] \
            if folds_index_file_list[j].lower().endswith('_test.hdf5') else None
        train_index_path = folds_index_file_list[j + 1] \
            if folds_index_file_list[j + 1].lower().endswith('_train.hdf5') else None
        validation_index_path = folds_index_file_list[j + 2] \
            if folds_index_file_list[j + 2].lower().endswith('_validation.hdf5') else None
        if None in [test_index_path, train_index_path, validation_index_path]:
            print('Train / Validation / Test indices are not correctly assigned')
            exit(1)

        np.random.seed(0)  # for reproducibility
        data_loader = DataLoader()
        mclnn_trainer = MCLNNTrainer()

        # --------------------------------- Load data ----------------------------- #
        data_loader.load_data(segment_size, Config.STEP_SIZE, Config.NB_CLASSES,
                              Config.DATASET_FILE_PATH, Config.STANDARDIZATION_PATH,
                              train_index_path, test_index_path, validation_index_path)

        # ------------------------------ Weights path ---------------------------- #
        train_index_filename = os.path.basename(train_index_path).split('.')[0]
        weights_to_store_foldername = train_index_filename + '_' \
                                      + 'batch' + str(Config.BATCH_SIZE) \
                                      + 'wait' + str(Config.WAIT_COUNT) \
                                      + 'order' + str(Config.LAYERS_ORDER_LIST[0]) \
                                      + 'extra' + str(Config.EXTRA_FRAMES)
        fold_weights_path = os.path.join(Config.ALL_FOLDS_WEIGHTS_PATH,
                                         weights_to_store_foldername)
        if not os.path.exists(fold_weights_path):
            if Config.USE_PRETRAINED_WEIGHTS == False:
                os.makedirs(fold_weights_path)
            elif Config.USE_PRETRAINED_WEIGHTS == True:
                print('Pre-trained weights do not exist in :' + fold_weights_path)
                exit(1)

        # -------------------------- Build and Train model ----------------------- #
        print('----------- Training param -------------')
        print(' batch_size>' + str(Config.BATCH_SIZE) +
              ' nb_classes>' + str(Config.NB_CLASSES) +
              ' nb_epoch>' + str(Config.NB_EPOCH) +
              ' mclnn_layers>' + str(Config.MCLNN_LAYER_COUNT) +
              ' dense_layers>' + str(Config.DENSE_LAYER_COUNT) +
              ' norder>' + str(Config.LAYERS_ORDER_LIST) +
              ' extra_frames>' + str(Config.EXTRA_FRAMES) +
              ' segment_size>' + str(segment_size + 1) +  # plus 1 is for the middle frame, considered in the segmentation stage
              ' initial_fold>' + str(Config.INITIAL_FOLD_ID + 1) +  # plus 1 because folds are zero indexed
              ' wait_count>' + str(Config.WAIT_COUNT) +
              ' split_count>' + str(Config.SPLIT_COUNT))
        if Config.USE_PRETRAINED_WEIGHTS == False:
            model = mclnn_trainer.build_model(
                segment_size=data_loader.train_segments.shape[1],
                feature_count=data_loader.train_segments.shape[2],
                pretrained_weights_path=None)
            mclnn_trainer.train_model(model, data_loader, fold_weights_path)

        # ------------------ Load trained weights in a new model ------------------ #
        # load paths of all weights generated during training
        weight_list = glob.glob(os.path.join(fold_weights_path, "*.hdf5"))
        if len(weight_list) == 0:
            print('Weight path is not found = ' + fold_weights_path)
            return
        weight_list.sort(key=os.path.getmtime)
        if len(weight_list) > 1:
            startup_weights = weight_list[-(Config.WAIT_COUNT + 2)]
        elif len(weight_list) == 1:
            startup_weights = weight_list[0]
        print('----------- Weights Loaded ---------------:')
        print(startup_weights)
        model = mclnn_trainer.build_model(
            segment_size=data_loader.train_segments.shape[1],
            feature_count=data_loader.train_segments.shape[2],
            pretrained_weights_path=startup_weights)

        # ------------------------ Visualize a test sample --------------------- #
        if is_visualization_called_flag == False and Config.SAVE_TEST_SEGMENT_PREDICTION_IMAGE == True:
            visualizer = Visualizer(Config)
            visualizer.visualize_weights_and_sample_test_clip(model=model,
                                                              data_loader=data_loader)
            is_visualization_called_flag = True

        # --------------------------- Evaluate model ------------------------------ #
        fold_majority_cm, fold_probability_cm, \
        fold_majority_vote_label, fold_probability_vote_label, \
        fold_target_label = mclnn_trainer.evaluate_model(segment_size=segment_size,
                                                         model=model,
                                                         data_loader=data_loader)
        all_folds_majority_vote_cm += fold_majority_cm
        all_folds_majority_vote_label = np.append(all_folds_majority_vote_label,
                                                  fold_majority_vote_label)
        all_folds_probability_vote_cm += fold_probability_cm
        all_folds_probability_vote_label = np.append(all_folds_probability_vote_label,
                                                     fold_probability_vote_label)
        all_folds_target_label = np.append(all_folds_target_label, fold_target_label)
        gc.collect()

    print('-------------- Cross validation performance --------------')
    print(Config.CLASS_NAMES)
    print(all_folds_majority_vote_cm)
    print(str(Config.CROSS_VALIDATION_FOLDS_COUNT) +
          '-Fold Clip-level majority-vote Accuracy ' +
          str(accuracy_score(all_folds_target_label, all_folds_majority_vote_label)))
    print(Config.CLASS_NAMES)
    print(all_folds_probability_vote_cm)
    print(str(Config.CROSS_VALIDATION_FOLDS_COUNT) +
          '-Fold Clip-level probability-vote Accuracy ' +
          str(accuracy_score(all_folds_target_label, all_folds_probability_vote_label)))
    scoref1 = f1score(all_folds_target_label, all_folds_probability_vote_label,
                      average='micro')
    print('F1 Score micro ' + str(scoref1))
    scoref1 = f1score(all_folds_target_label, all_folds_probability_vote_label,
                      average='weighted')
    print('F1 Score weighted ' + str(scoref1))
    # 'error': error,
    # 'traceback': traceback.format_exc()}
    return Response(json.dumps(response), mimetype='application/json')


if __name__ == '__main__':
    # Configure the logging of trace messages
    logging.basicConfig(filename='log/chatbot.log')
    logger = logging.getLogger('ai_app')
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s: %(message)s',
        '%d-%m-%Y %I:%M:%S %p')
    console_handler = logging.StreamHandler()

    ner = SubRed_NER('SUBRED_pos', 'normalization_models.pkl')
    answer_manager = AnswerManager(state_machine=StateMachine(
        'tree_dialog_config.json', 'trained_intentions.pkl', ner),
        templates_dir='templates/',
        time_per_word=0)
    answer_manager.return_repeated = False
    visualizer = Visualizer(ner)
    with app.app_context():
        db.drop_all()
        db.create_all()
    socketio.run(app, host=sys.argv[1], port=sys.argv[2], debug=False)
    # app.run(host='192.168.222.63', port=8004)
try:
    with open("/etc/machine-id", "r") as fh:
        app.config['SECRET_KEY'] = fh.read()
except OSError:  # a bare except here would also swallow unrelated errors
    app.config['SECRET_KEY'] = 'secret!'

sockets = Sockets(app)

# Build pipeline
grabber = PanoramaGrabber()  # config read from ~/.robovision/grabber.conf
image_recognizer = ImageRecognizer(grabber)
gameplay = Gameplay(image_recognizer)
rf = RemoteRF(gameplay, "/dev/ttyACM0")
visualizer = Visualizer(image_recognizer, framedrop=1)
recorder = Recorder(grabber)


def generator():
    visualizer.enable()
    queue = visualizer.get_queue()
    while True:
        try:
            buf, resized, frame, r = queue.get_nowait()
        except Empty:
            sleep(0.001)  # brief sleep to avoid busy-waiting on an empty queue
            continue
        else:
            yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
            yield buf
            yield b'\r\n\r\n'
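# A hedged sketch (added) of a route that could serve the multipart stream
# produced by generator(); the boundary name matches the b'--frame' marker
# above. Assumes the Flask `app` defined elsewhere in this module.
from flask import Response

@app.route('/stream')
def stream():
    # multipart/x-mixed-replace is the standard content type for MJPEG over HTTP
    return Response(generator(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')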
l = Log(file_path)

# variants_count = case_statistics.get_variant_statistics(l.log)
# variants_count = \
#     sorted(variants_count,
#            key=lambda x: x['count'],
#            reverse=True)
# print('')
# print(variants_count)
# print('')

l.filter_variants(Log.INTENSE_FILTERING)
# dfg_discovery.apply(l.log)
dfg = DFG(l.log)
Visualizer.dfg_visualizer(dfg.dfg, l.log)

# variants_count = case_statistics.get_variant_statistics(l.log)
# variants_count = \
#     sorted(variants_count,
#            key=lambda x: x['count'],
#            reverse=True)
# print('')
# print(variants_count)
# print('')
def __init__(self):
    self.graph = defaultdict(list)  # default_factory() of type list
    self.v = Visualizer()
    self.result = ""
class Music(Default):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.pattern_name = "Music"
        CONFIGS['n_pixels'] = self.strip_length
        # init visualizer
        self.vis = Visualizer(CONFIGS)
        # dict used to set the visualizer effect
        self.effect_dict = dict(
            spectrum=self.vis.visualize_spectrum,
            energy=self.vis.visualize_energy,
            scroll=self.vis.visualize_scroll,
        )
        # name of the effect to be used
        self.effect = 'spectrum'
        self.modifiers = dict(visualizer=self.effect)
        # attributes for the mic
        self.p = None
        self.stream = None
        self.frames_per_buffer = int(CONFIGS['mic_rate'] / CONFIGS['fps'])
        self.setup()

    def setup(self):
        """ Set up the audio stream """
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(format=pyaudio.paInt16,
                                  channels=1,
                                  rate=CONFIGS['mic_rate'],
                                  input=True,
                                  frames_per_buffer=self.frames_per_buffer)

    @property
    def effect(self):
        return self._effect

    @effect.setter
    def effect(self, value):
        """ Set the effect to a certain value and change the visualization effect in the vis class """
        try:
            ef = self.effect_dict[value]
            self.vis.visualization_effect = ef
            self._effect = value
        except KeyError as e:
            print(f"Error for key {value}\n{e}")

    @property
    def rate(self):
        """ Rate should always be zero here """
        return 0

    @rate.setter
    def rate(self, value):
        pass

    def read_audio(self):
        """ Read audio and return it """
        try:
            # np.frombuffer replaces the deprecated np.fromstring for binary data
            y = np.frombuffer(self.stream.read(self.frames_per_buffer,
                                               exception_on_overflow=False),
                              dtype=np.int16)
            y = y.astype(np.float32)
            # drain whatever is left in the buffer so the next read stays real time
            self.stream.read(self.stream.get_read_available(),
                             exception_on_overflow=False)
            return y
        except IOError:
            print('Audio buffer has overflowed')

    def fill(self):
        """ Read from the audio stream and set pixels """
        # read audio input, can also be None when the mic has not started yet
        output = self.read_audio()
        try:
            # use visualization
            pixels, _ = self.vis.audio_to_rgb(output)
            # truncate values and cast to integer
            pixels = np.clip(pixels, 0, 255).astype(int)
            # optional gamma correction
            pixels = _gamma[pixels]
            r, g, b = pixels
            for idx in range(len(r)):
                self.pixels[idx]['color'] = (r[idx], g[idx], b[idx], 255)
        except TypeError:
            pass

    def stop(self):
        """ Call the super method and close the audio stream """
        super(Music, self).stop()
        self.stream.stop_stream()
        self.stream.close()
        self.p.terminate()
providers += [SSLVisionDataProvider()]
if not NO_REFBOX:
    providers += [RefboxDataProvider()]
if not NO_RADIO:
    providers += [Comms(HOME_TEAM)]
    if CONTROL_BOTH_TEAMS:
        providers += [Comms(AWAY_TEAM, True)]
providers += [Strategy(HOME_TEAM, HOME_STRATEGY)]
if CONTROL_BOTH_TEAMS:
    providers += [Strategy(AWAY_TEAM, AWAY_STRATEGY)]
providers += [Visualizer()]

# Pass the providers to the coordinator
c = Coordinator(providers)

# Setup the exit handler
def stop_it(signum, frame):
    c.stop_game()

signal.signal(signal.SIGINT, stop_it)

# Start the game
c.start_game()

# Exit once the game is over
sys.exit()
    :param iters: tweets in each partition
    :return: None
    '''
    res = list(iters)
    if not len(res):
        return
    Visualizer.upload(res)


if __name__ == '__main__':
    sc = SparkContext('local[2]', 'TwitterApp')
    ssc = StreamingContext(sc, 4)
    ssc.checkpoint(r'./checkpoint')
    streams = ssc.socketTextStream(TCP_IP, TCP_PORT)
    # window the stream and recombine the data in each RDD so tweets are divided correctly
    windowed_stream = streams.window(4, 4).repartition(1)
    corrected_stream = windowed_stream.mapPartitions(
        lambda iters: seperate_tweets(iters), preservesPartitioning=False)
    # filter out tweets which have no coordinates
    corrected_stream = corrected_stream.filter(lambda x: re.search('\\(.+\\)', x))
    sentiments = corrected_stream.filter(lambda x: len(x) > 0).\
        mapPartitions(lambda iters: analyze(iters), preservesPartitioning=False)
    reformat_stream = sentiments.map(lambda tup: format_data(tup))
    reformat_stream.pprint()
    reformat_stream.foreachRDD(lambda rdd: rdd.foreachPartition(upload_to_kibana))
    Visualizer.init()
    ssc.start()
    ssc.awaitTermination()
    Analyst.my_nlp.close()
        pred_gp_mean_trajs[j] = mean_traj
        pred_gp_variance_trajs[j] = variance_traj
        rollout_gp_trajs[j] = state_traj

    return pred_gp_mean, pred_gp_variance, rollout_gp, \
        pred_gp_mean_trajs, pred_gp_variance_trajs, rollout_gp_trajs


if __name__ == '__main__':
    import matplotlib.pyplot as plt
    plt.style.use('ggplot')

    from cartpole_sim import CartpoleSim
    from policy import SwingUpAndBalancePolicy, RandomPolicy
    from visualization import Visualizer
    import cv2

    vis = Visualizer(cartpole_length=1.5,
                     x_lim=(0.0, DELTA_T * NUM_DATAPOINTS_PER_EPOCH))
    swingup_policy = SwingUpAndBalancePolicy('policy.npz')
    random_policy = RandomPolicy(seed=12831)
    sim = CartpoleSim(dt=DELTA_T)

    # Initial training data used to train the GP for the first epoch
    init_state = np.array([0.01, 0.01, np.pi * 0.5, 0.1]) * rng.randn(4)
    ts, state_traj, action_traj = sim_rollout(sim, random_policy,
                                              NUM_DATAPOINTS_PER_EPOCH,
                                              DELTA_T, init_state)
    delta_state_traj = state_traj[1:] - state_traj[:-1]
    train_x, train_y = make_training_data(state_traj[:-1], action_traj,
                                          delta_state_traj)
# print(variants_count)
# print('')

# l.filter_variants(Log.INTENSE_FILTERING)
# dfg_discovery.apply(l.log)
parameters = {Parameters.NOISE_THRESHOLD: True}
im = InductiveMiner(l.log, parameters)
for place in im.net.places:
    print("\nPLACE: " + place.name)
    for arc in place.in_arcs:
        print(arc.source.name, arc.source.label)

Visualizer.petrinet_visualizer(im.net, im.initial_marking, im.final_marking)

# variants_count = case_statistics.get_variant_statistics(l.log)
# variants_count = \
#     sorted(variants_count,
#            key=lambda x: x['count'],
#            reverse=True)
# print('')
# print(variants_count)
# print('')
class Environment:
    #
    # Each environment has a config and a Population, and the environment
    # also evaluates the genomes.
    #
    # Each genome arrives in a list with an ID and the actual Genome; we
    # create an agent for each ID/Genome and use that to simulate.
    #
    # Each evaluation improves the network for X generations. We then
    # simulate each network and observe its actions; each action changes
    # the properties of the Agent instance it was given. In the simulation
    # we observe the action and the score it earned; this is called an
    # evaluation.
    #
    # Each new evaluation is called an episode. In each episode we store
    # the overall score/fitness. The agents are visualized for Y seconds
    # before another evaluation of the genomes is done.
    #
    # The genome fitness in our case is, for example, how much food the
    # agent has eaten; the fitness drops sharply when the agent eats poison.
    #
    # The fitness at the beginning of the experiment would probably be the
    # ratio between food and poison:
    # fitness = food / poison, or fitness = food / (poison / 2)
    #

    def __init__(self):
        self.visualizer = Visualizer()
        self.render = False
        self.food = []
        self.poison = []
        self.generation = 1
        self.bounds = WORLD_BOUNDS  # tuple, e.g. (x, y)
        self.agent = Agent()
        # self.pool = None if NUM_CORES < 2 else multiprocessing.Pool(NUM_CORES)
        self.init()

    def get_scaled_inputs(self, agent):
        pos = agent.pos
        grid = []
        debug_grid_pos = []
        # number of grid spaces on each side of the agent
        grid_radius = int(AGENT_GRID_SIZE // 2)
        x0grid = int(pos[0] - ((SINGLE_GRID_SIZE * grid_radius) + (SINGLE_GRID_SIZE / 2)))
        y0grid = int(pos[1] - ((SINGLE_GRID_SIZE * grid_radius) + (SINGLE_GRID_SIZE / 2)))
        # x1grid = int(x0grid + (AGENT_SIZE * AGENT_GRID_SIZE))
        # y1grid = int(y0grid + (AGENT_SIZE * AGENT_GRID_SIZE))
        gridsize = SINGLE_GRID_SIZE * AGENT_GRID_SIZE

        for y in range(AGENT_GRID_SIZE):
            for x in range(AGENT_GRID_SIZE):
                debug_grid_pos.append((x0grid + x * SINGLE_GRID_SIZE,
                                       y0grid + y * SINGLE_GRID_SIZE))
                grid.append(0)

        for idx in range(len(self.food)):
            xrpos = int(self.food[idx][0] - x0grid)
            yrpos = int(self.food[idx][1] - y0grid)
            if xrpos >= 0 and yrpos >= 0 and xrpos < gridsize and yrpos < gridsize:
                # a food item is inside the grid
                xrpos = int(xrpos / SINGLE_GRID_SIZE)
                yrpos = int(yrpos / SINGLE_GRID_SIZE)
                index = xrpos + yrpos * AGENT_GRID_SIZE
                grid[index] = 1

        for idx in range(len(self.poison)):
            xrpos = int(self.poison[idx][0] - x0grid)
            yrpos = int(self.poison[idx][1] - y0grid)
            if xrpos >= 0 and yrpos >= 0 and xrpos < gridsize and yrpos < gridsize:
                # a poison item is inside the grid
                xrpos = int(xrpos / SINGLE_GRID_SIZE)
                yrpos = int(yrpos / SINGLE_GRID_SIZE)
                index = xrpos + yrpos * AGENT_GRID_SIZE
                grid[index] = -1

        if RENDER_DEBUG and self.render:
            self.visualizer.drawDebug(debug_grid_pos, grid)

        grid.append(agent.pos[0] / WORLD_BOUNDS[0])
        grid.append(agent.pos[1] / WORLD_BOUNDS[1])
        return [int(i) for i in grid]

    def get_distance(self, pos1, pos2):
        delta_x = pos1[0] - pos2[0]
        delta_y = pos1[1] - pos2[1]
        return math.sqrt(delta_x * delta_x + delta_y * delta_y)

    def step(self, actions, agent):
        reward = 0
        pos_x = agent.pos[0]
        pos_y = agent.pos[1]
        if actions[0]:
            pos_x += AGENT_SPEED * actions[0]
        if actions[1]:
            pos_x -= AGENT_SPEED * actions[1]
        if actions[2]:
            pos_y += AGENT_SPEED * actions[2]
        if actions[3]:
            pos_y -= AGENT_SPEED * actions[3]

        if LOOP_BOUNDS:
            # wrap around the world edges
            if pos_x > WORLD_BOUNDS[0]:
                pos_x = 0
            if pos_x < 0:
                pos_x = WORLD_BOUNDS[0]
            if pos_y > WORLD_BOUNDS[1]:
                pos_y = 0
            if pos_y < 0:
                pos_y = WORLD_BOUNDS[1]
        else:
            # clamp to the world edges
            if pos_x > WORLD_BOUNDS[0]:
                pos_x = WORLD_BOUNDS[0]
            if pos_x < 0:
                pos_x = 0
            if pos_y > WORLD_BOUNDS[1]:
                pos_y = WORLD_BOUNDS[1]
            if pos_y < 0:
                pos_y = 0
        agent.set_pos((int(pos_x), int(pos_y)))

        toRemove_food = []
        for pos in self.food:
            if self.get_distance(agent.pos, pos) < (FOOD_SIZE + AGENT_SIZE) / 2:
                reward += FOOD_REWARD
                toRemove_food.append(pos)

        dead = False
        toRemove_poison = []
        for pos in self.poison:
            if self.get_distance(agent.pos, pos) < (POISON_SIZE + AGENT_SIZE) / 2:
                dead = True
                reward += POISON_REWARD
                toRemove_poison.append(pos)

        for elem in toRemove_food:
            self.food.remove(elem)
        for elem in toRemove_poison:
            self.poison.remove(elem)
        return (dead, reward)

    def simulate(self, genome, net):
        self.invalidate_agents = True
        if self.generation % CHECKPOINT_INTERVAL == 0 and False:
            self.visualizer.start_recording("Video/" + str(self.generation) + ".mp4")
        scores = []
        score = 0
        steps = 0
        self.place_foods()
        self.place_poisons()
        self.agent.set_pos(self.get_random_pos(agent_spawn=True))
        while steps < SIMULATION_TICKS:
            steps += 1
            old_pos = self.agent.pos
            if not self.agent.alive:
                break
            if self.generation % CHECKPOINT_INTERVAL == 0:
                self.visualizer.clear_view()
            inputs = self.get_scaled_inputs(self.agent)  # a.get_scaled_inputs(self)
            output = net.activate(inputs)
            dead, reward = self.step(output, self.agent)
            dist_moved = self.get_distance(old_pos, self.agent.pos)
            if dead:
                self.agent.alive = False
            score += reward
            if self.generation % CHECKPOINT_INTERVAL == 0 and False:
                self.visualizer.update_view(self)
            # self.respawn_items()
        if self.generation % CHECKPOINT_INTERVAL == 0 and False:
            self.visualizer.flush()
        return score

    def simulate_individual(self, networks):
        self.invalidate_agents = True
        if self.generation % CHECKPOINT_INTERVAL == 0:
            self.visualizer.start_recording("Video/" + str(self.generation) + ".mp4")
        scores = []
        rewards = {}
        steps = 0
        self.place_foods()
        self.place_poisons()
        while steps < SIMULATION_TICKS:
            steps += 1
            if self.generation % CHECKPOINT_INTERVAL == 0:
                self.visualizer.clear_view()
            for gid, genome, net in networks:
                if self.invalidate_agents:
                    self.init_agents(networks)
                    self.invalidate_agents = False
                if gid not in self.agents:
                    self.agents[gid] = Agent(gid)
                a = self.agents[gid]
                if not a.alive:
                    continue
                inputs = self.get_scaled_inputs(a)  # a.get_scaled_inputs(self)
                output = net.activate(inputs)
                dead, reward = self.step(output, a)
                if dead:
                    a.alive = False
                    continue
                genome.fitness += reward
                if gid not in self.rewards:
                    self.rewards[gid] = []
                self.rewards[gid].append(reward)
            if self.generation % CHECKPOINT_INTERVAL == 0:
                self.visualizer.update_view(self)
            # self.respawn_items()
        if self.generation % CHECKPOINT_INTERVAL == 0:
            self.visualizer.flush()
        for k in self.rewards:
            scores.append(sum(self.rewards[k]))
        print("Score range [{:.3f}, {:.3f}]".format(min(scores), max(scores)))

    def simulate_best(self, network):
        self.visualizer.start_recording("Video/" + str(self.generation) + " - best" + ".mp4")
        self.render = True
        scores = []
        rewards = {}
        steps = 0
        self.place_foods()
        self.place_poisons()
        self.agent.set_pos(self.get_random_pos(agent_spawn=True))
        while steps < SIMULATION_TICKS:
            steps += 1
            self.visualizer.clear_view()
            inputs = self.get_scaled_inputs(self.agent)  # a.get_scaled_inputs(self)
            output = network.activate(inputs)
            dead, reward = self.step(output, self.agent)
            self.visualizer.update_view(self)
        self.visualizer.flush()

    def respawn_items(self):
        food_count = len(self.food)
        respawn_chance = (NUM_FOOD - food_count) / NUM_FOOD
        if random.random() < respawn_chance and food_count < NUM_FOOD:
            self.food.append(self.get_random_pos())
        poison_count = len(self.poison)
        respawn_chance = (NUM_POISON - poison_count) / NUM_POISON
        if random.random() < respawn_chance and poison_count < NUM_POISON:
            self.poison.append(self.get_random_pos())

    def init(self):
        self.place_foods()
        self.place_poisons()

    def get_random_pos(self, agent_spawn=False):
        x_bound = WORLD_BOUNDS[0]
        y_bound = WORLD_BOUNDS[1]
        if agent_spawn:
            return int(x_bound / 2), int(y_bound / 2)
        while True:
            rand_x = random.randrange(0, x_bound)
            rand_y = random.randrange(0, y_bound)
            if self.get_distance((self.bounds[0] / 2, self.bounds[1] / 2),
                                 (rand_x, rand_y)) < SPAWN_AREA_SIZE:
                continue
            # bug fix: a 'continue' inside the for-loops below would only skip
            # the inner loop iteration, so collect the check in a flag instead
            too_close = False
            for f in self.food:
                if self.get_distance((rand_x, rand_y), f) < MIN_SPAWN_DIST:
                    too_close = True
                    break
            for p in self.poison:
                if self.get_distance((rand_x, rand_y), p) < MIN_SPAWN_DIST:
                    too_close = True
                    break
            if too_close:
                continue
            return rand_x, rand_y

    def place_foods(self):
        self.food = []
        for _ in range(NUM_FOOD):
            self.food.append(self.get_random_pos())

    def place_poisons(self):
        self.poison = []
        for _ in range(NUM_POISON):
            self.poison.append(self.get_random_pos())
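# A hedged sketch (added) of how a single genome might be scored with the
# Environment above. neat-python is an assumption inferred from the
# genome/net.activate() usage; eval_genomes matches its Population.run signature.
import neat

def eval_genomes(genomes, config):
    env = Environment()
    for genome_id, genome in genomes:
        net = neat.nn.FeedForwardNetwork.create(genome, config)
        genome.fitness = env.simulate(genome, net)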
def main():
    vehicle = Vehicle()
    simulator = Simulator(controller=vehicle,
                          initial_state=np.array([*POSITION0, *VELOCITY0,
                                                  *ORIENTATION0, *OMEGA0]),
                          sim_time=SIM_TIME)
    simulator.simulate()
    visualization = Visualizer(simulator=simulator)
    print(simulator.state)
def test_zebra_contours(video_path, save_path):
    import cv2
    from PIL import Image, ImageDraw, ImageFont

    visor = Visor(debuger)
    visor.initializer()
    navigator = BlindNavigator()
    visualizer = Visualizer()
    # state to on-screen caption (the values are Chinese UI strings: wait for
    # the light / start forward / keep crossing / obstacle ahead / arrival)
    cmap = {
        'LIGHT_WAIT': '等灯',
        'START_FORWARD': '开始前进',
        'CROSS_FORWARD': '继续前行',
        'CROSS_WAIT': '有障碍物',
        'ARRIVAL': '到达',
    }
    for iiii, data in enumerate(read_video(video_path)):
        if data is None:
            break
        print(data.shape)
        p_light, detected_obstacles, traffic_lights, road_mask = navigator.executor(data)
        data = visor.inliner(data, save_path)
        for j in navigator.zebra_contours:
            bx, by, bw, bh = cv2.boundingRect(j)
            [bx, by, bw, bh] = visor.rescale_box([bx, by, bw, bh])
            cv2.rectangle(data, (bx, by), (bx + bw, by + bh), (220, 252, 255), -1)
        plt.figure()
        ax = plt.gca()
        ax.imshow(road_mask.astype(np.uint8))
        # plt.show()
        plt.savefig('test.png')
        depth = navigator.depth_estimator.predict(data)
        data = visualizer.plot(data, detected_obstacles, depth,
                               navigator.is_stable, navigator.zebra_contours)
        data = cv2.cvtColor(data, cv2.COLOR_BGRA2BGR)
        text = cmap[navigator.state]
        # cv2 and PIL store color channels in different orders
        cv2_im = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
        pil_im = Image.fromarray(cv2_im)
        # the draw object prints directly onto the image canvas
        draw = ImageDraw.Draw(pil_im)
        if 1 or p_light is not None:  # '1 or' forces the branch on (debug override)
            # bbox, state = p_lights.get_bbox(), p_lights.get_state()
            # traffic_lights.append((state, bbox))
            for bbox, state, d_or_t in zip(traffic_lights.get(),
                                           traffic_lights.get_field('states'),
                                           traffic_lights.get_field('d_or_t')):
                bbox = visor.rescale_box(bbox)
                # assert id_ < len(navigator.traffic_light_pool.trackers), 'id_ not valid'
                for i in range(thickness):
                    draw.rectangle([tuple(bbox[:2] - i), tuple(bbox[2:4] + i)])
                label_size_ = draw.textsize('%s, %s' % (state, d_or_t), efont)
                if bbox[1] - label_size_[1] >= 0:
                    text_origin = np.array([bbox[0], bbox[1] - label_size_[1]])
                else:
                    text_origin = np.array([bbox[0], bbox[1] + 1])
                draw.rectangle([tuple(text_origin), tuple(label_size_ + text_origin)],
                               fill=(0x00, 0x99, 0x11))
                draw.text(tuple(text_origin), '%s, %s' % (state, d_or_t),
                          (255, 255, 255), font=efont)
        text += 'frame: %d' % iiii
        label_size = draw.textsize(text, cfont)
        draw.rectangle([(0, 0), tuple(label_size)], fill=(0x00, 0x99, 0xFF))
        # draw.text arguments: position, text, font color, font
        draw.text((0, 0), text, (255, 255, 255), font=cfont)
        data = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
        visor.drawer(data)