def main(args):
    # data = make_circles(100, shuffle=True)[0]
    # data = ReadPlyFile('data/bun000.ply').get_data()
    data = ReadPlyFile('data/drill_1.6mm_0_cyb.ply').get_data()
    # data = ReadPlyFile('data/dragonStandRight_0.ply').get_data()
    # print(len(data))

    def filter_norm(point):
        return np.linalg.norm(point - np.array(data).min(0))

    def filter_x(point):
        return point[0]

    mapper = Mapper(data, resolution=0.2, overlap=0.4, cluster_alg='kmeans',
                    max_clusters=5, filter=filter_norm)
    graph = mapper.run()
    print(graph)

    viz = Visualization(graph)
    viz.draw(36, 3000)

    persistence = Persistence(graph)
    persistence.draw()
def bfs(a, b):
    global grid, start, end
    grid = [0 for i in range(Control.cols)]
    for i in range(Control.cols):
        grid[i] = [0 for j in range(Control.row)]
    for i in range(Control.cols):
        for j in range(Control.row):
            grid[i][j] = Node(i, j)
    for i in range(Control.cols):
        for j in range(Control.row):
            grid[i][j].show((255, 255, 255), 1)
    start = grid[a[0]][a[1]]
    end = grid[b[0]][b[1]]
    end.show((255, 8, 127), 0)
    start.show((255, 8, 127), 0)
    algo = Algorithm_A(grid, start, end)
    x = Visualization(grid, start, end)
    if x.initial():
        algo.BFS()
def __init__(self):
    ### Initializing all the required variables.
    self.x_train = []
    self.y_train = []
    self.x_test = []
    self.y_test = []
    self.x_valid = []
    self.y_valid = []
    self.x_train_aug = []
    self.y_train_aug = []
    self.x_train_gray = []
    self.x_valid_gray = []
    self.x_test_gray = []
    self.x_train_shuffle = []
    self.x_valid_shuffle = []
    self.x_test_shuffle = []
    self.y_train_shuffle = []
    self.y_valid_shuffle = []
    self.y_test_shuffle = []
    self.y_train_hot = []
    self.y_valid_hot = []
    self.y_test_hot = []
    self.x_train_reshape = []
    self.x_valid_reshape = []
    self.x_test_reshape = []
    self.x_train_final = []
    self.x_valid_final = []
    self.x_test_final = []
    self.y_train_final = []
    self.y_valid_final = []
    self.y_test_final = []
    # self._x_train_gray = []
    # self._x_test_gray = []

    ### Creating class instances of supporting classes.
    self.my_variables = MyVariables()
    self.bs = BasicSummary()
    self.vz = Visualization()
    self.my_image_processor = MyImageProcessor()

    ### Invoking the current class methods.
    self.invoke_data_summary()
    self.preprocessing()
    self.image_augmentation()
    self.split_and_shuffle_data()
    self.apply_one_hot_encoding_to_labels()
    self.reshape_features_data()
    self.get_final_data_values()
def __init__(self):
    # Init basic objects
    self.cropper = Cropper()
    self.extractor = Extractor(self.cropper)
    self.classifier = Classifier(self.extractor.images)
    self.connections = Connections(self.extractor, self.classifier)
    self.visualization = Visualization(self.connections)
def test_sma_plot_result(self):
    df = self.get_data()
    Bt = Backtesting(df)
    df = Bt.SMA_single_parameter()
    vi = Visualization(df)
    result = vi.sma_plot_result()
    self.assertIsNone(result)
def createVisualization(self):
    v1 = Visualization(self.X, self.Y, self.Z, self.T)
    # v1.createObjectChart3D()
    # v1.createSPHChart3D()
    # positionX, positionY, positionZ is the spot where the cross-section is made, in <0; 10>
    # positionX = 9
    # v1.createIntersectionX(positionX)
    positionY = 9
    v1.createIntersectionY(positionY)
def visualize_tweet_by_domain():
    """
    Visualize the processed tweets based on domain.
    """
    sum_domain_path = 'datasets/sum_domain.txt'
    sum_domain = []
    tweets_file = open(sum_domain_path, "r")
    for line in tweets_file:
        try:
            sum_domain = json.loads(line)
        except:
            continue
    v = Visualization()
    v.draw_domain_histogram(sum_domain)
def __init__(self, mu, Sigma, R, Q, visualize=True):
    self.mu = mu
    self.Sigma = Sigma
    self.R = R
    self.Q = Q
    # Maps feature IDs to row indices in the mean matrix
    self.mapLUT = {}
    self.visualize = visualize
    if self.visualize:
        self.vis = Visualization()
    else:
        self.vis = None
def report(self, schedule, numm):
    warning = 0
    print('_______schedule______')
    print(numm)
    print(len(schedule.shuttles))
    for shuttle in schedule.shuttles:
        shutable = self.DG.checkAble(shuttle)
        print(shuttle.before, shuttle.trip, shutable)
        if not shutable:
            print("ERROR : *This Shuttle is NOT serviceable.*")
            warning += 1
    if self.DG.getCost(schedule) >= 2.0 * self.n:
        print('ERROR : 0.01:duplicate / 1.0:index {}'.format(
            self.DG.getCost(schedule)))
        print('_____________________\n')
        return 0
    print('_____________________\n')
    serviced = schedule.getServiced(self.n)
    serviced.sort()
    non = []
    for i in range(-self.n, self.n + 1):
        if i == 0:
            continue
        if i not in serviced:
            non.append(i)
    left = []
    for shuttle in schedule.shuttles:
        left += shuttle.trip
    print('{} serviced'.format(serviced))
    print('{} non'.format(non))
    print('{} left'.format(left))
    print('{} late'.format(self.late))
    print('shutN {} / serv {} |non {} |left {} |late {}'.format(
        len(schedule.shuttles), len(serviced), len(non), len(left),
        len(self.late)))
    print('Reject')
    print('{lr} {r}'.format(r=schedule.rejects, lr=len(schedule.rejects)))
    if warning > 0:
        print("ERROR{} : There are {} Shuttles which are NOT serviceable*".
              format(warning, warning))
    V = Visualization()
    V.drawTrips(self.MG, self.RG, schedule, 'test ' + str(numm))
    print('_____________________\n')
    return warning
def predict(self, images, labels):
    session = MyTrainingModelWrapper.invoke_model()
    my_image_processor = MyImageProcessor()
    _imgs = []
    for img in images:
        _imgs.append(my_image_processor.apply_grayscale_and_normalize(img))
    imgs = np.array(_imgs)
    imgs = np.reshape(imgs, (-1, 32, 32, 1))
    values, indices = session.run(self.top_k_operations,
                                  feed_dict={self.features: imgs})
    signnames = Visualization.read_sign_names_from_csv_return()
    for idx, pset in enumerate(indices):
        print("")
        print('=======================================================')
        print("Correct Sign :", labels[idx], "-", signnames[labels[idx]])
        print('-------------------------------------------------------')
        # print the top-3 predictions with their softmax probabilities
        for k in range(3):
            print('{0:7.2%} : {1: <2} - {2: <40}'.format(
                values[idx][k], pset[k], signnames[pset[k]]))
        print('-------------------------------------------------------')
class StorageVET:
    """ StorageVET API. This will eventually allow StorageVET to be imported
    and used like any other python library.
    """

    def __init__(self, model_parameters_path, verbose=False):
        """ Constructor to initialize the parameters and data needed to run StorageVET.

        Args:
            model_parameters_path (str): Filename of the model parameters CSV or XML
                that describes the case to be analysed
        """
        self.verbose = verbose
        # Initialize the Params Object from Model Parameters
        self.case_dict = Params.initialize(model_parameters_path, verbose)  # unvalidated case instances
        self.results = Result.initialize(Params.results_inputs, Params.case_definitions)
        if verbose:
            self.visualization = Visualization(Params)
            self.visualization.class_summary()

    def solve(self):
        """ Run storageVET

        Returns: the Result class
        """
        starts = time.time()
        if Params.storagevet_requirement_check():
            for key, value in self.case_dict.items():
                run = Scenario.Scenario(value)
                run.add_technology()
                run.add_services()
                run.init_financials(value.Finance)
                run.add_control_constraints()
                run.optimize_problem_loop()
                Result.add_instance(key, run)  # cost benefit analysis is in the Result class
            Result.sensitivity_summary()
        ends = time.time()
        if self.verbose:
            print("Full runtime: " + str(ends - starts))
        return Result
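# A minimal usage sketch for the class above ('model_params.csv' is a
# hypothetical file name):
svet = StorageVET('model_params.csv', verbose=True)
results = svet.solve()  # runs every case and returns the Result class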
def __init__(self, numParticles, Alpha, laser, gridmap, visualize=True):
    self.numParticles = numParticles
    self.Alpha = Alpha
    self.laser = laser
    self.gridmap = gridmap
    self.visualize = visualize
    # particles is a numParticles x 3 array, where each row denotes a particle_handle
    # weights is a numParticles x 1 array of particle weights
    self.particles = None
    self.weights = None
    if self.visualize:
        self.vis = Visualization()
        self.vis.drawGridmap(self.gridmap)
    else:
        self.vis = None
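# A sketch of how the particle set documented above might be initialized,
# assuming poses are drawn uniformly over hypothetical map bounds
# (xmin, xmax, ymin, ymax):
import numpy as np

def init_particles(numParticles, xmin, xmax, ymin, ymax):
    particles = np.empty((numParticles, 3))  # one (x, y, theta) row per particle
    particles[:, 0] = np.random.uniform(xmin, xmax, numParticles)
    particles[:, 1] = np.random.uniform(ymin, ymax, numParticles)
    particles[:, 2] = np.random.uniform(-np.pi, np.pi, numParticles)
    weights = np.full((numParticles, 1), 1.0 / numParticles)  # uniform weights
    return particles, weights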
def visualize_tweet_by_candidate_scale():
    """
    Visualize the processed tweets based on the difference of votes
    between two candidates.
    """
    sum_state_path = 'datasets/sum_state.txt'
    sum_state = []
    tweets_file = open(sum_state_path, "r")
    for line in tweets_file:
        try:
            sum_state = json.loads(line)
        except:
            continue
    gop = ['trump', 'cruz']
    dem = ['clinton', 'sanders']
    gop_sum = {}
    dem_sum = {}
    for candidate, dic in sum_state.items():
        if candidate in gop:
            for state, vote in dic.items():
                if state not in gop_sum:
                    gop_sum[state] = {}
                gop_sum[state][candidate] = vote
                # initialize the other candidate
                if candidate == gop[0]:
                    gop_sum[state][gop[1]] = 0 if gop[1] not in gop_sum[state] else gop_sum[state][gop[1]]
                else:
                    gop_sum[state][gop[0]] = 0 if gop[0] not in gop_sum[state] else gop_sum[state][gop[0]]
        elif candidate in dem:
            for state, vote in dic.items():
                if state not in dem_sum:
                    dem_sum[state] = {}
                dem_sum[state][candidate] = vote
                # initialize the other candidate
                if candidate == dem[0]:
                    dem_sum[state][dem[1]] = 0 if dem[1] not in dem_sum[state] else dem_sum[state][dem[1]]
                else:
                    dem_sum[state][dem[0]] = 0 if dem[0] not in dem_sum[state] else dem_sum[state][dem[0]]
    v1 = Visualization()
    v1.ini_scale_hotmap(gop_sum)
    v1.draw_candmap_scale('gop')
    v2 = Visualization()
    v2.ini_scale_hotmap(dem_sum)
    v2.draw_candmap_scale('dem')
def _plot_stored_data(self, file_name, vis_name):
    with open(f'../plots/{file_name}.pkl.gz', 'rb') as file:
        data = pickle.load(file)
    plot_only = False  # True = visualize, False = only plot
    data_prop = {k: v for k, v in data.items() if type(k) != int}  # keep only parameters
    data = {k: v for k, v in data.items() if type(k) == int}  # integer keys hold per-iteration data
    data_prop.update({'plot_only': plot_only, 'store_only_output': False})
    vis = Vis(vis_name, **data_prop)
    for it, it_data in data.items():
        print('plot', it)
        vis.visualize(it_data, iteration=it)
    vis.stop_vis()
def __init__(self, mu, Sigma, R, Q, visualize=True):
    self.mu = mu
    self.Sigma = Sigma
    self.R = R
    self.Q = Q
    self.nfeatures = 0
    # step 6
    # expand matrix to 3,3
    # self.Q = np.pad(self.Q, ((0, 1), (0, 1)), mode='constant',
    #                 constant_values=0.0)
    # You may find it useful to keep a dictionary that maps a feature ID
    # to the corresponding index in the mean_pose_handle vector and
    # covariance matrix
    self.mapLUT = {}
    self.visualize = visualize
    if self.visualize:
        self.vis = Visualization()
    else:
        self.vis = None
def visuals(self):
    Visualize = Visualization()
    Visualize.pie_plot_sentiment("Sentiment Spread from Tweets",
                                 self.get_pos_tweet_sentiment(),
                                 self.get_neg_tweet_sentiment(),
                                 self.get_neu_tweet_sentiment())
    Visualize.pie_plot_sentiment("Sentiment Spread from Population",
                                 self.get_pos_pop_sentiment(),
                                 self.get_neg_pop_sentiment(),
                                 self.get_neu_pop_sentiment())
def visualize(self):
    viz_configs = self.configs_module.VisualizationConfigs()
    viz = Visualization(viz_configs)
    for configs in self.batch_configs.all_configs:
        main_results = self.recall_main_results(configs)
        print('Done Loading Results!')
        viz.add_to_learning_curve_plot(main_results)
    viz.finish_learning_curve_plot()
    # viz.plot_feature_importance_bar_chart()
    # viz.plot_all()
    if viz_configs.show_plots:
        viz.show()
def visualize_tweet_by_state():
    """
    Visualize the processed tweets based on the count of votes in each state.
    """
    sum_state_path = 'datasets/sum_state.txt'
    sum_state = []
    tweets_file = open(sum_state_path, "r")
    for line in tweets_file:
        try:
            sum_state = json.loads(line)
        except:
            continue
    gop = ['donald', 'trump', 'ted', 'cruz', 'kasich']
    dem = ['hillary', 'clinton', 'bernie', 'sanders']
    for candidate, dic in sum_state.items():
        v = Visualization()
        v.init_hotmap()
        for state, vote in dic.items():
            v.set_hotmap(state, vote)
        v.draw_hotmap('Approval Ratings of ' + candidate + ' Based on States',
                      blue=candidate in dem)
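# For reference, visualize_tweet_by_state() and visualize_tweet_by_candidate_scale()
# both assume each line of sum_state.txt is one JSON object mapping a candidate to
# per-state vote counts. A hypothetical line (names from the lists above, numbers
# invented):
#
#   {"trump": {"TX": 1203, "OH": 845}, "clinton": {"TX": 990, "OH": 1011}}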
def _decode_sample(self, x, topk, average_encoders=False, selected_encoder=-1,
                   visualization=False, rows_visualization=None,
                   cols_visualization=None):
    attns, rx = self._get_sample_attns(x, average_encoders=average_encoders,
                                       selected_encoder=selected_encoder)
    if visualization:
        Visualization.visualize_attentions(attns, 16, 16,
                                           rows_visualization, cols_visualization)
    attn = self._get_sample_attn(attns)
    x_lines = x.split(" . ")
    lx_lines = len(x_lines)
    sent_pad_required = max(0, self.document_max_sents - lx_lines)
    if visualization:
        Visualization.visualize_attentions(attn, 16, 16, 1, 1)
    attn = attn[sent_pad_required:, sent_pad_required:]
    if visualization:
        Visualization.visualize_attentions(attn, 16, 16, 1, 1)
    sentence_attn = attn.sum(axis=0) / attn.shape[0]
    topk_sentences = sorted(np.argsort(sentence_attn)[::-1][:topk])
    if visualization:
        Visualization.visualize_attentions(sentence_attn, 16, 16, 1, 1)
    summary = [x_lines[i] for i in topk_sentences]
    return " . ".join(summary)
class SnakeEnv(gym.Env):
    metadata = {'render.modes': ['human']}
    width = 10
    height = 20

    def __init__(self):
        self.is_render_initialized = False
        self.observation_space = spaces.Box(low=0, high=255,
                                            shape=[self.height, self.width, 3],
                                            dtype=numpy.uint8)  # 8-bit RGB observations
        self.action_space = spaces.Discrete(7)
        self.renderer: Visualization = None
        self.game: Snake = None
        self.last_distance = 0
        self.training_data: TrainingData = TrainingData()
        self.reward: RewardSystem = RewardSystem(verbose=False)

    def _render_initialize(self) -> None:
        self.renderer = Visualization(20, self.game.field)
        self.renderer.reset()
        self.is_render_initialized = True

    def step(self, action) -> (GameData, float, bool, dict):
        if type(action) is not str:
            action = Snake.action_set[action]
        self.reward.clear()
        state_before_move: GameData = copy.deepcopy(self.game.get_info())
        # state_before_move = self.game.get_info()
        move_was_possible = self.game.event(action)
        self.reward.for_move_result(move_was_possible)
        self.game.tick()
        state_after_move: GameData = self.game.get_info()
        self.training_data.walk_step()
        self.last_distance = state_before_move.air_line_distance
        delta_food: float = self.last_distance - state_after_move.air_line_distance
        self.reward.for_food_distance(delta_food)
        if state_after_move.snake_length - self.training_data.last_score > 0:
            self.reward.for_eating_food()
            self.training_data.max_number_of_steps += self.reward.steps(
                self.width, self.height, state_after_move.snake_length)
        self.training_data.score(state_after_move.snake_length)
        if self.game.is_game_over():
            self.reward.for_game_over(self.game.game_over_reason)
        if self.training_data.steps_exceeded():
            self.reward.for_starvation(self.training_data.number_of_steps_walked,
                                       state_after_move.snake_length)
        done = self.game.is_game_over() or self.training_data.steps_exceeded()
        state_before_move.game_over = done
        return state_before_move, self.reward.final_reward, done, {}

    def reset(self) -> GameData:
        self.game = Snake(Field(self.width, self.height))
        self.last_distance = self.game.get_info().air_line_distance
        if self.is_render_initialized:
            self.renderer.reset()
        self.training_data.next_epoch()
        self.training_data.max_number_of_steps = 3 * self.reward.steps(
            self.width, self.height, len(self.game.snake))
        return self.game.get_info()

    def render(self, mode='human', close=False) -> None:
        if not self.is_render_initialized:
            self._render_initialize()
        self.renderer.reset()
        self.renderer.display_visualization_stats()
        info = self.game.get_info()
        self.renderer.display_training(self.training_data)
        self.renderer.display_game(info)
        self.renderer.tick()
        pygame.event.pump()
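# A minimal rollout sketch against the environment above, taking random actions:
env = SnakeEnv()
state = env.reset()
done = False
while not done:
    action = env.action_space.sample()            # random action index
    state, reward, done, info = env.step(action)
    env.render()                                  # optional pygame rendering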
class Interface:
    def __init__(self):
        self.init_TkWindow()
        self.run = Lock()
        self.v = Visualization()
        self.update_planet(None)
        tk.mainloop()

    def update_planet(self, _):
        x = eval(self.vals["xtext"].get())
        y = eval(self.vals["ytext"].get())
        pos = np.array((x, y))
        vx = eval(self.vals["vxtext"].get())
        vy = eval(self.vals["vytext"].get())
        vel = np.array((vx, vy)) * 10
        r1 = eval(self.vals["r1text"].get())
        r2 = eval(self.vals["r2text"].get())
        rad = np.array((r1, r2))
        self.v.draw(pos, vel + pos, rad)

    def init_TkWindow(self):
        self.window = tk.Tk()
        self.window.geometry("500x800")
        self.window.title("Data")
        text = tk.StringVar()
        label = tk.Label(self.window, textvariable=text, padx=100, pady=50,
                         font=("Times New Roman", 20))
        text.set("Enter the simulation data")
        label.pack()
        # attach the controls to the window
        self.vals = {
            "vxtext": tk.StringVar(value="20"),
            "vytext": tk.StringVar(value="10"),
            "xtext": tk.StringVar(value="50"),
            "ytext": tk.StringVar(value="50"),
            "ttext": tk.StringVar(value="1/60"),
            "m1text": tk.StringVar(value="10**10"),
            "m2text": tk.StringVar(value="100"),
            "r1text": tk.StringVar(value="20"),
            "r2text": tk.StringVar(value="10")
        }
        tk.Label(self.window, text="Planet position, x coordinate").pack()
        start_position_x = tk.Entry(self.window, textvariable=self.vals["xtext"], width=40)
        start_position_x.pack()
        tk.Label(self.window, text="Planet position, y coordinate").pack()
        start_position_y = tk.Entry(self.window, textvariable=self.vals["ytext"], width=40)
        start_position_y.pack()
        tk.Label(self.window, text="Initial velocity Vx:").pack()
        velocity_x = tk.Entry(self.window, textvariable=self.vals["vxtext"], width=40)
        velocity_x.pack()
        tk.Label(self.window, text="Initial velocity Vy:").pack()
        velocity_y = tk.Entry(self.window, textvariable=self.vals["vytext"], width=40)
        velocity_y.pack()
        tk.Label(self.window, text="Time step:").pack()
        time_step = tk.Entry(self.window, textvariable=self.vals["ttext"], width=40)
        time_step.pack()
        tk.Label(self.window, text="Mass of the stationary planet").pack()
        mass1 = tk.Entry(self.window, textvariable=self.vals["m1text"], width=40)
        mass1.pack()
        tk.Label(self.window, text="Mass of the moving planet").pack()
        mass2 = tk.Entry(self.window, textvariable=self.vals["m2text"], width=40)
        mass2.pack()
        tk.Label(self.window, text="Radius of the stationary planet").pack()
        radiation1 = tk.Entry(self.window, textvariable=self.vals["r1text"], width=40)
        radiation1.pack()
        tk.Label(self.window, text="Radius of the moving planet").pack()
        radiation2 = tk.Entry(self.window, textvariable=self.vals["r2text"], width=40)
        radiation2.pack()
        break_label = tk.Label(self.window, text=" ")
        break_label.pack()
        start_position_x.focus_set()
        run = tk.Button(self.window, text="Run", width=20, command=self.start_simulation)
        run.pack()
        reset = tk.Button(self.window, text="Reset", width=20, command=self.restart_simulation)
        reset.pack()
        self.window.bind('<KeyRelease>', self.update_planet)

    def __start_simulation(self):
        v1 = eval(self.vals["vxtext"].get()) or 0
        v2 = eval(self.vals["vytext"].get()) or 0
        r1 = eval(self.vals["r1text"].get()) or 0
        r2 = eval(self.vals["r2text"].get()) or 0
        sp1 = eval(self.vals["xtext"].get()) or 0
        sp2 = eval(self.vals["ytext"].get()) or 0
        ts = eval(self.vals["ttext"].get()) or 0
        m1 = eval(self.vals["m1text"].get()) or 0
        m2 = eval(self.vals["m2text"].get()) or 0
        s = Simulation((float(sp1), float(sp2)), (float(v1), float(v2)),
                       float(ts), int(m1), int(m2), float(r1), float(r2))
        self.v = Visualization()
        for i in s:
            self.v.draw(i, rad=(r1, r2))

    def start_simulation(self):
        self.t = Thread(target=self.__start_simulation)
        self.t.start()

    def restart_simulation(self):
        self.start_simulation()
class EKFSLAM(object):
    # Construct an EKF instance with the following set of variables
    #    mu:        The initial mean vector
    #    Sigma:     The initial covariance matrix
    #    R:         The process noise covariance
    #    Q:         The measurement noise covariance
    #    visualize: Boolean variable indicating whether to visualize the filter
    def __init__(self, mu, Sigma, R, Q, visualize=True):
        self.mu = mu
        self.Sigma = Sigma
        self.R = R
        self.Q = Q
        # You may find it useful to keep a dictionary that maps a feature ID
        # to the corresponding index in the mean_pose_handle vector and
        # covariance matrix
        self.mapLUT = {}
        self.visualize = visualize
        if self.visualize:
            self.vis = Visualization()
        else:
            self.vis = None

    # Visualize the filter estimates
    #    deltat: Step size
    #    XGT:    Array with ground-truth pose
    def render(self, XGT=None):
        deltat = 0.1
        self.vis.drawEstimates(self.mu, self.Sigma)
        if XGT is not None:
            # print(XGT)
            self.vis.drawGroundTruthPose(XGT[0], XGT[1], XGT[2])
        plt.pause(deltat / 10)

    # Perform the prediction step to determine the mean and covariance
    # of the posterior belief given the current estimate for the mean
    # and covariance, the control data, and the process model
    #    u: The forward distance and change in heading
    def prediction(self, u):
        F = np.zeros((3, 3))
        noise = np.random.normal(0, self.R[0, 0])
        F[0, 0] = 1
        F[1, 1] = 1
        F[2, 2] = 1
        F[0, 2] = -(u[0] + noise) * np.sin(self.mu[2])
        F[1, 2] = (u[0] + noise) * np.cos(self.mu[2])
        u1 = u[0]
        u2 = u[1]
        self.mu[0] = self.mu[0] + u1 * np.cos(self.mu[2])
        self.mu[1] = self.mu[1] + u1 * np.sin(self.mu[2])
        self.mu[2] = self.mu[2] + u2
        upper_left = self.Sigma[0:3, 0:3]
        upper_right = self.Sigma[0:3, 3:]
        bot_left = self.Sigma[3:, 0:3]
        bot_right = self.Sigma[3:, 3:]
        temp = np.matmul(F, upper_left)
        temp = np.matmul(temp, np.transpose(F))
        temp[0, 0] = temp[0, 0] + self.R[0, 0]
        temp[1, 1] = temp[1, 1] + self.R[0, 0]
        temp[2, 2] = temp[2, 2] + self.R[1, 1]
        upper_left = temp
        upper_right = np.matmul(F, upper_right)
        bot_left = np.matmul(bot_left, np.transpose(F))
        up = np.concatenate((upper_left, upper_right), axis=1)
        bot = np.concatenate((bot_left, bot_right), axis=1)
        if not bot.shape[1] == 0:
            self.Sigma = np.concatenate((up, bot), axis=0)

    # Perform the measurement update step to compute the posterior
    # belief given the predictive posterior (mean and covariance) and
    # the measurement data
    #    z: The (x, y) position of the landmark relative to the robot
    #    i: The ID of the observed landmark
    def update(self, z, i):
        mIdx = self.mapLUT[i]
        xt = self.mu[0]
        yt = self.mu[1]
        thetat = self.mu[2]
        xm = self.mu[mIdx]
        ym = self.mu[mIdx + 1]
        H = np.zeros((2, self.mu.shape[0]))
        H[0, 0] = -np.cos(thetat)
        H[0, 1] = -np.sin(thetat)
        H[0, 2] = -xm * np.sin(thetat) + xt * np.sin(thetat) + ym * np.cos(thetat) - yt * np.cos(thetat)
        H[0, mIdx] = np.cos(thetat)
        H[0, mIdx + 1] = np.sin(thetat)
        H[1, 0] = np.sin(thetat)
        H[1, 1] = -np.cos(thetat)
        H[1, 2] = -xm * np.cos(thetat) + xt * np.cos(thetat) - ym * np.sin(thetat) + yt * np.sin(thetat)
        H[1, mIdx] = -np.sin(thetat)
        H[1, mIdx + 1] = np.cos(thetat)
        inv_temp = np.matmul(H, self.Sigma)
        inv_temp = np.matmul(inv_temp, np.transpose(H))
        inv_temp = inv_temp + self.Q
        temp = np.matmul(self.Sigma, np.transpose(H))
        k = np.matmul(temp, inv(inv_temp))
        temp0 = xm * np.cos(thetat) - xt * np.cos(thetat) + ym * np.sin(thetat) - yt * np.sin(thetat)
        temp1 = -xm * np.sin(thetat) + xt * np.sin(thetat) + ym * np.cos(thetat) - yt * np.cos(thetat)
        hu = np.array([temp0[0], temp1[0]])
        gain = np.matmul(k, z - hu)
        gain = np.reshape(gain, (self.mu.shape[0], 1))
        self.mu = self.mu + gain
        I = np.eye(self.mu.shape[0])
        temp = np.matmul(k, H)
        temp = I - temp
        self.Sigma = np.matmul(temp, self.Sigma)

    # Augment the state vector to include the new landmark
    #    z: The (x, y) position of the landmark relative to the robot
    #    i: The ID of the observed landmark
    def augmentState(self, z, i):
        # Update mapLUT to include the new landmark
        self.mapLUT[i] = 3 + 2 * len(self.mapLUT)
        G = np.zeros((2, self.mu.shape[0]))
        G[0, 0] = 1
        G[0, 2] = -z[0] * np.sin(self.mu[2]) - z[1] * np.cos(self.mu[2])
        G[1, 1] = 1
        G[1, 2] = z[0] * np.cos(self.mu[2]) - z[1] * np.sin(self.mu[2])
        xm = np.array([self.mu[0] + z[0] * np.cos(self.mu[2]) - z[1] * np.sin(self.mu[2])])
        ym = np.array([self.mu[1] + z[0] * np.sin(self.mu[2]) + z[1] * np.cos(self.mu[2])])
        self.mu = np.concatenate((self.mu, xm), axis=0)
        self.mu = np.concatenate((self.mu, ym), axis=0)
        upper_left = self.Sigma
        upper_right = np.matmul(self.Sigma, np.transpose(G))
        bot_left = np.matmul(G, self.Sigma)
        temp = np.matmul(G, self.Sigma)
        temp = np.matmul(temp, np.transpose(G))
        bot_right = temp + self.Q
        up = np.concatenate((upper_left, upper_right), axis=1)
        bot = np.concatenate((bot_left, bot_right), axis=1)
        if not bot.shape[1] == 0:
            self.Sigma = np.concatenate((up, bot), axis=0)

    # Runs the EKF SLAM algorithm
    #    U:   Array of control inputs, one column per time step
    #    Z:   Array of landmark observations in which each column
    #         [t; id; x; y] denotes a separate measurement and is
    #         represented by the time step (t), feature id (id),
    #         and the observed (x, y) position relative to the robot
    #    XGT: Array of ground-truth poses (may be None)
    def run(self, U, Z, XGT=None, MGT=None):
        # Draws the ground-truth map
        if MGT is not None:
            self.vis.drawMap(MGT)
        print("init")
        print(self.mu)
        print(self.R)
        print(self.Q)
        # Iterate over the data
        zIdx = 0
        for t in range(U.shape[1]):
            # for t in range(0, 1):
            print(t)
            u = U[:, t]
            self.prediction(u)
            # Bound zIdx by the number of measurements so we never read
            # past the end of Z
            while zIdx < Z.shape[1] and Z[0, zIdx] == t:
                if Z[1, zIdx] in self.mapLUT:
                    self.update(Z[2:, zIdx], Z[1, zIdx])
                else:
                    self.augmentState(Z[2:, zIdx], Z[1, zIdx])
                zIdx = zIdx + 1
            # You may want to call the visualization function
            # between filter steps
            if self.visualize:
                if XGT is None:
                    self.render(None)
                else:
                    self.render(XGT[:, t])
        plt.savefig("small_noise")
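# A minimal driver sketch for the class above; the initial values and noise
# covariances are hypothetical, and U/Z would come from the dataset:
import numpy as np

mu0 = np.zeros((3, 1))      # initial pose (x, y, theta)
Sigma0 = 0.01 * np.eye(3)   # initial pose covariance
R = np.diag([0.05, 0.01])   # process noise (distance, heading)
Q = 0.1 * np.eye(2)         # measurement noise
slam = EKFSLAM(mu0, Sigma0, R, Q, visualize=False)
# slam.run(U, Z)            # U: 2 x T controls, Z: 4 x M columns [t; id; x; y]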
class Visual(Algorithm):
    """
    This class is a default visualizer that can be used to wrap any algorithm
    for visualization. It basically delegates all methods to the algorithm
    passed in the constructor but paints the game data on its way.
    """

    def __init__(self, algorithm: Algorithm):
        super().__init__()
        self.decider: Algorithm = algorithm
        self.vis = Visualization(20, Field(10, 20))

    def decide(self, info: GameData) -> str:
        return self.decider.decide(info)

    def epochfinished(self) -> (object, float):
        return self.decider.epochfinished()

    def train(self, info: GameData, action, reward) -> None:
        return self.decider.train(info, action, reward)

    def visualize(self, data: GameData, training: TrainingData):
        layer = self.decider.visualize(data, training)
        self.vis.reset()
        self.vis.display_visualization_stats()
        self.vis.display_training(training)
        self.vis.display_game(data)
        self.vis.add_layer(layer)
        self.vis.tick()
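# A usage sketch for the wrapper above; RandomWalker is a hypothetical
# Algorithm subclass:
agent = Visual(RandomWalker())
# agent.decide(...) and agent.train(...) behave exactly like the wrapped
# algorithm's, while agent.visualize(...) additionally paints the game state.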
# Processing
p = Processing(obs=BDclim, mnt=IGN, nwp=AROME, model_path=prm['model_path'],
               GPU=prm["GPU"], data_path=prm['data_path'])

# Predictions
array_xr = p.predict_at_stations(prm["stations_to_predict"], verbose=True,
                                 Z0_cond=prm["Z0"], peak_valley=prm["peak_valley"])

# Visualization
v = Visualization(p)

# Evaluation
e = Evaluation(v, array_xr)

# Store nwp, cnn predictions and observations
for station in prm["stations_to_predict"]:
    nwp, cnn, obs = e._select_dataframe(array_xr, station_name=station,
                                        day=None, month=month, year=year,
                                        variable=prm["variable"],
                                        rolling_mean=None, rolling_window=None)
    results["nwp"][station].append(nwp)
    results["cnn"][station].append(cnn)
    results["obs"][station].append(obs)

del p
class LyricsForm:
    def __init__(self):
        self.__viz = Visualization()
        self.__ssm = SSMOperation()
        self.__LENRATE = 6 / 7.0
        self.__TOPK = 5

    def formAnalysis(self, SSM, isViz=False):
        """ Visualize the matrix """
        if isViz:
            self.__SSMViz(SSM)
        """ NMF test
        from nmf import nmf
        n = SSM.shape[0]
        r = 5
        w = numpy.random.random([n, r])
        h = numpy.random.random([r, n])
        (wo, ho) = nmf(SSM, w, h, 0.001, 1000, 100)
        if isViz:
            self.__SSMViz(numpy.dot(wo, ho))
        for k in range(r):
            print k
            wmask = numpy.zeros((n, r))
            wmask[:, k] = numpy.ones(n)
            hmask = numpy.zeros((r, n))
            hmask[k, :] = numpy.ones(n)
            self.__SSMViz(numpy.dot(wo * wmask, hmask * ho))
        """
        """ Compute all block families in the SSM """
        startTime = time.time()
        self.__allFamilyM = self.__allBlockFamily(SSM)
        endTime = time.time()
        print "LyricsForm: Family Matrix Construction Time = %.2fsec" % (endTime - startTime)
        self.__allFamilyM = numpy.insert(self.__allFamilyM, 0, 0, axis=0)
        """ Block family combination """
        startTime = time.time()
        ff = FormFinder(self.__allFamilyM, self.__TOPK)
        combineList = ff.computing()
        endTime = time.time()
        print "LyricsForm: Block Combination Time = %.2fsec" % (endTime - startTime)
        print
        """ Convert combineList into song-form format """
        lineNum = SSM.shape[0]
        formList = self.__resultForm(combineList, lineNum)
        return formList

    def __resultForm(self, combineList, lineNum):
        resultList = []
        for combine in combineList:
            lyricsLine = set(range(1, lineNum + 1))
            form = []
            familyList = []
            cohesionList = []
            for coor in combine["coors"]:
                cohesion = self.__allFamilyM[coor[0]][coor[1]]["cohesion"]
                family = self.__allFamilyM[coor[0]][coor[1]]["family"]
                lineNumFamily = map(lambda block: [block[0] + 1, block[1] + 1], family)
                familyList.append(lineNumFamily)
                cohesionList.append(cohesion)
                for block in lineNumFamily:
                    start = block[0]
                    end = block[1]
                    lyricsLine -= set(range(start, end + 1))
            """ Identify the chorus """
            maxCohesion = max(cohesionList)
            tempList = []
            for i in range(len(cohesionList)):
                if cohesionList[i] == maxCohesion:
                    tempList.append((i, len(familyList[i])))
            idx = numpy.argmax(map(lambda pair: pair[1], tempList))
            chorusIdx = tempList[idx][0]
            chorus = {"label": "chorus", "group": familyList[chorusIdx]}
            form.append(chorus)
            familyList.pop(chorusIdx)
            """ Identify the verses """
            for i in range(len(familyList)):
                verse = {"label": "verse" + str(i + 1), "group": familyList[i]}
                form.append(verse)
            """ Identify the intro, bridge, and outro """
            if len(lyricsLine) > 0:
                lyricsLine = list(lyricsLine)
                prevLineNum = lyricsLine[0] - 1
                block = []
                remainBlocks = []
                for i in range(0, len(lyricsLine)):
                    if prevLineNum + 1 != lyricsLine[i]:
                        """ A block is complete """
                        remainBlocks.append([block[0], block[-1]])
                        block = [lyricsLine[i]]
                    else:
                        block.append(lyricsLine[i])
                    prevLineNum = lyricsLine[i]
                remainBlocks.append([block[0], block[-1]])
                """ Add the intro """
                if remainBlocks[0][0] == 1:
                    form.append({"label": "intro", "group": [remainBlocks[0]]})
                """ Add the bridge """
                if len(remainBlocks[1:-2]) > 0:
                    form.append({"label": "bridge", "group": remainBlocks[1:-2]})
                """ Add the outro """
                if remainBlocks[-1][1] == lineNum:
                    form.append({"label": "outro", "group": [remainBlocks[-1]]})
            resultList.append({"score": combine["score"], "form": form})
        return resultList

    def __allBlockFamily(self, M):
        M = copy.deepcopy(M)
        """ Table recording the data of every family """
        familyM = []
        for i in range(M.shape[0] / 2):
            familyM.append([None] * M.shape[0])
        """ Remove the similarity values equal to 1.0 from the SSM and record the rest in exOneArray """
        exOneArray = numpy.extract(M != 1.0, M)
        """ Compute the similarity threshold between children and parent """
        simT = exOneArray.mean() + exOneArray.std()
        # simT = exOneArray.mean()
        """ Create a ChildrenFinder object cf, passing in the total number of lines of these lyrics """
        cf = ChildrenFinder()
        """ Compute the children of every parent block (start line & size) """
        for size in range(4, len(M) / 2 + 1):
            for start in range(0, M.shape[0] - size):
                """ Build the corridor matrix of the SSM """
                corridorMask = numpy.zeros(M.shape)
                corridorMask[start: start + size] = 1
                corridorM = M * corridorMask
                """ Find the children-matrix region framed by the parent block from start to start + size """
                childrenMatrix = M[start: start + size, start + size: M.shape[1]]
                """ Compute the length threshold between children and parent """
                lenT = math.ceil(float(size) * self.__LENRATE)
                # lenT = float(size)
                # if lenT > 7:
                #     lenT -= 1
                """ Use the children-finding algorithm to compute the children formed by
                    this parent's best repeating pattern """
                blockFamily = cf.children(childrenMatrix, lenT=lenT, simT=simT)
                """ Only consider this parent further if children were found """
                if blockFamily != []:
                    """ Shift the family block ranges to absolute positions (family blocks
                        are counted from line 1) and add the parent block itself to the
                        block family """
                    for i in range(len(blockFamily)):
                        blockFamily[i] = [lineNum + (start + size) for lineNum in blockFamily[i]]
                    blockFamily.insert(0, [start, start + size - 1])
                    """ Compute the complete graph G(V, E) formed by this family, where the
                        weight on E is the similarity between two blocks, as well as the
                        family's cohesion """
                    familyGraph, familyCohesion = self.__familyGraphBuild(blockFamily, M)
                    """ Total coverage length of the family """
                    familyCoverage = sum(map(lambda block: block[1] - block[0] + 1, blockFamily))
                    """ Record the family block in the block matrix """
                    familyM[size - 1][start] = {"graph": familyGraph,
                                                "family": blockFamily,
                                                "cohesion": familyCohesion,
                                                "coverage": familyCoverage}
                """ Visual inspection tools """
                # self.__viz.grayMatrix(corridorM, "Row Mask SSM: start= " + str(start) + " size= " + str(size))
                # pathMask = cf.getPathMask()
                # familyMask = cf.getFamilyMask()
                # corridorM[start: start + size, start + size: M.shape[1]] = pathMask
                # self.__viz.grayMatrix(corridorM, "Path Mask")
                # corridorM[start: start + size, start + size: M.shape[1]] = familyMask
                # self.__viz.grayMatrix(corridorM, "Family Mask")
        return familyM

    def __familyGraphBuild(self, family, M):
        """ Build the family's complete graph """
        familyGraph = nx.complete_graph(len(family))
        cohesion = 0.0
        for i in range(len(family) - 1):
            for j in range(i + 1, len(family)):
                """ Use the shorter block length as the i axis """
                shortIdx = i
                longIdx = j
                shortLen = family[i][1] - family[i][0] + 1
                longLen = family[j][1] - family[j][0] + 1
                if shortLen > longLen:
                    """ Swap """
                    shortIdx, longIdx = j, i
                    shortLen, longLen = longLen, shortLen
                sim = 0.0
                iSlice = slice(family[shortIdx][0], family[shortIdx][1] + 1)
                windowSize = shortLen
                """ Number of loop iterations needed to compute the two blocks' similarity """
                loopNum = longLen - shortLen + 1
                for offset in range(loopNum):
                    # slide a window of length windowSize along the longer block
                    jSlice = slice(family[longIdx][0] + offset,
                                   family[longIdx][0] + offset + windowSize)
                    tempSim = M[iSlice, jSlice].trace() / windowSize
                    if tempSim > sim:
                        sim = tempSim
                """ Store the computed similarity (sim) on the family graph's edge """
                familyGraph[i][j]["sim"] = sim
                cohesion += sim
        cohesion = cohesion / (len(family) * (len(family) - 1) / 2.0)
        # print "block family", family
        # print "family graph", familyGraph.edge
        # print "cohesion", cohesion
        # raw_input()
        return familyGraph, cohesion

    def SSMGen(self, lines, simObject, matrixType='sim'):
        """ Generate the self-comparison matrix (self matrix) """
        startTime = time.time()
        """ Declare the line-similarity matrix, type: numpy.array """
        SSM = numpy.zeros([len(lines), len(lines)])
        """ Build the self matrix """
        for i in range(len(lines)):
            for j in range(i, len(lines)):
                lineSim = simObject.similarity(lines[i][:], lines[j][:])
                """ Symmetric matrix """
                SSM[i][j] = SSM[j][i] = lineSim
        """ Find the largest value in the matrix """
        maxValue = SSM.max()
        print "LyricsForm: Matrix Max Value : %f" % maxValue
        """ The DTW algorithm returns -1 when the distance between two sequences
            overflows, so any negative value in the matrix is replaced with the
            matrix's maximum value """
        for rowIdx in range(SSM.shape[0]):
            SSM[rowIdx] = map(lambda ele: ele < 0.0 and maxValue or ele, SSM[rowIdx])
        """ If the similarity object computes distances, convert the matrix values
            from distances to similarities, so that larger means more similar """
        # print simObject.__class__.__name__
        # if 'Dist' in simObject.__class__.__name__:
        if 'dist' in matrixType:
            # use the largest distance in the matrix as the lowest similarity
            """
            minDist = M.min()
            maxDist = M.max()
            M = 1 - ((M - minDist) / (maxDist - minDist))
            """
            maxM = numpy.ones(SSM.shape) * maxValue
            SSM = maxM - SSM
        SSM = self.__ssm.localNormalize(SSM)
        endTime = time.time()
        print "LyricsForm: SSM Construction Time = %.2fsec" % (endTime - startTime)
        print "LyricsForm: Matrix Shape = %s" % str(SSM.shape)
        # print "LyricsForm: SSM Visualization..."
        # self.__SSMViz(SSM)
        return SSM

    def __SSMViz(self, SSM):
        """ Each element of the constructed self matrix may be a distance or a
            similarity; distances are currently converted to similarities, which
            yields the final SSM """
        """ Local-normalize the SSM, i.e. divide by its maximum so the values lie
            in [0, 1]. This step only has an effect on distance matrices and on
            unnormalized similarity matrices """
        # self.__viz.grayMatrix(SSM, "Local Normalized SSM: " + self.__simObjClassName)
        self.__viz.grayMatrix(SSM, "Local Normalized SSM: ")
        # SSM = self.__ssm.secondOrder(SSM)
        # self.__viz.grayMatrix(SSM, "Second Order SSM: " + self.__simObjClassName)
        """ Extract the positions where exact paths appear """
        # exactMask = self.__ssm.masker(SSM, 1.0)
        # self.__viz.grayMatrix(exactMask, "Exact Mask: " + self.__simObjClassName)
        """ SSM enhancement; the last argument of the enhance function sets L, L = 4 """
        # enM = self.__ssm.enhance(SSM, 2)
        # self.__viz.grayMatrix(enM, "Enhanced SSM: " + self.__simObjClassName)
        """ Higher-order matrix """
        # SSM = self.__ssmSecondOrder(SSM)
        # SSM = self.__ssmNormalization(SSM)
        # self.__viz.grayMatrix(SSM, "Second Order SSM: " + self.__simObjClassName)
        """ Extract the positions where approximate paths appear in the enhanced
            matrix; threshold = mean + one standard deviation """
        # threshold = enM.mean() + enM.std()
        # approxMask = self.__ssm.masker(enM, threshold)
        # self.__viz.grayMatrix(approxMask, "Approximate Mask: " + self.__simObjClassName)
        """ Take the union of the exact mask and the approximate mask """
        # mask = map(numpy.bitwise_or, approxMask, exactMask)
        # self.__viz.grayMatrix(mask, "Total Mask: " + self.__simObjClassName)
        # self.__viz.grayMatrix(SSM * mask, "Original SSM Mask: " + self.__simObjClassName)
        # self.__viz.grayMatrix(enM * mask, "Enhanced SSM Mask: " + self.__simObjClassName)

    def __matrix2ssm(self, M):
        M = copy.deepcopy(M)
        """ Find the largest value in the matrix """
        maxValue = M.max()
        print "LyricsForm: Matrix Max Value : %f" % maxValue
        """ The DTW algorithm returns -1 when the distance between two sequences
            overflows, so any negative value in the matrix is replaced with the
            matrix's maximum value """
        for rowIdx in range(M.shape[0]):
            M[rowIdx] = map(lambda ele: ele < 0.0 and maxValue or ele, M[rowIdx])
        """ If the similarity object computes distances, convert the matrix values
            from distances to similarities, so that larger means more similar """
        if 'Dist' in self.__simObjClassName:
            # use the largest distance in the matrix as the lowest similarity
            """
            minDist = M.min()
            maxDist = M.max()
            M = 1 - ((M - minDist) / (maxDist - minDist))
            """
            mm = numpy.ones(M.shape) * maxValue
            M = mm - M
        return M
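# As computed by the sliding-window loop in __familyGraphBuild above, the
# similarity between a shorter block B_s (start s0, length w) and a longer
# block B_l (start l0, length L), and the cohesion averaged over all block
# pairs in a family of size n, are
#
#   sim(B_s, B_l) = \max_{0 \le o \le L - w} \frac{1}{w} \sum_{k=0}^{w-1} M[s0 + k, l0 + o + k]
#   cohesion      = \frac{2}{n(n - 1)} \sum_{i < j} sim(B_i, B_j)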
from sklearn.linear_model import LinearRegression
from RegressionModel import RegressionModel
from MLData import MLData
from Visualization import Visualization


class LLS(RegressionModel):
    """ Linear least-squares regression via scikit-learn's LinearRegression """

    def __init__(self):
        self.regressor = LinearRegression()


if __name__ == "__main__":
    data = MLData()
    data.loadData("../data/mobile.csv", 2, 2)

    # fit the regressor to the data
    lls = LLS()
    lls.fitRegressor(data)

    # draw line
    coef = lls.regressor.coef_.flatten().tolist() + [lls.regressor.intercept_]
    print("LLS line coef", coef)
    vis = Visualization()
    vis.visualizeData(data, coef)