def get_balanced_dataset(in_memory=False, TMP_WHOLE_UNBALANCED=False):
    from ActiveLearning.LargeDatasetHandler_AL import LargeDatasetHandler_AL
    import Settings

    # init structures
    import mock
    args = mock.Mock()
    args.name = "test"

    settings = Settings.Settings(args)
    WholeDataset = LargeDatasetHandler_AL(settings)

    # load paths of our favourite dataset!
    import DataLoader, DataPreprocesser, Debugger
    import DatasetInstance_OurAerial

    dataLoader = DataLoader.DataLoader(settings)
    debugger = Debugger.Debugger(settings)

    #h5_file = settings.large_file_folder + "datasets/OurAerial_preloadedImgs_subBAL3.0_1.0_sel2144_res256x256.h5"
    h5_file = settings.large_file_folder + "datasets/OurAerial_preloadedImgs_subBAL3.0_1.0_sel2144_res256x256_SMALLER.h5"

    datasetInstance = DatasetInstance_OurAerial.DatasetInstance_OurAerial(
        settings, dataLoader, "256_cleanManual")

    if not TMP_WHOLE_UNBALANCED:
        # ! this one automatically balances the data + deletes misfits in the resolution
        data, paths = datasetInstance.load_dataset()
        lefts_paths, rights_paths, labels_paths = paths
        print("Paths: L,R,Y ", len(lefts_paths), len(rights_paths), len(labels_paths))
    else:
        # ! this one loads them all (CHECK: would some be deleted?)
        paths = datasetInstance.load_dataset_ONLY_PATHS_UPDATE_FROM_THE_OTHER_ONE_IF_NEEDED()
        lefts_paths, rights_paths, labels_paths = paths
        print("Paths: L,R,Y ", len(lefts_paths), len(rights_paths), len(labels_paths))

    WholeDataset.initialize_from_just_paths(paths)

    if in_memory:
        assert not TMP_WHOLE_UNBALANCED
        #WholeDataset.keep_it_all_in_memory()
        WholeDataset.keep_it_all_in_memory(h5_file)

    npy_path = settings.large_file_folder + "datasets/OurAerial_preloadedImgs_BALCLASS.npy"

    I_WANT_TO_RECOMPUTE_THE_LABELS = False
    if I_WANT_TO_RECOMPUTE_THE_LABELS:
        assert False  # don't want to mistakenly recompute these ...
        WholeDataset.compute_per_tile_class_in_batches()
        WholeDataset.save_per_tile_class(npy_path)

    WholeDataset.load_per_tile_class(npy_path)

    WholeDataset.report()
    return WholeDataset
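# Hedged usage sketch (not part of the original source): it only calls the
# loader defined above and assumes the cached .h5/.npy files referenced there
# already exist under settings.large_file_folder.
if __name__ == "__main__":
    WholeDataset = get_balanced_dataset(in_memory=False)
    # get_balanced_dataset() has already called report(), which prints the
    # per-tile class balance of the returned LargeDatasetHandler_AL.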
def __init__(self, settings, BACKBONE='resnet34', verbose=1):
    self.settings = settings
    self.debugger = Debugger.Debugger(settings)
    self.verbose = verbose

    self.use_sigmoid_or_softmax = 'softmax'
    assert self.use_sigmoid_or_softmax == 'softmax'

    #BACKBONE = 'resnet34'
    #BACKBONE = 'resnet50'  #batch 16
    #BACKBONE = 'resnet101'  #batch 8
    #BACKBONE = 'seresnext50'  #trying batch 16 as well

    custom_weights_file = "imagenet"
    #weights from imagenet finetuned on aerial data specific task - will it work? will it break?
    #custom_weights_file = "/scratch/ruzicka/python_projects_large/AerialNet_VariousTasks/model_UNet-Resnet34_DSM_in01_95percOfTrain_8batch_100ep_dsm01proper.h5"

    resolution_of_input = None
    #resolution_of_input = 256
    self.model = self.create_model(backbone=BACKBONE, custom_weights_file=custom_weights_file,
                                   input_size=resolution_of_input, channels=3)

    if self.verbose >= 2:
        self.model.summary()
def __init__(self, settings, dataset):
    self.settings = settings
    self.dataset = dataset
    self.dataPreprocesser = dataset.dataPreprocesser
    self.debugger = Debugger.Debugger(settings)

    self.use_sigmoid_or_softmax = 'softmax'
    assert self.use_sigmoid_or_softmax == 'softmax'

    #BACKBONE = 'resnet34'
    #BACKBONE = 'resnet50'  #batch 16
    #BACKBONE = 'resnet101'  #batch 8
    BACKBONE = settings.model_backend

    custom_weights_file = "imagenet"
    #weights from imagenet finetuned on aerial data specific task - will it work? will it break?
    #custom_weights_file = "/scratch/ruzicka/python_projects_large/AerialNet_VariousTasks/model_UNet-Resnet34_DSM_in01_95percOfTrain_8batch_100ep_dsm01proper.h5"

    #resolution_of_input = self.dataset.datasetInstance.IMAGE_RESOLUTION
    resolution_of_input = None
    self.model = self.create_model(backbone=BACKBONE, custom_weights_file=custom_weights_file,
                                   input_size=resolution_of_input, channels=3)
    self.model.summary()

    self.local_setting_batch_size = settings.train_batch  #8 #32
    self.local_setting_epochs = settings.train_epochs  #100

    self.train_data_augmentation = True

    # saving paths for plots ...
    self.save_plot_path = "plots/"
def __init__(self, settings, dataset):
    self.settings = settings
    self.dataset = dataset
    self.dataPreprocesser = dataset.dataPreprocesser
    self.debugger = Debugger.Debugger(settings)

    self.use_sigmoid_or_softmax = 'softmax'
    assert self.use_sigmoid_or_softmax == 'softmax'

    #BACKBONE = 'resnet34'
    #BACKBONE = 'resnet50'  #batch 16
    #BACKBONE = 'resnet101'  #batch 8
    BACKBONE = settings.model_backend

    custom_weights_file = "imagenet"

    #resolution_of_input = self.dataset.datasetInstance.IMAGE_RESOLUTION
    resolution_of_input = None
    self.model = self.create_model(backbone=BACKBONE, custom_weights_file=custom_weights_file,
                                   input_size=resolution_of_input, channels=3)
    self.model.summary()

    self.local_setting_batch_size = settings.train_batch  #8 #32
    self.local_setting_epochs = settings.train_epochs  #100

    self.train_data_augmentation = True

    # saving paths for plots ...
    self.save_plot_path = "plots/"
def Run(source: str, debug: bool):
    try:
        prog = Program(source)

        preproc = Preprocessor()
        preproc.preprocess(prog)
        # for l in prog.preprocessed:
        #     print(l)

        assembler = Assembler()
        assembler.assemble(prog)
        # for l in prog.labels:
        #     print(l, f" Position: {l.position}")
        # for i in prog.instructions:
        #     print(i, f" Position: {i.position} Label: {i.labelName}")
        #     for p in i.parameters:
        #         print(" ", p, end="")
        #         if p.labelName != None:
        #             print(f" {p.labelName}")
        #         else:
        #             print("")
        # for b in prog.binary:
        #     print("%04X " % b, end="")
        # print("")

        computer = Computer()
        computer.loadProgram(prog)

        if debug == False:
            computer.run()
            for l in prog.labels:
                if l.size > 0:
                    print("%13s (%6s[%3d]): " % (l.name, l.datatype, l.size), end="")
                    for i in range(l.position, l.position + l.size):
                        print("%d " % computer.memory[i], end="")
                    print("")
        else:
            debugger = Debugger(computer, prog)
            debugger.run()

    except PreprocessorError as e:
        print(e)
    except AssemblerError as e:
        print(e)
    except CompilerError as e:
        print(e)
    except Exception as e:
        raise e
def __init__(self, verbose):
    self.debugger = Debugger(verbose)
    try:
        urllib.request.urlopen('http://google.com')
    except:
        self.debugger.rise_Error("Internet Connection Cannot Be Established")
        exit(1)
def __init__(self, settings, dataLoader, variant="256_cleanManual"):
    self.settings = settings
    self.dataLoader = dataLoader
    self.debugger = Debugger.Debugger(settings)

    self.DEBUG_TURN_OFF_BALANCING = False

    self.variant = variant  # 256 = 256x256, 112 = 112x112

    self.local_setting_skip_rows = 2
    self.local_setting_skip_columns = 2

    self.save_path_ = "OurAerial_preloadedImgs_sub"

    if self.variant == "256_cleanManual":
        self.dataset_version = "256x256_cleanManual"
        self.SUBSET = -1
        self.IMAGE_RESOLUTION = 256
        self.CHANNEL_NUMBER = 4
        self.LOAD_BATCH_INCREMENT = 10000  # loads in batches this big for each balancing round

        self.default_raster_shape = (256, 256, 4)
        self.default_vector_shape = (256, 256)

        # self.hdf5_path = self.settings.large_file_folder + "datasets/OurAerial_preloadedImgs_subBAL3.0_1.0_sel2144_res256x256.h5"
        self.hdf5_path = self.settings.large_file_folder + "datasets/OurAerial_preloadedImgs_subBAL3.0_1.0_sel2144_res256x256_SMALLER.h5"

        self.bigger_than_percent = 3.0  # try?
        self.smaller_than_percent = 1.0  # there shouldn't be much noise in this ...

        self.split_train = 1900
        self.split_val = 2000

    elif self.variant == "6368_special":
        self.local_setting_skip_rows = 0
        self.local_setting_skip_columns = 0

        self.dataset_version = "6368_special"
        self.SUBSET = None  # all
        self.IMAGE_RESOLUTION = 6368
        self.CHANNEL_NUMBER = 4
        self.LOAD_BATCH_INCREMENT = 20  # from 14 images

        self.bigger_than_percent = 0.0  # doesn't make much sense here!
        self.smaller_than_percent = 0.0  # doesn't make much sense here!

        self.default_raster_shape = (6368, 6368, 4)
        self.default_vector_shape = (6368, 6368)

        # decent dataset:
        self.hdf5_path = self.settings.large_file_folder + "datasets/OurAerial_preloadedImgs_subBAL0.0_0.0_sel13_res6368x6368.h5"

        # splitting <14>: 0 train, 0 val, 14 test
        self.split_train = 0
        self.split_val = 0

        self.DEBUG_TURN_OFF_BALANCING = True
def __init__(self, settings, init_source=1):
    self.settings = settings
    self.dataLoader = DataLoader.DataLoader(settings)
    self.debugger = Debugger.Debugger(settings)

    if init_source == 1:
        self.init_from_stable_datasets()
    else:
        print("Init manually from data and labels")
        self.datasetInstance = None
        self.dataPreprocesser = None
def open_debugger(self):
    if self.interp.rpcclt:
        dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt, self)
    else:
        dbg_gui = Debugger.Debugger(self)
    self.interp.setdebugger(dbg_gui)
    dbg_gui.load_breakpoints()
    sys.ps1 = "[DEBUG ON]\n>>> "
    self.showprompt()
    self.set_debugger_indicator()
def get_unbalanced_dataset(in_memory=False):
    assert in_memory == False
    # prep to move the dataset to >> /cluster/work/igp_psr/ruzickav <<
    # instead of loading indiv files, load batches in h5 files

    from ActiveLearning.LargeDatasetHandler_AL import LargeDatasetHandler_AL
    import Settings

    # init structures
    import mock
    args = mock.Mock()
    args.name = "test"

    settings = Settings.Settings(args)
    WholeDataset = LargeDatasetHandler_AL(settings)

    # load paths of our favourite dataset!
    import DataLoader, DataPreprocesser, Debugger
    import DatasetInstance_OurAerial

    dataLoader = DataLoader.DataLoader(settings)
    debugger = Debugger.Debugger(settings)

    datasetInstance = DatasetInstance_OurAerial.DatasetInstance_OurAerial(
        settings, dataLoader, "256_cleanManual")

    # ! this one loads them all (CHECK: would some be deleted?)
    paths = datasetInstance.load_dataset_ONLY_PATHS_UPDATE_FROM_THE_OTHER_ONE_IF_NEEDED()
    lefts_paths, rights_paths, labels_paths = paths
    print("Paths: L,R,Y ", len(lefts_paths), len(rights_paths), len(labels_paths))

    WholeDataset.initialize_from_just_paths(paths)

    if in_memory:
        WholeDataset.keep_it_all_in_memory()

    npy_path = settings.large_file_folder + "datasets/OurAerial_preloadedImgs_unBALCLASS.npy"

    I_WANT_TO_RECOMPUTE_THE_LABELS = False
    if I_WANT_TO_RECOMPUTE_THE_LABELS:
        assert False  # don't want to mistakenly recompute these ...
        WholeDataset.compute_per_tile_class_in_batches()
        WholeDataset.save_per_tile_class(npy_path)

    WholeDataset.load_per_tile_class(npy_path)

    WholeDataset.report()
    return WholeDataset
def simulate(canvas):
    global theCanvas
    theCanvas = canvas

    mname = canvas.statusbar.getState(StatusBar.MODEL)[1][0]
    if not mname:
        mname = "Nonamed.des"
    else:
        if mname.endswith(".py"):
            mname = mname[:len(mname) - 3]
        mname = mname + ".des"

    global sc
    sc = generate_description(canvas, 0)

    global debugger
    debugger = Debugger()

    global eventhandler
    eventhandler = EventHandler(mname, callback=debugger.EventDebugger,
                                use_gui=1, modeltext=sc["desc"])
    eventhandler.final.append("SVMAToM3Plugin.finalize_simulation()")
    debugger.CustomizeEvent(event_callback, None, 1)

    global root
    root = canvas.ASGroot.listNodes

    DefaultInterpreter.runsource("eventhandler=SVMAToM3Plugin.eventhandler")
    DefaultInterpreter.runsource("debugger=SVMAToM3Plugin.debugger")

    debugger.SetEventHandler(eventhandler)
    eventhandler.run_initializer()
    highlight_states(eventhandler.state, sc)
    highlight_trans(eventhandler, sc, root)

    # Cannot start the Tk mainloop again
    # eventhandler.run_interactor()
    DefaultInterpreter.runsource("setup_gui_debugger(eventhandler, debugger, 0, 0)")
def __init__(self, settings, create_inmemory_or_ondemand="ondemand"):
    self.settings = settings
    self.KEEP_IT_IN_MEMORY_OR_LOAD_ON_DEMAND = create_inmemory_or_ondemand
    #self.KEEP_IT_IN_MEMORY_OR_LOAD_ON_DEMAND = "inmemory"  # or "ondemand"

    self.data_in_memory = {}
    self.labels_in_memory = {}

    self.N_of_data = None
    self.indices = None  # Array of indices, doesn't have to be sorted
    self.original_indices = None  # Array of indices as they were in their original order (which coincidentally was range(N))
    # will be used as a reference to which indices have been removed ...
    # Used only with the "RemainingUnlabeledSet" (which never has items added, only slowly popped)

    self.paths = [{}, {}, {}]  # these should also be dictionaries so that you get path from the idx
    self.dataaug_descriptors = {}

    self.per_tile_class = {}  # class "change" or "no-change" - in some cases we will precompute these!
    self.has_per_tile_class_computed = False  # for balance stats

    self.debugger = Debugger.Debugger(settings)
def start_remote_debugger(rpcclt, pyshell):
    """Start the subprocess debugger, initialize the debugger GUI and RPC link.

    Request the RPCServer start the Python subprocess debugger and link.  Set
    up the Idle side of the split debugger by instantiating the IdbProxy,
    debugger GUI, and debugger GUIAdapter objects and linking them together.

    Register the GUIAdapter with the RPCClient to handle debugger GUI
    interaction requests coming from the subprocess debugger via the GUIProxy.

    The IdbAdapter will pass execution and environment requests coming from
    the Idle debugger GUI to the subprocess debugger via the IdbProxy.
    """
    global idb_adap_oid
    idb_adap_oid = rpcclt.remotecall("exec", "start_the_debugger",
                                     (gui_adap_oid,), {})
    idb_proxy = IdbProxy(rpcclt, pyshell, idb_adap_oid)
    gui = Debugger.Debugger(pyshell, idb_proxy)
    gui_adap = GUIAdapter(rpcclt, gui)
    rpcclt.register(gui_adap_oid, gui_adap)
    return gui
def main_loop(args):
    print(args)

    settings = Settings.Settings(args)
    history = History.History(settings)
    connection = Connection.Connection(settings, history)
    #if connection.failed: return -1
    if connection.hard_stop: return -1

    cropscoordinates = CropsCoordinates.CropsCoordinates(settings, history)
    videocapture = VideoCapture.VideoCapture(settings, history)
    evaluation = Evaluation.Evaluation(settings, connection, cropscoordinates, history)
    attentionmodel = AttentionModel.AttentionModel(settings, cropscoordinates, evaluation, history)
    postprocess = Postprocess.Postprocess(settings, history)
    renderer = Renderer.Renderer(settings, history)
    debugger = Debugger.Debugger(settings, cropscoordinates, evaluation)

    settings.save_settings()
    settings.set_debugger(debugger)

    for frame, next_frames, frame_number in videocapture.frame_generator_thread_loading():
        settings.frame_number = frame_number

        print("frame: ", frame[2])
        for i in range(len(next_frames)):
            print("next_frames", i, ": ", next_frames[i][2], next_frames[i][0], next_frames[i][2:])

        attention_coordinates = cropscoordinates.get_crops_coordinates('attention')
        #debugger.debug_coordinates_in_frame(attention_coordinates, frame[1], 'attention')

        attention_evaluation = evaluation.evaluate_attention_with_precomputing(
            frame_number, attention_coordinates, frame, 'attention', next_frames)
        # attention_evaluation starts in attention crops space (size of frame downscaled for attention
        # evaluation so that we can cut crops of 608x608 from it easily)

        projected_evaluation = cropscoordinates.project_evaluation_back(attention_evaluation, 'attention')
        #debugger.debug_evaluation_to_bboxes_after_reprojection(projected_evaluation, frame[1], 'attention', 'afterRepro')
        # projected_evaluation is now in original image space

        evaluation_coordinates = cropscoordinates.get_crops_coordinates('evaluation')
        # evaluation_coordinates are in evaluation space (size of frame downscaled for regular
        # evaluation so that we can cut crops of 608x608 from it easily)
        #debugger.debug_coordinates_in_frame(evaluation_coordinates, frame[1], 'evaluation')

        active_coordinates = attentionmodel.get_active_crops_intersections(
            projected_evaluation, evaluation_coordinates, frame)
        #debugger.debug_coordinates_in_frame(active_coordinates, frame[1], 'evaluation', "__"+str(settings.frame_number)+'activeonly')

        if len(active_coordinates) == 0:
            print("Nothing left active - that's possibly ok, skip")
            renderer.render([], frame)
            history.report_skipped_final_evaluation(frame_number)
            continue

        final_evaluation = evaluation.evaluate(active_coordinates, frame, 'evaluation', frame_number)
        # final_evaluation is in evaluation space

        projected_final_evaluation = cropscoordinates.project_evaluation_back(final_evaluation, 'evaluation')
        # projected back to original space

        projected_active_coordinates = cropscoordinates.project_coordinates_back(active_coordinates, 'evaluation')

        processed_evaluations = postprocess.postprocess(projected_active_coordinates, projected_final_evaluation)
        #debugger.debug_evaluation_to_bboxes_after_reprojection(processed_evaluations, frame[1], 'finalpostprocessed'+frame[0][-8:-4])

        renderer.render(processed_evaluations, frame)

        history.tick_loop(frame_number, True)

    history.save_whole_history_and_settings()
entitysprite.add(entity)

enemysprite = pygame.sprite.Group()

playersprite = createPlayerSprite(500, 350, playerTable)  # create sprite group for player
player = playersprite.sprites()[0]

enemy = Enemy(600, 600, player, enemyTable)
enemysprite.add(enemy)
enemy = Enemy(400, 600, player, enemyTable)
enemysprite.add(enemy)

inv = Inv(400, 300, "inventory2.png", table, player)
debugger = Debugger(player)

test = pygame.image.load("background.png").convert_alpha()
button = pygame.image.load("button.png").convert_alpha()
button2 = pygame.image.load("buttonExit.png").convert_alpha()
pauseMenu = Menu(button, button2, screen, test)

input = InputHandler(mapsprite, playersprite, entitysprite, enemysprite,
                     inv, player, debugger, pauseMenu)

while True:
    input.poll()  # check input and respond
    collisionResponse()

    playersprite.update(0)
    mapsprite.update(0)  # update map
    entitysprite.update(0)
    enemysprite.update(0)

    mapsprite.draw(screen)  # draw map
def __init__(self, settings):
    self.settings = settings
    self.debugger = Debugger.Debugger(settings)
def __init__(self, verbose):
    self.debugger = Debugger(verbose=verbose)
    self.fileManager = FileManager(verbose=verbose)
def drop_test(file, params, show_accel=True, show_pos=False):
    # Inputs - file name and structure parameters
    # Returns - max acceleration

    # Load up structure
    if show_pos:
        Viz = Debugger()

    load_file = file
    save_file = file

    drop_h = params[0]  # dm
    strut_K = params[1]
    elastic_K = params[2]
    strut_L = params[3]
    elastic_L = params[4]

    K, L, X = loadFusionStructure(load_file, strut_K, elastic_K, strut_L, elastic_L)

    # find stability
    K, L, X = find_stability(K, L, X)
    # K, L, X = find_stability(K, L, X, Viz, display_time=1)

    overwrite_fusion360_file(load_file, X, K, L, strut_K, elastic_K)

    # Save back to YAML
    save_YAML(X, K, save_file)
    save_DROP_YAML(save_file, translation=[0, drop_h, 0])  # dm

    run_bullet(save_file, 3)
    t, x, v, a = parse_data()
    max_a, a_norm = get_max_a(a)
    L_actual = get_actual_L(X)

    if show_accel:
        plt.figure(5)
        plt.plot(t, a_norm)
        drop_height = drop_h / 10  # dm -> m
        exp_t = np.sqrt(2 * drop_height / 9.81)
        ax = plt.gca()
        ax.set_xlim([exp_t - 0.1, exp_t + 0.1])
        ax.text(0.9, 0.9, 'max: ' + str(round(max_a, 3)) + " m/s^2",
                horizontalalignment='center', verticalalignment='center',
                transform=ax.transAxes)

    if show_pos:
        Viz.clear()
        Viz.draw_Pos(x)
        Viz.display(time=1, azimuth=45, altitude=20, drop_port=True)

    if show_pos or show_accel:
        plt.pause(3)

    data_pkg = dict()
    data_pkg['K'] = K
    data_pkg['L'] = L
    data_pkg['X'] = X
    data_pkg['L_actual'] = L_actual
    data_pkg['max_a'] = max_a
    data_pkg['x'] = x
    data_pkg['v'] = v
    data_pkg['a'] = a
    data_pkg['t'] = t

    return data_pkg
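# Hedged usage sketch (not in the original source). The file name and the
# strut/elastic stiffness values are borrowed from the test script below;
# drop_h and elastic_L are illustrative placeholders, not measured values.
if __name__ == "__main__":
    params = [5, 15000, 190, 0.44, 0.3]  # drop_h [dm], strut_K, elastic_K, strut_L, elastic_L
    data_pkg = drop_test("drone_v4", params, show_accel=True, show_pos=False)
    print("max acceleration:", data_pkg['max_a'], "m/s^2")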
import numpy as np
import unittest
import os
import sys

sys.path.append(os.path.join(os.getcwd(), "utils"))
sys.path.append(os.path.join(os.getcwd(), "solver"))
sys.path.append('/Users/zachyamaoka/Documents/de3_group_project/utils')

import matplotlib.pyplot as plt  # needed for plt.ion() below

from Debugger import *
from load_structure import *
from save_structure import *
from solver_util import *
from solver_func import *

# State vector
Debugger = Debugger()
plt.ion()

K_s = 15000
K_e = 19 * 10

K, L, X = loadFusionStructure("drone_v4", strut_L=0.44, strut_K=K_s,
                              elastic_K=K_e, override_L=True)
K, L, X = find_stability(K, L, X, Debugger, display_time=0, step=0.00001, iter=50000)
def open_debugger(self):
    import Debugger
    self.interp.setdebugger(Debugger.Debugger(self))
    sys.ps1 = "[DEBUG ON]\n>>> "
    self.showprompt()
    self.set_debugger_indicator()
import sys
import socket
import threading

import Adventure
import Debugger
import Network
import Printer

if __name__ == '__main__':
    network = Network.Network()
    debugger = Debugger.Debugger(network)
    printer = Printer.Printer(debugger)
    adventure = Adventure.Adventure(printer, debugger).cmdloop()
def __init__(self, verbose):
    self.debugger = Debugger(verbose=verbose)
from Node import *
from Debugger import *

# init the debugger
debug = Debugger()
debug.enable()


class BinarySearchTree(object):
    """A Binary Search Tree implementation.

    Attributes:
        name: A string representing the BST's name.
        root: A root node which gets initialized to None.
    """

    def __init__(self, name):
        """Create the root node of the BST."""
        debug.printMsg("We Initiated a BST with no root node")
        self.name = name
        self.root = None
        self.size = 0

    def length(self):
        """Returns the length of the BST."""
        return self.size

    def __contains__(self, key):
        """Overload the *in* operator."""
        # The original excerpt is truncated here; a minimal standard BST
        # lookup, assuming Node exposes key/left/right attributes:
        current = self.root
        while current is not None:
            if key == current.key:
                return True
            current = current.left if key < current.key else current.right
        return False
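# Hedged usage sketch (not in the original source): it exercises only what the
# excerpt above defines. No insert/put method is shown, so the tree stays empty.
bst = BinarySearchTree("demo")
print(bst.length())  # -> 0
print(5 in bst)      # -> False, via __contains__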
def debug_module_event(self, event):
    import Debugger
    debugger = Debugger.Debugger(self)
    self.run_module_event(event, debugger)
def get_unbalanced_dataset(in_memory=False):
    assert in_memory == False
    # prep to move the dataset to >> /cluster/work/igp_psr/ruzickav <<
    # instead of loading indiv files, load batches in h5 files

    from ActiveLearning.LargeDatasetHandler_AL import LargeDatasetHandler_AL
    import Settings

    # init structures
    import mock
    args = mock.Mock()
    args.name = "test"

    settings = Settings.Settings(args)
    WholeDataset = LargeDatasetHandler_AL(settings)

    # load paths of our favourite dataset!
    import DataLoader, DataPreprocesser, Debugger
    import DatasetInstance_OurAerial

    dataLoader = DataLoader.DataLoader(settings)
    debugger = Debugger.Debugger(settings)

    datasetInstance = DatasetInstance_OurAerial.DatasetInstance_OurAerial(
        settings, dataLoader, "256_cleanManual")

    # ! this one loads them all (CHECK: would some be deleted?)
    paths = datasetInstance.load_dataset_ONLY_PATHS_UPDATE_FROM_THE_OTHER_ONE_IF_NEEDED()
    lefts_paths, rights_paths, labels_paths = paths
    print("Paths: L,R,Y ", len(lefts_paths), len(rights_paths), len(labels_paths))

    WholeDataset.initialize_from_just_paths(paths)

    if in_memory:
        WholeDataset.keep_it_all_in_memory()

    npy_path = settings.large_file_folder + "datasets/OurAerial_preloadedImgs_unBALCLASS.npy"

    I_WANT_TO_RECOMPUTE_THE_LABELS = False
    if I_WANT_TO_RECOMPUTE_THE_LABELS:
        assert False  # don't want to mistakenly recompute these ...
        WholeDataset.compute_per_tile_class_in_batches()
        WholeDataset.save_per_tile_class(npy_path)

    WholeDataset.load_per_tile_class(npy_path)
    #WholeDataset.report()

    # TODO: CLEANING OF NONSENSICAL ONES (from paths ideally)
    # many more, saved in txt
    f = open('exclude.txt', "r")
    exclusion_list = f.readlines()
    f.close()
    exclusion_list = [str(a).strip() for a in exclusion_list]
    #exclusion_list = ["strip4-2012_3066.PNG"]

    ids_to_pop = []
    for left_path in exclusion_list:
        to_pop = WholeDataset.get_id_from_left_path_fragment(left_path)
        if to_pop is not None:
            ids_to_pop.append(to_pop)
        else:
            print(to_pop, left_path)
            assert to_pop is not None  # shouldn't happen; in that case we are doing something badly, stop

    print("Would like to pop ", len(ids_to_pop), "unwanted data points!")
    removed_items = WholeDataset.pop_items(ids_to_pop)

    WholeDataset.report()
    return WholeDataset