def run_demo(movie_file): """ In this demo, an image is being predicted by a set of predictive encoders looking at different aspects of the input. There is a set of future encoders digesting two 8x8 frames to predict the next one, another set taking 8 4x4 frames to predict 4 subsequent frames and another set taking 32 2x2 frames to predict 16 subsequent frames. Additionally there is one unit taking the whole image as 8x8 block whose internal representations are shared as context with all the other units. The system has feedback connections, more temporal areas feed back to more spatial areas. Also cross-like neighbourhood of lateral projections is instantiated. The simulation is synchronous, runs in single stage. """ filename = "demo03_state.p.gz" if movie_file != "": cam = cv2.VideoCapture(movie_file) else: cam = cv2.VideoCapture(-1) if not cam.isOpened(): logging.error("Either cannot read the input file or no camera found!") exit(1) if os.path.isfile(filename): state_dict = CoreUtils.load_model(filename) else: state_dict = generate_dict() manager = Manager(state_dict, 1000000, cam=cam) CoreUtils.run_model(state_dict, manager) CoreUtils.save_model(state_dict, filename)
def take_snapshot_and_backup(self): if self.prop_dict['readout_learning_rate'][0] == 0: # Unsupervised CoreUtils.save_model( self.prop_dict, "PVM_failsafe_%010d.p.gz" % int(self.prop_dict['N'][0])) to_folder = "PVM_models/%s_%s_%s" % (self.prop_dict['timestamp'], self.prop_dict['name'], self.prop_dict['hash']) from_path = "./PVM_failsafe_%010d.p.gz" % int( self.prop_dict['N'][0]) logging.info("Uploading %s/%s" % (to_folder, from_path[2:])) self.checkpoint_storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) os.remove("./PVM_failsafe_%010d.p.gz" % int(self.prop_dict['N'][0])) else: # Supervised self.signal.reset() # To avoid dataset aliasing CoreUtils.save_model( self.prop_dict, "PVM_state_supervised_%s_%d_%d_%f.p.gz" % (self.dataset_name, self.prop_dict['N'][0], int(self.steps), float(self.prop_dict['readout_learning_rate'][0]))) to_folder = "PVM_models/%s_%s_%s" % (self.prop_dict['timestamp'], self.prop_dict['name'], self.prop_dict['hash']) from_path = "./PVM_state_supervised_%s_%d_%d_%f.p.gz" % ( self.dataset_name, self.prop_dict['N'][0], int(self.steps), float(self.prop_dict['readout_learning_rate'][0])) logging.info("Uploading %s/%s" % (to_folder, from_path[2:])) self.checkpoint_storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) os.remove(from_path)
def process_dream_experiment(self): if self.signal.get_index() == self.prop_dict['flags'][2]: if "stage0" not in self.dream_experiment_data.keys(): self.dream_experiment_data["stage0"] = True logging.info("Dream experiment stage0 has begun") elif "stage1" not in self.dream_experiment_data.keys(): self.dream_experiment_data["stage0"] = False self.dream_experiment_data["stage1"] = True self.prop_dict['flags'][0] = 1 # begin dreaming logging.info("Dream experiment stage1 has begun") self.freeze_learning() for (i, k) in enumerate(self.prop_dict["learning_rates"]): k[0] = -0.00001 else: self.dream_experiment = False self.prop_dict['flags'][0] = 0 # end dreaming CoreUtils.save_model(self.dream_experiment_data, "dream_data.p.gz") self.dream_experiment_data = {} logging.info("Dream experiment has ended") self.un_freeze_learning() # Stage 0 is ongoing. if "stage0" in self.dream_experiment_data.keys() and self.dream_experiment_data["stage0"] is True: if "stage0_data" not in self.dream_experiment_data.keys(): self.dream_experiment_data['stage0_data'] = [] self.dream_experiment_data['stage0_data'].append((self.actual_input_prev.copy(), self.predicted.copy())) # Stage 0 is ongoing. if "stage1" in self.dream_experiment_data.keys() and self.dream_experiment_data["stage1"] is True: if "stage1_data" not in self.dream_experiment_data.keys(): self.dream_experiment_data['stage1_data'] = [] self.dream_experiment_data['stage1_data'].append(self.predicted.copy())
def process_dream_experiment(self): if self.signal.get_index() == self.prop_dict['flags'][2]: if "stage0" not in self.dream_experiment_data.keys(): self.dream_experiment_data["stage0"] = True logging.info("Dream experiment stage0 has begun") elif "stage1" not in self.dream_experiment_data.keys(): self.dream_experiment_data["stage0"] = False self.dream_experiment_data["stage1"] = True self.prop_dict['flags'][0] = 1 # begin dreaming logging.info("Dream experiment stage1 has begun") self.freeze_learning() for (i, k) in enumerate(self.prop_dict["learning_rates"]): k[0] = -0.00001 else: self.dream_experiment = False self.prop_dict['flags'][0] = 0 # end dreaming CoreUtils.save_model(self.dream_experiment_data, "dream_data.p.gz") self.dream_experiment_data = {} logging.info("Dream experiment has ended") self.un_freeze_learning() # Stage 0 is ongoing. if "stage0" in self.dream_experiment_data.keys( ) and self.dream_experiment_data["stage0"] is True: if "stage0_data" not in self.dream_experiment_data.keys(): self.dream_experiment_data['stage0_data'] = [] self.dream_experiment_data['stage0_data'].append( (self.actual_input_prev.copy(), self.predicted.copy())) # Stage 0 is ongoing. if "stage1" in self.dream_experiment_data.keys( ) and self.dream_experiment_data["stage1"] is True: if "stage1_data" not in self.dream_experiment_data.keys(): self.dream_experiment_data['stage1_data'] = [] self.dream_experiment_data['stage1_data'].append( self.predicted.copy())
def __init__(self, filename="", remote_filename="", cores="4", storage=None, steps_per_frame=1): """ Initialize the tracker """ self.name = 'PVMtracker' if filename == "": filename = storage.get(remote_filename) self.prop_dict = CoreUtils.load_model(filename) logging.info("Loaded the dictionary %s", filename) PVM_Create.upgrade_dictionary_to_ver1_0(self.prop_dict) self.prop_dict['num_proc'] = int(cores) for k in range(len(self.prop_dict['learning_rates'])): self.prop_dict['learning_rates'][k][0] = 0.0 logging.info("Setting learning rate in layer %d to zero" % k) self.prop_dict["readout_learning_rate"][0] = 0.0 logging.info("Setting readout learning rate to zero") self.manager = Manager(self.prop_dict, 1000) self.executor = CoreUtils.ModelExecution(prop_dict=self.prop_dict, manager=self.manager, port=9100) self.executor.start(blocking=False) self.threshold = 32 self.image_size = self.prop_dict['input_array'].shape[:2][::-1] self.readout_heatmap = np.zeros(self.image_size, dtype=np.float) self.step_per_frame = steps_per_frame
def do_dump(self, line): """ Dump the state of the current simulation to a given file. Warning, this method will attempt to pause a simulation and wait 1s, but is not guaranteed to save a consistent state. Use only in an emergency. """ self.dict['paused'][0] = PVM_Create.PVM_PAUSE time.sleep(1) import PVM_framework.CoreUtils as CoreUtils CoreUtils.save_model(self.dict, line) self.dict['paused'][0] = PVM_Create.PVM_RESUME
def run_demo(): """ In this simple demo the crticial temperature Ising model is run synchronously by a set of units on a large 1000x1000 domain. To make things fast the worker code if written in cython. :return: """ filename = "demo01_state.p.gz" if os.path.isfile(filename): sate_dict = CoreUtils.load_model(filename) else: sate_dict = generate_dict() manager = Manager(sate_dict, 1000000) CoreUtils.run_model(sate_dict, manager) CoreUtils.save_model(sate_dict, filename)
def __init__(self, filename): self.file = open(filename, mode="rb") self._has_frames = True try: self._header = CoreUtils.load_legacy_pickle_file(self.file) except EOFError: self._has_frames = False
def frames(self): """ A generator call for returning the collection of frames. :Example: :: mr = LabeledMovieReader("./movie_test.pkl") for Frame in mr.frames(): img = Frame.get_image() cv2.imshow("Image", img) cv2.waitKey(int(1000/mr.fps)) :return: a frame iterator """ ReferenceFrame = LabeledMovieFrame() ver = ReferenceFrame.version warning_message = True while True: try: F = CoreUtils.load_legacy_pickle_file(self.file) if F.version < ver: if warning_message: print "This movie uses old version frames (%2.2f), needs upgrading" % F.version print "Performance will be affected!" warning_message = False F = LabeledMovieFrame.upgrade_to_latest_version(F) yield F except EOFError: self._has_frames = False break
def take_snapshot_and_backup(self): if self.prop_dict['readout_learning_rate'][0] == 0: # Unsupervised CoreUtils.save_model(self.prop_dict, "PVM_failsafe_%010d.p.gz" % int(self.prop_dict['N'][0])) to_folder = "PVM_models/%s_%s_%s" % (self.prop_dict['timestamp'], self.prop_dict['name'], self.prop_dict['hash']) from_path = "./PVM_failsafe_%010d.p.gz" % int(self.prop_dict['N'][0]) logging.info("Uploading %s/%s" % (to_folder, from_path[2:])) self.checkpoint_storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) os.remove("./PVM_failsafe_%010d.p.gz" % int(self.prop_dict['N'][0])) else: # Supervised self.signal.reset() # To avoid dataset aliasing CoreUtils.save_model(self.prop_dict, "PVM_state_supervised_%s_%d_%d_%f.p.gz" % (self.dataset_name, self.prop_dict['N'][0], int(self.steps), float(self.prop_dict['readout_learning_rate'][0]))) to_folder = "PVM_models/%s_%s_%s" % (self.prop_dict['timestamp'], self.prop_dict['name'], self.prop_dict['hash']) from_path = "./PVM_state_supervised_%s_%d_%d_%f.p.gz" % (self.dataset_name, self.prop_dict['N'][0], int(self.steps), float(self.prop_dict['readout_learning_rate'][0])) logging.info("Uploading %s/%s" % (to_folder, from_path[2:])) self.checkpoint_storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) os.remove(from_path)
def run_demo(movie_file): """ In this demo a simple future/predictive encoder is being instantiated to predict a camera image based on two previous frames. """ filename = "demo02_state.p.gz" if movie_file != "": cam = cv2.VideoCapture(movie_file) else: cam = cv2.VideoCapture(-1) if not cam.isOpened(): logging.error("Either cannot read the input file or no camera found!") exit(1) if os.path.isfile(filename): dict = CoreUtils.load_model(filename) else: dict = generate_dict() manager = Manager(dict, 1000, cam=cam) CoreUtils.run_model(dict, manager) CoreUtils.save_model(dict, filename)
def run_demo(): """ In this very simple demo a set of workers operate on a 500x500 image domain by randomly flipping selected bits. To make things faster the bit/byte flipping function is written in cython. :return: """ filename = "demo00_state.p.gz" if os.path.isfile(filename): state_dict = CoreUtils.load_model(filename) else: state_dict = generate_dict() manager = Manager(state_dict, 1000) executor = CoreUtils.ModelExecution(prop_dict=state_dict, manager=manager) executor.start(blocking=True) CoreUtils.save_model(state_dict, filename) print("Saving and running again in non blocking mode") executor.start(blocking=False) while manager.running(): executor.step() executor.finish() CoreUtils.save_model(state_dict, filename)
def run_demo(movie_file): """ In this demo a future/predictive encoder is being instantiated to predict a camera image based on two previous frames. The encoder predicts the signal and its own error on that signal, that is additional set of units are trying to predict the magnitude of error between the prediction and the signal. In addition the hidden layer activations from the previous step of execution are used as the context block. Second order error is calculated as the error of the error prediction. Also, the learning rate of the primary signal is modulated by the magnitude of the second order error. """ if movie_file != "": cam = cv2.VideoCapture(movie_file) else: cam = cv2.VideoCapture(-1) if not cam.isOpened(): logging.error("Either cannot read the input file or no camera found!") exit(1) filename = "demo04_state.p.gz" if os.path.isfile(filename): state_dict = CoreUtils.load_model(filename) else: state_dict = generate_dict() manager = Manager(state_dict, 1000000, cam=cam) CoreUtils.run_model(state_dict, manager) CoreUtils.save_model(state_dict, filename)
def __init__(self, stdin=None, stdout=None, infilename=None, dict=None, pprint=False, sprint=False, nans=None, gt=None, lt=None, abss=None, filter_name=None, upgrade=False): """ Console object used to traverse the simulation dictionary. Can be used standalone on a saved distinary or live on a running simulation by logging into the debug port. :param stdin: :param stdout: :param infilename: :param dict: :param pprint: :param sprint: :param nans: :param gt: :param lt: :param abss: :param filter_name: :return: """ if stdin is not None: self.stdin = stdin else: self.stdin=sys.stdin self.use_rawinput=True if stdout is not None: self.stdout = stdout else: self.stdout=sys.stdout cmd.Cmd.__init__(self, stdin=self.stdin, stdout=self.stdout) if dict is None and infilename is None: sys.stderr.write("dict and infilename cannot be empty at the same time") if dict is not None: self.dict = dict self.filename = str(infilename) elif os.path.exists(infilename) and os.path.isfile(infilename): # This import below needs to go here as otherwise it would # lead to a circular import and failure. import PVM_framework.CoreUtils as CoreUtils self.dict = CoreUtils.load_model(infilename) if upgrade: import PVM_framework.PVM_Create as PVM_Create PVM_Create.upgrade_dictionary_to_ver1_0(self.dict) else: sys.stderr.write("Input file not found\n") self.pprint = pprint self.sprint = sprint self.filename = str(infilename) self.gt = None self.lt = None self.nans = None self.filter_name = None self.abss=None if gt is not None: self.gt = float(gt) if lt is not None: self.lt = float(lt) if nans is not None: self.nans = True if filter_name is not None: self.filter_name = filter_name if abss is not None: self.abss = float(abss) self.text_bold = '\033[1m' self.text_end = '\033[0m' self.prompt = self.text_bold + str(self.dict['name'])+"/$ " + self.text_end self.current_element = self.dict self.pwd = "/" logging.info("Created an interactive dictionary explorer session")
def run_model(evaluate=False, filename="", cores="", name="", description="", remote="", display=False, dataset="", meta={}, options_given={}, storage=None, port="9000", checkpoint=True, upgrade_only=False): """ In this demo a future/predictive encoder is being instantiated to predict a camera image based on two previous frames. The system is built into a three layer hierarchy in which each next layer is predicting the hidden activations of the lower one. In addition the errors from each later are being backpropagated down to the previous layer. In addition to that, errors generated at context blocka are also being backpropagated to the originating unit. Consequently the error and signals flows in both directions through the entire system. """ if options_given == {} and filename == "" and remote == "": logging.error("No options were given, don't know what to run! Try running with -h option.") exit() options = PVM_options.parse_options(options_given) if remote != "": filename = storage.get(remote) logging.info("Loaded a remote simulation dict %s" % remote) logging.info("Following options were given: %s" % json.dumps(options, sort_keys=True, indent=4)) if os.path.isfile(filename): simulation_dict = CoreUtils.load_model(filename) if "options" in simulation_dict: options = PVM_options.parse_options(options_given, options_in_the_dict=simulation_dict['options']) else: options = PVM_options.parse_options(options_given) logging.info("Loaded the dictionary") if cores is not "": simulation_dict['num_proc'] = int(cores) else: simulation_dict['num_proc'] = min(2*mp.cpu_count()/3, simulation_dict["stage0_size"]/2) PVM_Create.upgrade(simulation_dict) PVM_Create.upgrade_dictionary_to_ver1_0(simulation_dict) logging.info("Running on %d cpu's" % simulation_dict['num_proc']) else: options = PVM_options.parse_options(options) simulation_dict = PVM_Create.generate_dict_options(name=name, description=description, options=options ) if cores is not "": simulation_dict['num_proc'] = 
int(cores) else: if options["model_type"] != "tiny": simulation_dict['num_proc'] = 2*mp.cpu_count()/3 else: simulation_dict['num_proc'] = 1 logging.info("Generated the dictionary") logging.info("Full set of options: %s" % json.dumps(options, sort_keys=True, indent=4)) if options["new_name"] != "": simulation_dict['name'] = options["new_name"] options["new_name"] = "" if "disable_lateral" in options.keys() and options["disable_lateral"] == "1": simulation_dict["disable_lateral"] = True if "disable_feedback" in options.keys() and options["disable_feedback"] == "1": simulation_dict["disable_feedback"] = True if dataset == "": dataset = options["dataset"] else: options["dataset"] = dataset PVM_set = PVM_datasets.PVMDataset(dataset, storage=storage) PVM_Create.apply_options(simulation_dict, options) if upgrade_only: CoreUtils.save_model(simulation_dict, filename) to_folder = "PVM_models/%s_%s_%s" % (simulation_dict['timestamp'], simulation_dict['name'], simulation_dict['hash']) from_path = filename storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) return if options['supervised'] == "1": logging.info("Running in the supervised mode") for (i, k) in enumerate(simulation_dict['learning_rates']): k[0] = 0 logging.info("Setting learning rate %d to zero") simulation_dict['readout_learning_rate'][0] = float(options["supervised_rate"]) logging.info("Setting additional_learning_rate to %f" % simulation_dict['readout_learning_rate'][0]) if not evaluate: status_file = "/tmp/%s_%s_%s" % (simulation_dict['timestamp'], simulation_dict['name'], simulation_dict['hash']) f = open(status_file, "w") branch = subprocess.Popen('git rev-parse --abbrev-ref HEAD', shell=True, stdout=subprocess.PIPE).stdout.read() f.write("BRANCH=%s\n" % cleanup(branch)) f.write("TIMESTAMP=%s\n" % simulation_dict['timestamp']) f.write("NAME=%s\n" % simulation_dict['name']) f.write("HASH=%s\n" % simulation_dict['hash']) f.write("DATASET=%s\n" % dataset) f.write("OPTIONS=%s\n" % 
json.dumps(options)) f.close() remove_artifact_files = True if meta == {}: logging.info("Not running on an Amazon EC2 instance, apparently") logging.info("Not running on an Amazon EC2 instance, apparently: So, not automatically removing downloaded artifact files.") remove_artifact_files = False elif options['supervised'] != '1': host = meta['public-ipv4'] logging.info("Running on amazon instance %s. Adding active job" % host) storage.put(from_path=status_file, to_folder='DARPA/active_jobs/', overwrite=True) # Train signal = PVM_SignalProvider.SimpleSignalProvider(files=PVM_set.training, storage=storage, frame_resolution=(simulation_dict['input_array'].shape[1], simulation_dict['input_array'].shape[0]), heatmap_resolution=simulation_dict['readout_arrays'][0].shape[:2][::-1], channel="default", remove_files=remove_artifact_files, reverse=(int(options['reverse']) > 0)) manager = Manager(simulation_dict, int(options['steps']), signal_provider=signal, record=False, video_recorder=PVM_display_helper.VideoRecorder(rec_filename="PVM_recording.avi"), do_display=display, checkpoint=checkpoint, checkpoint_storage=storage, dataset_name=dataset) CoreUtils.run_model(simulation_dict, manager, port=int(port)) if filename != "" and options['supervised'] != "1": CoreUtils.save_model(simulation_dict, filename) to_folder = "PVM_models/%s_%s_%s" % (simulation_dict['timestamp'], simulation_dict['name'], simulation_dict['hash']) from_path = filename storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) if remove_artifact_files: os.remove(from_path) elif options['supervised'] == "1": CoreUtils.save_model(simulation_dict, "PVM_state_supervised_%s_%d_%d_%f.p.gz" % (dataset, simulation_dict['N'][0], int(options['steps']), float(options['supervised_rate']))) to_folder = "PVM_models/%s_%s_%s" % (simulation_dict['timestamp'], simulation_dict['name'], simulation_dict['hash']) from_path = "./PVM_state_supervised_%s_%d_%d_%f.p.gz" % (dataset, simulation_dict['N'][0], 
int(options['steps']), float(options['supervised_rate'])) storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) if remove_artifact_files: os.remove(from_path) else: CoreUtils.save_model(simulation_dict, "PVM_state_final.p.gz") to_folder = "PVM_models/%s_%s_%s" % (simulation_dict['timestamp'], simulation_dict['name'], simulation_dict['hash']) from_path = "./PVM_state_final.p.gz" storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) if remove_artifact_files: os.remove(from_path) else: print "Evaluating the system" logging.info("Evaluating the system") to_folder = "PVM_models/%s_%s_%s/eval_%09d/" % (simulation_dict['timestamp'], simulation_dict['name'], simulation_dict['hash'], simulation_dict['N']) # Evaluate signal = PVM_SignalProvider.SimpleSignalProvider(files=PVM_set.testing, storage=storage, frame_resolution=(simulation_dict['input_array'].shape[1], simulation_dict['input_array'].shape[0]), heatmap_resolution=simulation_dict['readout_array_float00'].shape[:2][::-1], channel="default", reverse=(int(options['reverse']) > 0)) name = "PVM_train_eval_%s_%09d_test_combined.avi" % (simulation_dict['hash'], simulation_dict['N']) manager = Manager(simulation_dict, steps_to_run=-1, signal_provider=signal, record=True, video_recorder=PVM_display_helper.VideoRecorder(rec_filename=name), do_display=display, evaluate=True, collect_error=True) manager.freeze_learning() CoreUtils.run_model(simulation_dict, manager, port=int(port)) from_path = name storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) os.remove(name) logging.info("Finished on test files") # Individual files for (i, test) in enumerate(PVM_set.all): print "Running on %s" % test logging.info("Running on %s" % test) name = "PVM_eval_%s_%09d_%01d_%s.avi" % (simulation_dict['hash'], simulation_dict['N'], i, test[1]) signal = PVM_SignalProvider.SimpleSignalProvider(files=[test], storage=storage, frame_resolution=(simulation_dict['input_array'].shape[1], 
simulation_dict['input_array'].shape[0]), heatmap_resolution=simulation_dict['readout_array_float00'].shape[:2][::-1], channel="default") manager = Manager(simulation_dict, steps_to_run=-1, signal_provider=signal, record=True, video_recorder=PVM_display_helper.VideoRecorder(rec_filename=name), do_display=display, evaluate=True) CoreUtils.run_model(simulation_dict, manager, port=int(port)) from_path = name storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) os.remove(name) logging.info("Finished on %s" % test)
def plot_model(filename, remote, compare, display): ts = Storage() if remote != "": filename = ts.get(remote) if compare != "": filename1 = ts.get(compare) simulation_dict1 = CoreUtils.load_model(filename1) simulation_dict = CoreUtils.load_model(filename) num_layers = 0 for k in simulation_dict.keys(): if "state_array" in k: num_layers += 1 plt.figure(figsize=(7, 5)) colors = ['r', 'g', 'b', 'k', 'c', 'm', 'y'] ids = np.where(simulation_dict['error_log'][0, :] > 0)[0] window = (ids.shape[0])/200 small_window = (ids.shape[0])/40 for r in range(num_layers): # plt.suptitle("MSE %s" % (simulation_dict['name'])) plt.plot(simulation_dict['error_log'][0, ids][:-small_window], runningMeanFast(simulation_dict['error_log'][r+1, ids], small_window)[:-small_window], lw=1, c=colors[r % 7], label="%s l. %d" % (simulation_dict['name'], r)) if compare: for r in range(num_layers): plt.plot(simulation_dict1['error_log'][0, ids][:-small_window], runningMeanFast(simulation_dict1['error_log'][r+1, ids], small_window)[:-small_window], linestyle="--", lw=1, c=colors[r % 7], label="%s l. 
%d" % (simulation_dict1['name'], r)) plt.xlabel("Training time") plt.ylabel("MSE (averaged in %d step bins)" % (1000*small_window)) plt.title("Learning curve (MSE) - individual layers") plt.grid(True) plt.legend(prop={'size': 9}) pdf_file = "PVM_%s_%s.pdf" % (simulation_dict['name'], simulation_dict['hash']) if compare != "": pdf_file = "PVM_%s_%s_comp_%s.pdf" % (simulation_dict['name'], simulation_dict['hash'], simulation_dict1['name']) plt.savefig(pdf_file) to_folder = "DARPA/Simulations/%s_%s_%s/" % (simulation_dict['timestamp'], simulation_dict['name'], simulation_dict['hash']) ts.put(from_path=pdf_file, to_folder=to_folder, overwrite=True) plt.figure(figsize=(7, 5)) colors = ['r', 'g', 'b', 'k', 'c', 'm', 'y'] ids = np.where(simulation_dict['error_log'][0, :] > 0)[0] summed = np.zeros_like(simulation_dict['error_log'][1, ids]) for r in range(num_layers): summed += simulation_dict['error_log'][r+1, ids] plt.plot(simulation_dict['error_log'][0, ids][:-small_window], runningMeanFast(summed, small_window)[:-small_window], lw=1, c='r', label="MSE All layers, model %s" % simulation_dict['name']) if compare != "": summed = np.zeros_like(simulation_dict1['error_log'][1, ids]) for r in range(num_layers): summed += simulation_dict1['error_log'][r+1, ids] plt.plot(simulation_dict1['error_log'][0, ids][:-small_window], runningMeanFast(summed, small_window)[:-small_window], lw=1, c='b', label="MSE All layers, model %s" % simulation_dict1['name']) plt.xlabel("Training time") plt.ylabel("MSE (averaged in %d step bins)" % (1000*small_window)) plt.title("Learning curve (MSE) - whole system") plt.grid(True) plt.legend(prop={'size': 9}) pdf_file = "PVM_%s_%s_summed.pdf" % (simulation_dict['name'], simulation_dict['hash']) if compare != "": pdf_file = "PVM_%s_%s_summed_comp_%s.pdf" % (simulation_dict['name'], simulation_dict['hash'], simulation_dict1['name']) plt.savefig(pdf_file) to_folder = "DARPA/Simulations/%s_%s_%s/" % (simulation_dict['timestamp'], simulation_dict['name'], 
simulation_dict['hash']) ts.put(from_path=pdf_file, to_folder=to_folder, overwrite=True) # pdf_file = plot_weight_dists(simulation_dict) # ts.put(from_path=pdf_file, to_folder=to_folder, overwrite=True) # if compare != "": # pdf_file = plot_weight_dists(simulation_dict1) # ts.put(from_path=pdf_file, to_folder=to_folder, overwrite=True) if display: plt.show()
def run_model(evaluate=False, filename="", cores="", name="", description="", remote="", display=False, dataset="", meta={}, options_given={}, storage=None, port="9000", checkpoint=True, upgrade_only=False): """ In this demo a future/predictive encoder is being instantiated to predict a camera image based on two previous frames. The system is built into a three layer hierarchy in which each next layer is predicting the hidden activations of the lower one. In addition the errors from each later are being backpropagated down to the previous layer. In addition to that, errors generated at context blocka are also being backpropagated to the originating unit. Consequently the error and signals flows in both directions through the entire system. """ if options_given == {} and filename == "" and remote == "": logging.error( "No options were given, don't know what to run! Try running with -h option." ) exit() options = PVM_options.parse_options(options_given) if remote != "": filename = storage.get(remote) logging.info("Loaded a remote simulation dict %s" % remote) logging.info("Following options were given: %s" % json.dumps(options, sort_keys=True, indent=4)) if os.path.isfile(filename): simulation_dict = CoreUtils.load_model(filename) if "options" in simulation_dict: options = PVM_options.parse_options( options_given, options_in_the_dict=simulation_dict['options']) else: options = PVM_options.parse_options(options_given) logging.info("Loaded the dictionary") if cores is not "": simulation_dict['num_proc'] = int(cores) else: simulation_dict['num_proc'] = min( 2 * mp.cpu_count() / 3, simulation_dict["stage0_size"] / 2) PVM_Create.upgrade(simulation_dict) PVM_Create.upgrade_dictionary_to_ver1_0(simulation_dict) logging.info("Running on %d cpu's" % simulation_dict['num_proc']) else: options = PVM_options.parse_options(options) simulation_dict = PVM_Create.generate_dict_options( name=name, description=description, options=options) if cores is not "": 
simulation_dict['num_proc'] = int(cores) else: if options["model_type"] != "tiny": simulation_dict['num_proc'] = 2 * mp.cpu_count() / 3 else: simulation_dict['num_proc'] = 1 logging.info("Generated the dictionary") logging.info("Full set of options: %s" % json.dumps(options, sort_keys=True, indent=4)) if options["new_name"] != "": simulation_dict['name'] = options["new_name"] options["new_name"] = "" if "disable_lateral" in options.keys( ) and options["disable_lateral"] == "1": simulation_dict["disable_lateral"] = True if "disable_feedback" in options.keys( ) and options["disable_feedback"] == "1": simulation_dict["disable_feedback"] = True if dataset == "": dataset = options["dataset"] else: options["dataset"] = dataset PVM_set = PVM_datasets.PVMDataset(dataset, storage=storage) PVM_Create.apply_options(simulation_dict, options) if upgrade_only: CoreUtils.save_model(simulation_dict, filename) to_folder = "PVM_models/%s_%s_%s" % (simulation_dict['timestamp'], simulation_dict['name'], simulation_dict['hash']) from_path = filename storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) return if options['supervised'] == "1": logging.info("Running in the supervised mode") for (i, k) in enumerate(simulation_dict['learning_rates']): k[0] = 0 logging.info("Setting learning rate %d to zero") simulation_dict['readout_learning_rate'][0] = float( options["supervised_rate"]) logging.info("Setting additional_learning_rate to %f" % simulation_dict['readout_learning_rate'][0]) if not evaluate: status_file = "/tmp/%s_%s_%s" % (simulation_dict['timestamp'], simulation_dict['name'], simulation_dict['hash']) f = open(status_file, "w") branch = subprocess.Popen('git rev-parse --abbrev-ref HEAD', shell=True, stdout=subprocess.PIPE).stdout.read() f.write("BRANCH=%s\n" % cleanup(branch)) f.write("TIMESTAMP=%s\n" % simulation_dict['timestamp']) f.write("NAME=%s\n" % simulation_dict['name']) f.write("HASH=%s\n" % simulation_dict['hash']) f.write("DATASET=%s\n" % dataset) 
f.write("OPTIONS=%s\n" % json.dumps(options)) f.close() remove_artifact_files = True if meta == {}: logging.info("Not running on an Amazon EC2 instance, apparently") logging.info( "Not running on an Amazon EC2 instance, apparently: So, not automatically removing downloaded artifact files." ) remove_artifact_files = False elif options['supervised'] != '1': host = meta['public-ipv4'] logging.info("Running on amazon instance %s. Adding active job" % host) storage.put(from_path=status_file, to_folder='DARPA/active_jobs/', overwrite=True) # Train signal = PVM_SignalProvider.SimpleSignalProvider( files=PVM_set.training, storage=storage, frame_resolution=(simulation_dict['input_array'].shape[1], simulation_dict['input_array'].shape[0]), heatmap_resolution=simulation_dict['readout_arrays'][0].shape[:2] [::-1], channel="default", remove_files=remove_artifact_files, reverse=(int(options['reverse']) > 0)) manager = Manager(simulation_dict, int(options['steps']), signal_provider=signal, record=False, video_recorder=PVM_display_helper.VideoRecorder( rec_filename="PVM_recording.avi"), do_display=display, checkpoint=checkpoint, checkpoint_storage=storage, dataset_name=dataset) CoreUtils.run_model(simulation_dict, manager, port=int(port)) if filename != "" and options['supervised'] != "1": CoreUtils.save_model(simulation_dict, filename) to_folder = "PVM_models/%s_%s_%s" % (simulation_dict['timestamp'], simulation_dict['name'], simulation_dict['hash']) from_path = filename storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) if remove_artifact_files: os.remove(from_path) elif options['supervised'] == "1": CoreUtils.save_model( simulation_dict, "PVM_state_supervised_%s_%d_%d_%f.p.gz" % (dataset, simulation_dict['N'][0], int( options['steps']), float(options['supervised_rate']))) to_folder = "PVM_models/%s_%s_%s" % (simulation_dict['timestamp'], simulation_dict['name'], simulation_dict['hash']) from_path = "./PVM_state_supervised_%s_%d_%d_%f.p.gz" % ( dataset, 
simulation_dict['N'][0], int( options['steps']), float(options['supervised_rate'])) storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) if remove_artifact_files: os.remove(from_path) else: CoreUtils.save_model(simulation_dict, "PVM_state_final.p.gz") to_folder = "PVM_models/%s_%s_%s" % (simulation_dict['timestamp'], simulation_dict['name'], simulation_dict['hash']) from_path = "./PVM_state_final.p.gz" storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) if remove_artifact_files: os.remove(from_path) else: print "Evaluating the system" logging.info("Evaluating the system") to_folder = "PVM_models/%s_%s_%s/eval_%09d/" % ( simulation_dict['timestamp'], simulation_dict['name'], simulation_dict['hash'], simulation_dict['N']) # Evaluate signal = PVM_SignalProvider.SimpleSignalProvider( files=PVM_set.testing, storage=storage, frame_resolution=(simulation_dict['input_array'].shape[1], simulation_dict['input_array'].shape[0]), heatmap_resolution=simulation_dict['readout_array_float00']. 
shape[:2][::-1], channel="default", reverse=(int(options['reverse']) > 0)) name = "PVM_train_eval_%s_%09d_test_combined.avi" % ( simulation_dict['hash'], simulation_dict['N']) manager = Manager( simulation_dict, steps_to_run=-1, signal_provider=signal, record=True, video_recorder=PVM_display_helper.VideoRecorder(rec_filename=name), do_display=display, evaluate=True, collect_error=True) manager.freeze_learning() CoreUtils.run_model(simulation_dict, manager, port=int(port)) from_path = name storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) os.remove(name) logging.info("Finished on test files") # Individual files for (i, test) in enumerate(PVM_set.all): print "Running on %s" % test logging.info("Running on %s" % test) name = "PVM_eval_%s_%09d_%01d_%s.avi" % ( simulation_dict['hash'], simulation_dict['N'], i, test[1]) signal = PVM_SignalProvider.SimpleSignalProvider( files=[test], storage=storage, frame_resolution=(simulation_dict['input_array'].shape[1], simulation_dict['input_array'].shape[0]), heatmap_resolution=simulation_dict['readout_array_float00']. shape[:2][::-1], channel="default") manager = Manager(simulation_dict, steps_to_run=-1, signal_provider=signal, record=True, video_recorder=PVM_display_helper.VideoRecorder( rec_filename=name), do_display=display, evaluate=True) CoreUtils.run_model(simulation_dict, manager, port=int(port)) from_path = name storage.put(from_path=from_path, to_folder=to_folder, overwrite=True) os.remove(name) logging.info("Finished on %s" % test)
if __name__ == '__main__':
    # Script entry point: load a PVM simulation dictionary (from a local file
    # given with -f, or downloaded from remote storage with -r), upgrade it to
    # dictionary version 1.0, optionally rename it, then save and upload a
    # failsafe checkpoint back to storage.
    logging.basicConfig(filename="PVM_upgrade.log",
                        level=logging.DEBUG,
                        format='%(asctime)s : %(levelname)s : %(thread)d PVM_run : %(message)s ')
    logging.getLogger().addHandler(logging.StreamHandler())
    logging.info("###################################################################")
    logging.info(" STARTING NEW RUN ")
    logging.info("###################################################################")
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--file", help="File to load", type=str, default="")
    parser.add_argument("-r", "--remote", help="Download and run a remote simulation", type=str, default="")
    parser.add_argument("-d", "--destination", help="Where to save the model", type=str, default="PVM_models/")
    parser.add_argument("-n", "--name", help="New name", type=str, default="")
    args = parser.parse_args()
    Storage = PVM_Storage.Storage()
    # Bug fix: previously --file was parsed but never used, and `filename`
    # was unbound (NameError) whenever --remote was not supplied.
    filename = args.file
    if args.remote != "":
        filename = Storage.get(args.remote)
        logging.info("Loaded a remote simulation dict %s" % args.remote)
    if os.path.isfile(filename):
        simulation_dict = CoreUtils.load_model(filename)
        logging.info("Loaded the dictionary")
        # In-place upgrade of the loaded state dictionary to format ver 1.0.
        PVM_Create.upgrade_dictionary_to_ver1_0(simulation_dict)
        for k in sorted(simulation_dict.keys()):
            # print() with a single argument behaves identically on
            # Python 2 and Python 3.
            print(k)
        if args.name != "":
            simulation_dict['name'] = args.name
        # Save a failsafe checkpoint named after the current step counter N,
        # upload it next to the model's other artifacts, keyed by
        # timestamp/name/hash.
        CoreUtils.save_model(simulation_dict,
                             "PVM_failsafe_%010d.p.gz" % int(simulation_dict['N'][0]))
        to_folder = "PVM_models/%s_%s_%s" % (simulation_dict['timestamp'],
                                             simulation_dict['name'],
                                             simulation_dict['hash'])
        from_path = "./PVM_failsafe_%010d.p.gz" % int(simulation_dict['N'][0])
        logging.info("Uploading %s/%s" % (to_folder, from_path[2:]))
        Storage.put(from_path=from_path, to_folder=to_folder, overwrite=True)
def plot_model(filename, remote, compare, display):
    """
    Plot the learning curves (MSE over training time) of a PVM model and
    upload the resulting PDF files to storage.

    Produces two figures: one with a curve per layer, and one with the MSE
    summed over all layers. When ``compare`` is given, the second model's
    curves are overlaid (dashed / alternate color) on both figures.

    :param filename: local path to the model state file (overridden when
        ``remote`` is non-empty)
    :param remote: remote storage path of the model; downloaded when non-empty
    :param compare: remote storage path of a second model to overlay, or ""
    :param display: when truthy, show the figures interactively at the end
    """
    ts = Storage()
    if remote != "":
        filename = ts.get(remote)
    if compare != "":
        filename1 = ts.get(compare)
        simulation_dict1 = CoreUtils.load_model(filename1)
    simulation_dict = CoreUtils.load_model(filename)
    num_layers = len(simulation_dict["state_arrays"])
    colors = ['r', 'g', 'b', 'k', 'c', 'm', 'y']
    # Row 0 of error_log holds the step index; keep only entries where an
    # error was actually logged.
    ids = np.where(simulation_dict['error_log'][0, :] > 0)[0]
    # Integer (floor) division, explicit for Python 3 compatibility; guard
    # against a zero-sized window (fewer than 40 samples), which would make
    # every [:-small_window] slice empty and produce blank plots.
    small_window = max(1, ids.shape[0] // 40)

    # ---- Figure 1: per-layer learning curves -------------------------------
    plt.figure(figsize=(7, 5))
    for r in range(num_layers):
        plt.plot(simulation_dict['error_log'][0, ids][:-small_window],
                 runningMeanFast(simulation_dict['error_log'][r + 1, ids],
                                 small_window)[:-small_window],
                 lw=1,
                 c=colors[r % 7],
                 label="%s l. %d" % (simulation_dict['name'], r))
    if compare != "":
        for r in range(num_layers):
            plt.plot(simulation_dict1['error_log'][0, ids][:-small_window],
                     runningMeanFast(simulation_dict1['error_log'][r + 1, ids],
                                     small_window)[:-small_window],
                     linestyle="--",
                     lw=1,
                     c=colors[r % 7],
                     label="%s l. %d" % (simulation_dict1['name'], r))
    plt.xlabel("Training time")
    plt.ylabel("MSE (averaged in %d step bins)" % (1000 * small_window))
    plt.title("Learning curve (MSE) - individual layers")
    plt.grid(True)
    plt.legend(prop={'size': 9})
    pdf_file = "PVM_%s_%s.pdf" % (simulation_dict['name'],
                                  simulation_dict['hash'])
    if compare != "":
        pdf_file = "PVM_%s_%s_comp_%s.pdf" % (simulation_dict['name'],
                                              simulation_dict['hash'],
                                              simulation_dict1['name'])
    plt.savefig(pdf_file)
    to_folder = "DARPA/Simulations/%s_%s_%s/" % (simulation_dict['timestamp'],
                                                 simulation_dict['name'],
                                                 simulation_dict['hash'])
    ts.put(from_path=pdf_file, to_folder=to_folder, overwrite=True)

    # ---- Figure 2: whole-system learning curve (sum over layers) -----------
    plt.figure(figsize=(7, 5))
    summed = np.zeros_like(simulation_dict['error_log'][1, ids])
    for r in range(num_layers):
        summed += simulation_dict['error_log'][r + 1, ids]
    plt.plot(simulation_dict['error_log'][0, ids][:-small_window],
             runningMeanFast(summed, small_window)[:-small_window],
             lw=1,
             c='r',
             label="MSE All layers, model %s" % simulation_dict['name'])
    if compare != "":
        summed = np.zeros_like(simulation_dict1['error_log'][1, ids])
        for r in range(num_layers):
            summed += simulation_dict1['error_log'][r + 1, ids]
        plt.plot(simulation_dict1['error_log'][0, ids][:-small_window],
                 runningMeanFast(summed, small_window)[:-small_window],
                 lw=1,
                 c='b',
                 label="MSE All layers, model %s" % simulation_dict1['name'])
    plt.xlabel("Training time")
    plt.ylabel("MSE (averaged in %d step bins)" % (1000 * small_window))
    plt.title("Learning curve (MSE) - whole system")
    plt.grid(True)
    plt.legend(prop={'size': 9})
    pdf_file = "PVM_%s_%s_summed.pdf" % (simulation_dict['name'],
                                         simulation_dict['hash'])
    if compare != "":
        pdf_file = "PVM_%s_%s_summed_comp_%s.pdf" % (simulation_dict['name'],
                                                     simulation_dict['hash'],
                                                     simulation_dict1['name'])
    plt.savefig(pdf_file)
    # Same destination folder as the per-layer figure.
    ts.put(from_path=pdf_file, to_folder=to_folder, overwrite=True)
    if display:
        plt.show()