def __init__(self, sparkle):
    """Call parent constructors.

    Cooperative super() is not used here, so the explicit call order
    below is the initialization order of the mixin bases.
    """
    VirtManager.__init__(self)
    ModelManager.__init__(self, sparkle)  # only ModelManager takes the sparkle handle
    UdevManager.__init__(self)
    NetworkManager.__init__(self)
def __init__(self, arm="irb_120", gripper="robotiq_85"):
    """Initialize MoveIt commanders for the arm and gripper, cache the
    current end-effector pose, and spawn the simulation models.

    :param arm: MoveIt planning-group name for the arm
    :param gripper: MoveIt planning-group name for the gripper
    """
    roscpp_initialize(sys.argv)
    self.arm = moveit_commander.MoveGroupCommander(arm)
    self.gripper = moveit_commander.MoveGroupCommander(gripper)
    # Cache the current end-effector pose for convenience accessors.
    self.pose = self.arm.get_current_pose().pose
    self.x = self.pose.position.x
    self.y = self.pose.position.y
    self.z = self.pose.position.z
    # Convert the quaternion orientation to roll/pitch/yaw.
    quaternion = (self.pose.orientation.x, self.pose.orientation.y,
                  self.pose.orientation.z, self.pose.orientation.w)
    euler = euler_from_quaternion(quaternion)
    self.roll = euler[0]
    self.pitch = euler[1]
    self.yaw = euler[2]
    # Offset from flange to gripper tip — presumably metres; TODO confirm units.
    self.gripper_length = 0.15
    self.event = threading.Event()  # signals completion of asynchronous moves
    self.movement_finish = False
    self.modelmanager = ModelManager()
    self.modelmanager.spawn_all_model()
    self.plan_trajectory = None
    self.get_workspace()
class Model(object):
    """Speech-model wrapper that delegates data handling and training to the
    project's DataManager / ModelManager and tracks the training-loop count."""

    def __init__(self, metadata, train_output_path="./", test_input_path="./"):
        """
        Initialization for model
        :param metadata: a dict formed like:
            {"class_num": 7,
             "train_num": 428,
             "test_num": 107,
             "time_budget": 1800}
        :param train_output_path: directory for training artifacts
        :param test_input_path: directory holding the test input
        """
        self.done_training = False  # tells the runner to stop calling train()
        self.metadata = metadata
        self.train_loop_num = 0
        log(f'Metadata: {self.metadata}')
        # Managers are created lazily on the first train() call.
        self.data_manager = None
        self.model_manager = None
        self.train_output_path = train_output_path
        self.test_input_path = test_input_path

    @timeit
    def train(self, train_dataset, remaining_time_budget=None):
        """model training on train_dataset.

        :param train_dataset: tuple, (train_x, train_y)
            train_x: list of vectors, input train speech raw data.
            train_y: A `numpy.ndarray` matrix of shape (sample_count, class_num).
                here `sample_count` is the number of examples in this dataset
                as train set and `class_num` is the same as the class_num in
                metadata. The values should be binary.
        :param remaining_time_budget: seconds left; currently unused here.
        """
        if self.done_training:
            return
        self.train_loop_num += 1
        if self.train_loop_num == 1:
            # First call: build the data/model managers once.
            self.data_manager = DataManager(self.metadata, train_dataset)
            self.model_manager = ModelManager(self.metadata, self.data_manager)
        self.model_manager.fit(train_loop_num=self.train_loop_num)
        # Hard cap on the number of training loops.
        if self.train_loop_num > 500:
            self.done_training = True

    @timeit
    def test(self, test_x, remaining_time_budget=None):
        """
        :param test_x: list of vectors, input test speech raw data.
        :param remaining_time_budget: seconds left; currently unused here.
        :return: A `numpy.ndarray` matrix of shape (sample_count, class_num).
            here `sample_count` is the number of examples in this dataset
            as train set and `class_num` is the same as the class_num in
            metadata. The values should be binary.
        """
        # extract test feature
        pred_y = self.model_manager.predict(test_x, is_final_test_x=True)
        result = pred_y
        return result
def get_all_params():
    """Collect every parameter and metric name known to the pipeline.

    Aggregates: step2/step3 parameters, model-manager properties, learning
    metrics, post-learning metrics (expanded once per evaluation split), and
    obsolete metrics kept for backward compatibility.

    :return: list of parameter/metric name strings
    """
    all_params = []
    # Fix (idiom): manual append-loops replaced with list.extend; repeated
    # string `+=` replaced with a single format call.
    all_params.extend(step2.get_param_list())
    all_params.extend(step3.get_param_list())
    #
    _modelManager = ModelManager()
    all_params.extend(utils.dict_get_param_list(_modelManager.get_properties()))
    #
    all_params.extend(utils.dict_get_param_list(
        learn_evaluate_results.learning_metrics_template))
    #
    # Post-learning metrics get one entry per evaluation split.
    for base_params in utils.dict_get_param_list(
            learn_evaluate_results.post_learning_metrics_template):
        for postfix in ('val', 'test1', 'test2'):
            all_params.append('{}_{}'.format(base_params, postfix))
    #
    all_params.extend(utils.dict_get_param_list(
        learn_evaluate_results.obsolete_metrics_for_backward_compatibility
    ))
    #
    return all_params
def getTestModelMgr():
    """Build a ModelManager pre-loaded with the fixed test predicates."""
    mgr = ModelManager()
    test_predicates = (
        '_areBeingMultiplied(_time(0,1), _id(2,1), _id(2,2))',
        '__isLessThan(_id(2,1), _id(2,2))',
        # '_isSumOfTerms(_time(0,1), _id(2,1))',
    )
    for predicate in test_predicates:
        mgr.addPredicate(predicate)
    return mgr
def parseAnsSetFromPredicates(self, predicates_list):
    """ compose as a string every solution in the predicate list given

    Parses each predicate string, registers time-stamped predicates,
    extracts substitution data and rational solution values, then runs
    solution-level and per-step post-processing.

    :param predicates_list: iterable of predicate strings from the solver
    """
    model_mgr = ModelManager()
    # parse predicates for each time step
    # TODO: add applicable and selected heuristics
    for predicate_string in predicates_list:
        pred_obj = pred_parser.predicateStringToParsedPredicate(predicate_string)
        time = pred_parser.getTimeFromPredObject(pred_obj)
        # Fix: identity comparison with None (was `time != None`).
        if time is not None:
            self.addPredicateForTimeStep(time, pred_obj)
        # look for substitution data, to swap variables
        if pred_obj.name == 'substitutedDegree':
            _, degree = pred_parser.argsToListOfStrings(pred_obj)
            self.sub_data['degree'] = degree
            self.sub_data['step'] = int(time.step)
        if pred_obj.name == 'solutionValue':
            # rational numbers are represented as numer, denom ordered pairs
            _, numer, denom = pred_parser.argsToListOfStrings(pred_obj)
            soln_value = numer if denom == '1' else numer + '/' + denom
            self.soln_list.append(soln_value)
        # add predicate to model manager
        model_mgr.addPredicate(predicate_string)
    # handle post processing at solution level, and step level
    self.solutionLevelPostProcessing()
    for eqn_step in self.solution_steps.values():
        eqn_step.postProcessStepData(model_mgr)
def __init__(self, record_training_data=False, model=None):
    """Set up the PiCar: drive controller, AWS client, camera, and either
    training-data recording or a pre-trained model for driving.

    :param record_training_data: when True and no model is given, frames
        and labels are collected for later training
    :param model: name/path of a trained model to load; supplying a model
        disables training-data recording
    """
    print('Setting up PiCar...')
    # Recording and model-driven operation are mutually exclusive.
    if record_training_data and not model:
        self.record_training_data = True
        print('Recording training data...')
    else:
        self.record_training_data = False
    if model:
        self.model_manager = ModelManager()
        self.model_manager.load_model(model)
    else:
        self.model_manager = None
    self.dc = DriveController()
    self.aws_manager = AWSManager()
    self.setup_camera()
    # Structs for collecting training images and labels
    self.training_images = []
    self.training_labels = []
    # Stores the current user drive instruction
    self.current_drive_input = 'forward'
    # Using pygame in process remote keyboard inputs
    pygame.init()
    pygame.display.set_mode((100, 100))
def __init__(self):
    """Wire up the image-processing pipeline, model management, the
    prediction model, and the per-user session store."""
    self.image_processor = ImageProcessor(crop_face=CROP_FACES)
    self.model_manager = ModelManager()
    # initialize volatile variables
    self.prediction_model = MobilenetPredictor()
    self.personal_trainer = PersonalTrainer()
    # In-memory session store — presumably keyed by user/session id; verify against callers.
    self.sessions = {}
def run_one_config(opt, model_type, case_study=False):
    """Train and evaluate one model configuration, record its performance,
    and optionally run the case study."""
    set_random_seeds()
    ds = DataSet(opt, model_type)
    manager = ModelManager(opt)
    model, elapsed = manager.build_model(model_type, ds)
    evaluator = Evaluator(opt)
    scores = evaluator.eval(model, model_type, ds.test_loader)
    evaluator.write_performance(model_type, scores, elapsed)
    run_case_study(model, ds, opt, case_study)
def __init__(self):
    """Initialize the UR10 mobile-manipulator interface: MoveIt commander,
    gripper publisher, cached end-effector pose, home joint values from the
    joints_setup.yaml config, and spawned simulation models."""
    roscpp_initialize(sys.argv)
    self.arm = moveit_commander.MoveGroupCommander("ur10_manipulator")
    # self.arm = moveit_commander.MoveGroupCommander("arm")
    self.arm.set_pose_reference_frame("ur10_base_link")
    self.gripperpub = rospy.Publisher("gripper_controller/command", JointTrajectory, queue_size=0)
    self.transform_arm_to_baselink = Point()
    self.get_arm_to_baselink()
    # Cache the current end-effector pose; orientation as roll/pitch/yaw.
    self.pose = self.arm.get_current_pose().pose
    self.x = self.pose.position.x
    self.y = self.pose.position.y
    self.z = self.pose.position.z
    quaternion = (self.pose.orientation.x, self.pose.orientation.y,
                  self.pose.orientation.z, self.pose.orientation.w)
    euler = euler_from_quaternion(quaternion)
    self.roll = euler[0]
    self.pitch = euler[1]
    self.yaw = euler[2]
    filename = os.path.join(rospkg.RosPack().get_path('rqt_industrial_robot'),
                            'src', 'rqt_mobile_manipulator', 'joints_setup.yaml')
    with open(filename) as file:
        # Fix: yaml.load() without an explicit Loader is deprecated and can
        # construct arbitrary objects; safe_load suffices for this config.
        joints_setup = yaml.safe_load(file)
    home_value = joints_setup["home_value"]
    # Home pose: six arm joints plus the gripper value.
    home_joints = [home_value["joint_%d" % i] for i in range(1, 7)]
    home_joints.append(home_value["gripper"])
    self.set_home_value(home_joints)
    # self.back_to_home()
    self.gripper_length = 0.34
    # self.event = threading.Event()
    self.movement_finish = False
    self.modelmanager = ModelManager()
    self.modelmanager.spawn_all_model()
    self.plan_trajectory = None
    self.get_workspace()
def do_continue(self, line):
    '''
    Will continue the training of the currently selected model
    '''
    if not self.has_model_selected():
        print 'please select or create a model'
        return False
    # self.prompt is "<model_name>$"; strip the trailing '$' for the manager.
    m = ModelManager(self.prompt[:-1])
    state = m.load_current_state(add_hidden=True)
    model = m.load_currently_selected_model()
    # Resume training from the loaded state and model.
    train.train2(m, state, model)
def main():
    """Command-line entry point: load data, select features, and run the
    model on the requested target with the given train/test ratio."""
    parser = make_parser()
    args = parser.parse_args()
    data_mngr = DataManager(args.data_path)
    model_mngr = ModelManager(args.model_name, args.task_type)
    # Fix: removed the dead `data = pd.DataFrame()` pre-assignment; both
    # branches below rebind `data` unconditionally.
    if args.features == 'ALL':
        data = data_mngr.load_data()
    else:
        data = data_mngr.take_some_features(args.features)
    X, y = DataManager.get_X_y(data, args.target)
    model_mngr.run(X, y, args.ratio)
def __init__(self, arm="ur5_manipulator", gripper="/ur5/vacuum_gripper/grasp"):
    """Initialize the UR5 arm with vacuum gripper: MoveIt commander, home
    joint values from joints_setup.yaml, gripper publisher, cached
    end-effector pose, and spawned simulation models.

    :param arm: MoveIt planning-group name for the arm
    :param gripper: vacuum-gripper grasp topic name
    """
    roscpp_initialize(sys.argv)
    self.arm = moveit_commander.MoveGroupCommander(arm)
    filename = os.path.join(rospkg.RosPack().get_path('rqt_industrial_robot'),
                            'src', 'rqt_vacuum_gripper', 'joints_setup.yaml')
    with open(filename) as file:
        # Fix: yaml.load() without an explicit Loader is deprecated and can
        # construct arbitrary objects; safe_load suffices for this config.
        joints_setup = yaml.safe_load(file)
    home_value = joints_setup["home_value"]
    self.set_home_value([home_value["joint_%d" % i] for i in range(1, 7)])
    self.back_to_home()
    self.gripper = gripper
    self.gripper_pub = rospy.Publisher(gripper, Bool, queue_size=0)
    # Cache the current end-effector pose; orientation as roll/pitch/yaw.
    self.pose = self.arm.get_current_pose().pose
    self.x = self.pose.position.x
    self.y = self.pose.position.y
    self.z = self.pose.position.z
    quaternion = (self.pose.orientation.x, self.pose.orientation.y,
                  self.pose.orientation.z, self.pose.orientation.w)
    euler = euler_from_quaternion(quaternion)
    self.roll = euler[0]
    self.pitch = euler[1]
    self.yaw = euler[2]
    self.gripper_length = 0
    self.event = threading.Event()
    self.movement_finish = False
    self.modelmanager = ModelManager()
    self.modelmanager.spawn_all_model()
    self.plan_trajectory = None
    self.get_workspace()
def do_select(self, line):
    '''
    Will show a list of possible models to load (if any exist).
    If a model name is initially provided, the model will be loaded instead.
    if a model is selected, model versions will be shown to select from.
    '''
    # With a model already selected, this switches between saved *versions*
    # of that model instead of between models.
    if self.has_model_selected():
        m = ModelManager(self.prompt[:-1])
        selection = self.select_in_dir(m.folders['model_versions'], type='files')
        if not selection:
            return False
        # Clear the current-version folder before copying the chosen version in.
        for root, dirs, files in os.walk(m.folders['current_version']):
            for f in files:
                logging.debug('removing %s from selection'%f)
                os.unlink(os.path.join(root, f))
        logging.debug('copying %s to %s'%(selection, m.folders['current_version']))
        shutil.copy(m.folders['model_versions']+selection,
                    m.folders['current_version']+selection)
        return False
    # list existing models
    if line == '':
        selection = self.select_in_dir(MODEL_DIR)
    else:
        selection = line
    if selection == False:
        print 'selection failed'
        return False
    model_loc = MODEL_DIR+selection
    if not os.path.exists(model_loc):
        print 'could not find', model_loc
        return False
    if self.has_model_selected():
        logging.debug('dropping already selected model')
    print 'selecting', model_loc
    m = ModelManager(selection)
    # The prompt encodes the selected model name as "<name>$".
    self.prompt = selection + '$'
def run():
    """Train a model on the raw data, persist it (pickle) into a timestamped
    results folder, optionally write a run report, and return the model.

    :return: the trained ModelManager instance
    :raises: re-raises any exception after logging it
    """
    logger = init()
    try:
        data = DataManager(path_to_raw)
        model = ModelManager(data, augment_data)
        logger.info(f"Creating new folder in {results_path} to save model object and results...")
        folder_name = time.strftime("%Y-%m-%d_%H-%M-%S_RUN")
        mkdir(results_path+folder_name)
        logger.info(f"{folder_name} created in {results_path}.")
        logger.info(f"Saving model object in the created folder...")
        with open(results_path + folder_name + "/" + model_name, "wb") as output:
            pickle.dump(model, output, pickle.HIGHEST_PROTOCOL)
        logger.info("Saving of model object completed.")
        if generate_results_report:
            # Fix: use a context manager so the report file is closed even
            # if write_report_from_run() raises.
            with open(results_path + folder_name + "/" + "Run_Report.txt", "w") as report_file:
                report_file.write(write_report_from_run(model, augment_data))
        logger.info("Main program completed successfully.")
        return model
    except Exception as e:
        logger.error("An error occured, program will be interrupted. Details : " + str(e))
        # Fix: bare `raise` preserves the original traceback (was `raise e`).
        raise
def do_load_pretrained_word_embeddings(self, line):
    '''
    Will load the word embeddings from a pre-trained model that fit to the
    vocabulary of the currently selected model.
    The resulting embeddings file will be stored in the model's folder and
    can be selected with select_word_embeddings
    If executed with: --fix_pretrained , pre-trained word embeddings will
    not be tuned during training.
    '''
    if not self.has_model_selected():
        print 'please select or create a model'
        return False
    # Only the literal flag '--fix_pretrained' is accepted as an argument.
    fix_pretrained = False
    if line != '':
        if line == '--fix_pretrained':
            fix_pretrained = True
        else:
            print 'Did not understand input: ', line
            return False
    # self.prompt is "<model_name>$"; strip the trailing '$'.
    m = ModelManager(self.prompt[:-1])
    selection = self.select_in_dir(m.folders['pre_trained_word_embeddings'], type='files')
    if selection:
        file_path = m.folders['pre_trained_word_embeddings'] + selection
    else:
        print 'selection failed'
        return False
    word_embedding_tools.load_pretrained_embeddings(m, file_path, fix_pretrained)
def do_build_lshf_model(self, input):
    '''
    Will train a LSH-Forest on a part of the corpus
    You can specify the percentage of the corpus to be used by
    build_lshf_model <percentage>
    build_lshf_model 0.1
    would use 10% of the corpus to train the model.
    '''
    if not self.has_model_selected():
        print 'please select or create a model'
        return False
    m = ModelManager(self.prompt[:-1])
    # Default to the whole corpus. NOTE(review): a non-numeric argument
    # raises ValueError here — no validation is performed.
    if input == '':
        percentage = 1.0
    else:
        percentage = float(input)
    lsh_forest.train_lsh_forest(m, corpus_percentage=percentage)
    # Persist utterance embeddings linked to the trained forest.
    lsh_forest.save_linked_utterance_embeddings(m)
def __init__(self, options):
    """Build the evaluation harness: load a trained model (weights required
    via opt.load_path) and create one DataLoader per requested test set.

    :param options: parsed options; reads load_path, config_path,
        test_data_types, network, num_workers, disable_normalisation
    """
    print('---------------')
    self.opt = options
    # Create network and optimiser
    self.model_manager = ModelManager(self.opt)
    assert self.opt.load_path is not None
    self.model_manager.load_model(weights_path=self.opt.load_path, load_optimiser=False)
    # extract model, optimiser and scheduler for easier access
    self.model = self.model_manager.model
    self.model.eval()
    path_info = load_config(self.opt.config_path)
    self.test_loaders = {}
    for test_data_type in self.opt.test_data_types:
        data_path = path_info[test_data_type]
        # Input resolution depends on both the network and the dataset.
        width, height = sizes_lookup[self.opt.network][test_data_type]
        # create dataloaders
        # KITTI variants share the 'kitti' split folder and per-type split
        # files; other datasets use 'test_files.txt'.
        folder = 'kitti' if 'kitti' in test_data_type else test_data_type
        textfile = test_data_type + '.txt' if 'kitti' in test_data_type else 'test_files.txt'
        filename_path = os.path.join('splits', folder, textfile)
        test_filenames = readlines(filename_path)
        dataset_class = data_type_lookup[test_data_type]
        test_dataset = dataset_class(
            data_path, test_filenames, height, width,
            is_train=False,
            disable_normalisation=self.opt.disable_normalisation,
            kitti2012=test_data_type == 'kitti2012',
            load_gt=test_data_type != 'kitti2015submission')  # submission split has no ground truth
        test_loader = DataLoader(test_dataset, shuffle=False, drop_last=False,
                                 num_workers=self.opt.num_workers,
                                 batch_size=1)  # batch size 1: per-image evaluation
        self.test_loaders[test_data_type] = test_loader
    self.error_metrics = defaultdict(list)
    self.resized_disps = []
def execute_session(self, model_name, alpha_acc, exp_acc, alpha_dist, exp_dist, alpha_steps, t):
    """
    Executes a single training session of a model
    :param model_name: Name of the model
    :param alpha_acc: Coverage reward
    :param exp_acc: Coverage exponential reward
    :param alpha_dist: Distance reward
    :param exp_dist: Distance Exponential Reward
    :param alpha_steps: Step Reward
    :param t: Which architecture to be used
    """
    print("\n==================== New Session {} ============================".format(model_name))
    print("acc: {} - {}, dist: {} - {}, steps {}, views: {}, LR: {}\n"
          .format(alpha_acc, exp_acc, alpha_dist, exp_dist, alpha_steps,
                  self.alpha_views, self.learning_rate))
    # Launch the Unity environment: packaged executable or live editor.
    if self.use_executable:
        env = UnityEnvironment(file_name=self.env_name)
    else:
        env = UnityEnvironment(file_name=None)
    default_brain = env.brain_names[0]
    env_info = env.reset(train_mode=False)[default_brain]
    # Network output size equals the number of available actions.
    num_output = len(env_info.action_masks[0])
    # Fetching model
    model_manager = ModelManager(load=self.load_model, num_views=num_output,
                                 num_output=num_output, model_name=model_name,
                                 learning_rate=self.learning_rate, variation=t)
    # Train
    trainer = Trainer(model_manager, env, self.max_step)
    trainer.set_reward_values(alpha_acc, exp_acc, alpha_dist, exp_dist,
                              alpha_steps, self.alpha_views)
    synopsis = SynopsisManager(trainer, model_manager, run_name=model_name,
                               max_step=self.max_step)
    trainer.train(self.num_generations, self.num_batches, self.batch_size, self.test_size)
    synopsis.print_training_summary()
    trainer.evaluate_solution(self.evaluation_size)
    # Close environment
    env.close()
    # Save model
    model_manager.save_model()
    # Cleanup
    # del trainer.memory
    del trainer
    del synopsis
    del model_manager
class TrackManager(object):
    """Owns the set of Track objects, allocates global track ids, and keeps
    the per-track prediction models (via ModelManager) in sync with
    incoming measurements."""

    def __init__(self, global_config, data_source):
        self.global_config = global_config
        self.model_manager = ModelManager(global_config, data_source, self)
        # global_track_id -> Track
        self.tracks = {}
        # ids of tracks currently considered alive
        self.active_ids = []
        self.currently_highest_id = 0

    def real_track_real_measurement(self, global_track_id, measurement):
        """Feed a genuine measurement to an existing track and its model."""
        self.tracks[global_track_id].add_measurement(measurement, is_artificial=False)
        self.model_manager.update_by_id(global_track_id, measurement)

    def real_track_pseudo_measurement(self, global_track_id, measurement):
        """Feed an artificial measurement to an existing track.

        Returns True while the track stays alive (alive probability >= 0);
        otherwise retires the track (removes it from active_ids and deletes
        its model) and returns False.
        """
        is_alive_probability = self.tracks[global_track_id].add_measurement(
            measurement, is_artificial=True)
        if is_alive_probability >= 0:
            self.model_manager.update_by_id(global_track_id, measurement)
            return True
        else:
            try:
                self.active_ids.remove(global_track_id)
            except Exception:
                logging.error('error in real_track_pseudo_measurement')
                # Drop into an interactive shell for post-mortem debugging.
                code.interact(local=dict(globals(), **locals()))
            self.model_manager.delete_by_id(global_track_id)
            return False

    def pseudo_track_real_measurement(self, measurement, current_timestep):
        """Start a new track from an unassociated measurement; returns its id."""
        global_track_id = self.currently_highest_id
        self.global_config['highest_id'] = global_track_id
        self.currently_highest_id += 1
        self.active_ids.append(global_track_id)
        self.tracks[global_track_id] = Track(self.global_config, current_timestep,
                                             measurement,
                                             **self.global_config['Track'])
        self.model_manager.create_by_id(global_track_id, measurement)
        return global_track_id

    def get_predictions(self):
        """Predict the next state of every active track.

        :return: (predictions, variances) dicts keyed by track id
        """
        predictions, variances = self.model_manager.predict_all()
        # Sanity check: exactly one prediction per active track.
        if len(self.active_ids) != len(predictions.keys()):
            logging.error(
                "something with the id management doesn't work in get_predictions!"
            )
            code.interact(local=dict(globals(), **locals()))
        return predictions, variances

    def get_alive_probability(self, track_id):
        """Return the current alive probability of the given track."""
        return self.tracks[track_id].is_alive_probability
def train(self, train_dataset, remaining_time_budget=None):
    """model training on train_dataset.

    :param train_dataset: tuple, (train_x, train_y)
        train_x: list of vectors, input train speech raw data.
        train_y: A `numpy.ndarray` matrix of shape (sample_count, class_num).
            here `sample_count` is the number of examples in this dataset
            as train set and `class_num` is the same as the class_num in
            metadata. The values should be binary.
    :param remaining_time_budget: seconds left for training, or None
    """
    # Fix: the default budget is None, and `None < 10` raises TypeError on
    # Python 3 — guard the comparison.
    if remaining_time_budget is not None and remaining_time_budget < 10:
        return
    if self.metadata['class_num'] <= 7:
        # Few-class datasets: keep training the standard manager model.
        if self.done_training:
            return
        self.train_loop_num += 1
        if self.train_loop_num == 1:
            self.data_manager = DataManager(self.metadata, train_dataset)
            self.model_manager = ModelManager(self.metadata, self.data_manager)
        self.model_manager.fit(train_loop_num=self.train_loop_num)
        if self.train_loop_num > 500:
            self.done_training = True
    else:
        # Many-class datasets: switch to the ResNet34 model once the manager
        # has produced its first model.
        self.train_loop_num += 1
        if self.train_loop_num == 1:
            self.data_manager = DataManager(self.metadata, train_dataset)
            self.model_manager = ModelManager(self.metadata, self.data_manager)
        # Fix: identity comparison with None (was `== None`).
        if self.model_manager._last_model_name is None:
            self.model_manager.fit(train_loop_num=self.train_loop_num)
        else:
            if self.resnet_model is None:
                self.time2 = remaining_time_budget - 26
                self.resnet_model = res34_model.Model(self.metadata)
            self.resnet_model.train_resnet(train_dataset, remaining_time_budget)
def execute(dataset_name, dir_npy, reload_data):
    """Drive the per-step pipeline over the raw dataset using a fresh data
    generator and model manager."""
    # from train_data_generator import FCTrainDataGenerator
    generator = FCTrainDataGenerator()
    # from model_manager import ModelManager
    manager = ModelManager()
    #
    loop_step_raw_data(dataset_name, dir_npy, generator, manager, reload_data)
def do_encode_corpus(self, input): ''' Will encode the currently selected corpus and save the resulting embeddings to disk ''' if not self.has_model_selected(): print 'please select or create a model' return False m = ModelManager(self.prompt[:-1]) save_embeddings_to_file(m) check_embeddings_consistency(m)
def __init__(self):
    """Create the resource managers (actors, static models, terrain); the
    render root, showbase, and controllers are bound later."""
    self.__actorMgr = ActorManager()
    self.__modelMgr = ModelManager()
    self.__terraMgr = TerrainManager()
    # Bound later via build_on()/bind_* methods.
    self.__render = None
    self.__showbase = None
    self.__camCtrlr = None
    self.__lightCtrlr = None
def do_train(self, line): ''' Will start the training using the configuration stored in state.txt ''' if not self.has_model_selected(): print 'please select or create a model' return False parser = argparse.ArgumentParser() parser.add_argument('-gui', action='store_true', help='The size of the training set given by a floating point number between 0 and 1.', default=False) args = parser.parse_args(line) m = ModelManager(self.prompt[:-1]) state = m.load_current_state(add_hidden=True) train.train2(m, state, None) print
def do_state(self, line): ''' Will link the specified model configuration (see state.py for an overview) to the selected model. ''' if not self.has_model_selected(): print 'please select or create a model' return False if line == '': print 'Please provide a model configuration name as input (see state.py for an overview).' return False if os.path.exists(MODEL_DIR+self.prompt+'/state.txt'): print 'Model configuration already specified!' user_input = raw_input('overwrite? (y/n):') if not user_input.strip().startswith('y'): print 'aborting' return False manager = ModelManager(self.prompt[:-1]) manager.select_state(line)
def do_build_database(self, input):
    '''
    Will build a database from scratch storing binarized dialogues for
    quick access, as well as an indexing structures.
    '''
    if not self.has_model_selected():
        print 'please select or create a model'
        return False
    # self.prompt is "<model_name>$"; strip the trailing '$'.
    m = ModelManager(self.prompt[:-1])
    build_database_from_scratch(m)
def write_stg2_files(write_dir, samples_to_write, shard_size, batch_size,
                     devices_to_use, model_dir, data_dir, results_dir,
                     model_config, schedule_config, use_mixed_precision,
                     use_xla, show_steps):
    """Generate stage-2 training data with the trained (EMA) model and write
    it to `write_dir` as sharded .npy pairs (data_x_<i>, data_y_<i>).

    Skips generation entirely if `write_dir` already contains files.
    """
    if not os.path.exists(write_dir):
        os.mkdir(write_dir)
    elif os.path.isdir(write_dir):
        if len(os.listdir(write_dir)) != 0:
            print(
                "The stage 2 data directory is not empty, so training will use the files in here."
            )
            return
    # Sampling uses DDIM with the configured number of steps.
    show_mode = 'DDIM'
    manager = ModelManager(devices_to_use, model_dir, data_dir, results_dir,
                           model_config, schedule_config, use_mixed_precision,
                           use_xla, show_mode, show_steps)
    # Restore the latest checkpoint before sampling.
    num = manager._get_last_ckpt_num()
    manager.load_models(num)
    # Full shards plus (optionally) one smaller shard for the remainder.
    n_shards = samples_to_write // shard_size
    remainder = samples_to_write % shard_size
    h, w = manager.ema_model.h, manager.ema_model.w
    if remainder != 0:
        n_shards += 1
    n_total_ex = 0
    for i in range(n_shards):
        # Accumulators start empty; samples are concatenated batch by batch.
        data_x = np.zeros((0, h, w, 3)).astype('float16')
        data_y = np.zeros((0, h, w, 3)).astype('uint8')
        # The last shard may be smaller than shard_size.
        if i == n_shards - 1 and remainder != 0:
            ss = remainder
        else:
            ss = shard_size
        shard_rem = ss % batch_size
        for j in range(ss // batch_size):
            inps, outs = manager.generate_samples(batch_size, batch_size, verbose=False)
            data_x = np.concatenate((data_x, inps))
            data_y = np.concatenate((data_y, outs))
        if shard_rem != 0:
            # Final partial batch within the shard.
            inps, outs = manager.generate_samples(shard_rem, shard_rem, verbose=False)
            data_x = np.concatenate((data_x, inps))
            data_y = np.concatenate((data_y, outs))
        assert data_x.shape[0] == ss and data_x.shape == data_y.shape
        x_savepath = os.path.join(write_dir, 'data_x_{}'.format(i))
        y_savepath = os.path.join(write_dir, 'data_y_{}'.format(i))
        np.save(x_savepath, data_x)
        np.save(y_savepath, data_y)
        n_total_ex += len(data_x)
    print("Finished writing {} examples to {}".format(
        n_total_ex, write_dir))
def do_create(self, line): ''' Will create a new model directory structure for the specified name ''' new_dir = MODEL_DIR+line if os.path.exists(new_dir): print 'Model already exists!' return False logging.debug('creating folder structure') manager = ModelManager(line) logging.debug('model created') self.prompt = line + '$'
def __init__(self, model, server_preprocessor=True):
    """Wrap a model (or model name) in an encoding pipeline.

    :param model: a ModelManager instance, or a model name string from
        which one is constructed
    :param server_preprocessor: when False, tokenization/spell-correction
        steps are added to the local pipeline instead of relying on the server
    """
    # Fix: use isinstance() instead of `type(model) in (basestring, str)` —
    # exact-type checks miss subclasses; on Python 2, str subclasses
    # basestring, so this also accepts unicode model names.
    if isinstance(model, basestring):
        model = ModelManager(model)
    self.model = model
    self.pipe = filter_pipeline.FilterPipeline(self.model)
    self.server_preprocessor = server_preprocessor
    self.encoder = self.model.load_currently_selected_model()
    if not self.server_preprocessor:
        self.pipe.add_tokenizer()
        self.pipe.add(lambda smth: [smth])
        self.pipe.add_spell_corrector()
        self.pipe.add_finalizer()
def evaluate_model(self, model_name):
    """
    Session for evaluating every model. Uses the concept of the Trainer in a deterministic manner
    :param model_name: Name of the model to be evaluated
    """
    print(
        "\n==================== New Evaluation {} ============================"
        .format(model_name))
    # Launch the Unity environment: packaged executable or live editor.
    if self.use_executable:
        env = UnityEnvironment(file_name=self.env_name)
    else:
        env = UnityEnvironment(file_name=None)
    default_brain = env.brain_names[0]
    env_info = env.reset(train_mode=False)[default_brain]
    # Network output size equals the number of available actions.
    num_output = len(env_info.action_masks[0])
    # Fetching model
    model_path = self.path_to_models + model_name + ".h5"
    model_manager = ModelManager(load=True, num_views=num_output,
                                 num_output=num_output, model_name=model_path)
    # Change the model name
    # NOTE(review): this branch is dead (`and False` makes it unreachable),
    # and model_name is unconditionally overwritten two lines below anyway.
    if "_" in model_name and False:
        model_name = "evaluation_" + model_name.split("_", 1)[1]
    else:
        model_name = "eval_" + model_name
    model_name = "eval_coverage_progression"
    # Evaluating the model
    trainer = Trainer(model_manager, env, self.max_step)
    synopsis = SynopsisManager(trainer, model_manager, run_name=model_name,
                               max_step=self.max_step)
    trainer.evaluate_solution(self.evaluation_size)
    # Close environment
    env.close()
    # Cleanup
    # del trainer.memory
    del trainer
    del synopsis
    del model_manager
def encode(args): m = ModelManager(args.model_name) inp = load_input(args) wrapper = EncoderWrapper(m, args.preprocess == 'server') for conv in inp: encoded = wrapper.encode(conv) if args.return_type == 'print': print str(encoded) if args.return_type == 'npz': with open(args.result_save_loc, 'wb') as f: numpy.savez(f, numpy.array(encoded))
def do_load_data(self, line):
    '''
    Loads textual data and converts it into a format that can be used by the model.
    optional arguments are:
    -train_set_size <percentage as float>
    -valid_set_size <percentage as float>
    example:
    -train_set_size 0.85 -valid_set_size 0.1
    set sizes: train 85%, valid 10%, and test 5%
    '''
    if not self.has_model_selected():
        print 'please select or create a model'
        return False
    parser = argparse.ArgumentParser()
    parser.add_argument('-train_set_size', type=float,
                        help='The size of the training set given by a floating point number between 0 and 1.',
                        default=0.85)
    parser.add_argument('-valid_set_size', type=float,
                        help='The size of the validation set given by a floating point number between 0 and 1.',
                        default=0.10)
    input = parser.parse_args(line.split())
    # Let the user pick a corpus file; whatever remains after train+valid
    # becomes the test set.
    corpus_selection = self.select_in_dir(CORPORA_DIR, type='files')
    if corpus_selection == False:
        print 'selection failed'
        return False
    # self.prompt is "<model_name>$"; strip the trailing '$'.
    m = ModelManager(self.prompt[:-1])
    logging.debug('copying textual data...')
    shutil.copy(CORPORA_DIR+corpus_selection, m.folders['data'])
    logging.debug('Train set size: %.3f, Validation set size: %.3f, the remaining data is used for the test set'%(input.train_set_size, input.valid_set_size))
    # Binarize the corpus into train/valid/test splits for the model.
    corpora_processing.convert_to_binarized_data(m, m.folders['data']+corpus_selection,
                                                 m.folders['binarized'],
                                                 train_set_size=input.train_set_size,
                                                 valid_set_size=input.valid_set_size)
def do_train_word2vec(self, line): ''' Given the training data of the currently selected model, word embeddings will be trained using the gensim word2vec library. One can specify the feature length (default is 300): train_word2vec <feature_length> train_word2vec 300 ''' if not self.has_model_selected(): print 'please select or create a model' return False try: feature_length = int(line) except: feature_length = 300 word_embedding_tools.train_embeddings(ModelManager(self.prompt[:-1]), feature_length)
class SceneManager(object):
    """Facade over the actor/model/terrain resource managers plus the camera
    and light controllers.

    Responsibilities visible in this class: attaching resources to the scene
    graph, per-frame updates, resource-id lookup across all managers, and
    save/load serialization via "archive package" objects.
    Python 2 code: uses ``iteritems``/``has_key`` and print statements.
    """

    def __init__(self):
        # Sub-managers, one per resource kind.
        self.__actorMgr = ActorManager()
        self.__modelMgr = ModelManager()
        self.__terraMgr = TerrainManager()
        # Populated later by build_on() / bind_*() calls.
        self.__render = None
        self.__showbase = None
        self.__camCtrlr = None
        self.__lightCtrlr = None

    # Section: scene management (create, update, cull, hide, ...)
    """"""""""""""""""""""""""""""""""""""" 场景管理函数,包括创建、更新、剔除、隐藏等 """""""""""""""""""""""""""""""""""""""

    def build_on(self, showbase):
        # Attach to the ShowBase instance and cache its render root for
        # later "render" lookups in get_res().
        self.__showbase = showbase
        self.__render = self.__showbase.render

    # Add an animated (actor) resource to the scene under parentNode.
    def add_actor_scene(self, resPath, extraResPath, parentNode):
        actor = self.__actorMgr.load_res(resPath, extraResPath)
        actor.reparentTo(parentNode)
        return actor

    #####################
    # Add a static model resource to the scene under parentNode.
    def add_model_scene(self, resPath, parentNode):
        model = self.__modelMgr.load_res(resPath)
        model.reparentTo(parentNode)
        return model

    #####################
    # Add a terrain resource to the scene under parentNode.
    def add_terrain_scene(self, resPath, extraResPath, parentNode):
        terrain = self.__terraMgr.load_res(resPath, extraResPath)
        # Terrains attach via their root node rather than directly.
        terrain.getRoot().reparentTo(parentNode)
        return terrain

    #####################
    # Per-frame update task: actors and camera (terrain update disabled).
    def update_scene(self, task):
        #self.__terraMgr.update_terrain(task)
        self.__actorMgr.update_actors(task)
        self.__camCtrlr.update_camera(task)
        # task.cont keeps the task scheduled for the next frame.
        return task.cont

    #####################
    # Resource-id lookup across all managers (actor, then model, then terrain).
    def get_resId(self, res):
        resId = None
        resId = self.__actorMgr.get_resId(res)
        if resId is None:
            resId = self.__modelMgr.get_resId(res)
        if resId is None:
            resId = self.__terraMgr.get_resId(res)
        return resId

    #####################
    # Resource lookup by id; "render" maps to the cached scene-graph root.
    def get_res(self, resId):
        if resId == "render":
            return self.__render
        res = self.__actorMgr.get_res(resId)
        if res is None:
            res = self.__modelMgr.get_res(resId)
        if res is None:
            res = self.__terraMgr.get_res(resId)
        return res

    def bind_CameraController(self, camCtrlr):
        # Keep the actor manager in sync with the active camera controller.
        self.__camCtrlr = camCtrlr
        self.__actorMgr.bind_CameraController(camCtrlr)

    def get_camCtrlr(self):
        # NOTE(review): duplicates get_CamCtrlr() below.
        return self.__camCtrlr

    def bind_LightController(self, lightCtrlr):
        # Two-way binding: the light controller also gets a back-reference.
        self.__lightCtrlr = lightCtrlr
        self.__lightCtrlr.bind_SceneManager(self)

    def get_lightCtrlr(self):
        # NOTE(review): duplicates get_LightCtrlr() below.
        return self.__lightCtrlr

    # Section: scene-data interface for saving/loading archives
    """"""""""""""""""""" 读档存档的场景数据接口 """""""""""""""""""""
    # Archived value types (Panda3D):
    # Pos : LPoint3f
    # Hpr : LVecBase3f
    # Scale : LVecBase3f
    # Color : LVecBase4f
    # LVecBase4f, LVecBase3f((0, 0, 0)) and LPoint3f((0, 0, 0)) style
    # initialization is valid for these types.

    # Import scene data from a list of archive packages (load-game path).
    # Item index layout must match what export_sceneArcPkg() writes.
    def import_sceneArcPkg(self, sceneArcPkg):
        # --- Actor data ---
        actorArcPkg = None
        for arcPkg in sceneArcPkg:
            if arcPkg.get_ArchivePackageName() == "actor":
                actorArcPkg = arcPkg
        # Fresh manager replaces the existing one wholesale.
        actorMgr = ActorManager()
        self.__actorMgr = actorMgr
        #actorArcPkg.print_metaData()
        # actorItem layout: [id, resPath, extraResPath, pos, hpr, scale, parentId]
        for actorItem in actorArcPkg.get_itemsData():
            actor = actorMgr.load_res(_resId = actorItem[0], resPath = actorItem[1], extraResPath = actorItem[2])
            #print "the sceneMgr get_res : ", self.get_res(actorItem[0])
            actor.setPos(actorItem[3])
            actor.setHpr(actorItem[4])
            actor.setScale(actorItem[5])
            parentNode = self.get_res(actorItem[6])
            actor.reparentTo(parentNode)
        # Re-register toggle-event -> action/effect bindings from metadata.
        eventActionRecord = actorArcPkg.get_metaData("eventActionRecord")
        eventEffertRecord = actorArcPkg.get_metaData("eventEffertRecord")
        #print "in import : eventActionRecord ", len(eventActionRecord)
        for actorId, record in eventActionRecord.iteritems():
            for toggleEvent, actionName in record.iteritems():
                actorMgr.add_toggle_to_actor(toggleEvent, actorId, actionName)
        for actorId, record in eventEffertRecord.iteritems():
            for toggleEvent, effertList in record.iteritems():
                for effert in effertList:
                    print "import eventEffertRecord : ", toggleEvent, ", ", actorId, ", ", effert
                    actorMgr.add_effert_to_actor(toggleEvent, actorId, effert[0])
        #actorMgr.set_clock(globalClock)
        actorMgr.print_eventActionRecord()
        actorMgr.print_eventEffertRecord()
        #self.__actorMgr = actorMgr
        # --- Model data ---
        modelArcPkg = None
        for arcPkg in sceneArcPkg:
            if arcPkg.get_ArchivePackageName() == "model":
                modelArcPkg = arcPkg
        modelMgr = ModelManager()
        self.__modelMgr = modelMgr
        # modelItem layout: [id, resPath, pos, hpr, scale, parentId]
        for modelItem in modelArcPkg.get_itemsData():
            model = modelMgr.load_res(_resId = modelItem[0], resPath = modelItem[1])
            model.setPos(modelItem[2])
            model.setHpr(modelItem[3])
            model.setScale(modelItem[4])
            parentNode = self.get_res(resId = modelItem[5])
            model.reparentTo(parentNode)
        # --- Terrain data ---
        terraArcPkg = None
        terraMgr = TerrainManager()
        for arcPkg in sceneArcPkg:
            if arcPkg.get_ArchivePackageName() == "terrain":
                terraArcPkg = arcPkg
        # terraItem layout: [id, resPath, extraResPath, pos, hpr, scale, parentId]
        for terraItem in terraArcPkg.get_itemsData():
            terrain = terraMgr.load_res(_resId = terraItem[0], resPath = terraItem[1], extraResPath = terraItem[2])
            terrain.getRoot().setPos(terraItem[3])
            terrain.getRoot().setHpr(terraItem[4])
            terrain.getRoot().setScale(terraItem[5])
            parentNode = self.get_res(resId = terraItem[6])
            terrain.getRoot().reparentTo(parentNode)
        terraMgr.set_currTerrain(terraArcPkg.get_metaData("currTerraId"))
        self.__terraMgr = terraMgr
        # --- Camera data ---
        cameraArcPkg = None
        for arcPkg in sceneArcPkg:
            if arcPkg.get_ArchivePackageName() == "camera":
                cameraArcPkg = arcPkg
        camCtrlr = CameraController()
        self.__camCtrlr = camCtrlr
        camCtrlr.bind_camera(self.__showbase.cam)
        camCtrlr.bind_ToggleHost(self.__showbase)
        # The camera package holds a single item; rebind the name to it.
        # Item layout: [pos, hpr, moveSpeed, rotateSpeed, focusObjId,
        #               rotateRadius, optsSwitch, toggleEventToOpts]
        cameraArcPkg = cameraArcPkg.get_itemsData()[0]
        #print "in import : ", cameraArcPkg[0]
        camCtrlr.get_camToCtrl().setPos(cameraArcPkg[0])
        camCtrlr.get_camToCtrl().setHpr(cameraArcPkg[1])
        camCtrlr.set_moveSpeed(cameraArcPkg[2])
        camCtrlr.set_rotateSpeed(cameraArcPkg[3])
        objToFocus = self.get_res(cameraArcPkg[4])
        camCtrlr.focus_on(objToFocus, cameraArcPkg[5])
        camCtrlr.set_optsSwitch(cameraArcPkg[6])
        #camCtrlr.set_toggleEventToOpts(cameraArcPkg[7])
        self.__actorMgr.bind_CameraController(camCtrlr)
        for toggleEvent, opt in cameraArcPkg[7].iteritems():
            camCtrlr.add_toggle_to_opt(toggleEvent, opt)
        # print "Camera Pos : ", camCtrlr.get_camToCtrl().getPos()
        # print "Camera Hpr : ", camCtrlr.get_camToCtrl().getHpr()
        # --- Light data ---
        lightArcPkg = None
        for arcPkg in sceneArcPkg:
            if arcPkg.get_ArchivePackageName() == "light":
                lightArcPkg = arcPkg
        lightCtrlr = LightController()
        self.__lightCtrlr = lightCtrlr
        lightCtrlr.bind_SceneManager(self)
        # lightItem layout: [id, color, pos, hpr, targetId, setorIds, parentId]
        for lightItem in lightArcPkg.get_itemsData():
            light = lightCtrlr.create_light(_lightId = lightItem[0], lightType = SeriousTools.extract_name_from_Id(lightItem[0]), lightColor = lightItem[1], lightPos = lightItem[2], lightHpr = lightItem[3], targetId = lightItem[4], parentId = lightItem[6])
            #print "in import : ", light
            for setorId in lightItem[5]:
                lightCtrlr.set_light_to(lightItem[0], setorId)

    #####################
    # Export scene data as archive packages (save-game path).
    # Parent-node id lookup needs the global view of all managers, so every
    # resource kind is archived here instead of inside its own manager.
    def export_sceneArcPkg(self):
        # --- Actor data ---
        actorArcPkg = self.__actorMgr.get_arcPkg()
        actorResPath = self.__actorMgr.get_resPath()
        #actorArcPkg.set_metaData("toggleEffert", self.__actorMgr.get_toggleEffert())
        actorArcPkg.set_metaData("eventActionRecord", self.__actorMgr.get_eventActionRecord())
        actorArcPkg.set_metaData("eventEffertRecord", self.__actorMgr.get_eventEffertRecord())
        for actorId, actor in self.__actorMgr.get_resMap().iteritems():
            actorItem = []
            actorItem.append(actorId)
            actorItem.append(actorResPath[actorId][0])
            actorItem.append(actorResPath[actorId][1])
            actorItem.append(actor.getPos())
            actorItem.append(actor.getHpr())
            actorItem.append(actor.getScale())
            parentNode = actor.getParent()
            # NOTE(review): `is "render"` compares identity with a string
            # literal and only works via CPython interning; should be `==`.
            if parentNode.getName() is "render":
                actorItem.append("render")
            else:
                parentId = self.get_resId(parentNode)
                # Unknown parents fall back to "render".
                if parentId is None:
                    actorItem.append("render")
                else:
                    actorItem.append(parentId)
            actorArcPkg.add_item(actorItem)
        ##########
        # --- Model data ---
        modelArcPkg = self.__modelMgr.get_arcPkg()
        modelResPath = self.__modelMgr.get_resPath()
        for modelId, model in self.__modelMgr.get_resMap().iteritems():
            modelItem = []
            modelItem.append(modelId)
            modelItem.append(modelResPath[modelId])
            modelItem.append(model.getPos())
            modelItem.append(model.getHpr())
            modelItem.append(model.getScale())
            parentNode = model.getParent()
            # NOTE(review): same fragile `is "render"` identity comparison.
            if parentNode.getName() is "render":
                modelItem.append("render")
            else:
                parentId = self.get_resId(parentNode)
                if parentId is None:
                    modelItem.append("render")
                else:
                    modelItem.append(parentId)
            modelArcPkg.add_item(modelItem)
        ##########
        # --- Terrain data ---
        terraArcPkg = self.__terraMgr.get_arcPkg()
        terraArcPkg.set_metaData("currTerraId", self.__terraMgr.get_currTerrain())
        terraResPath = self.__terraMgr.get_resPath()
        for terrainId, terrain in self.__terraMgr.get_resMap().iteritems():
            terrainItem = []
            terrainItem.append(terrainId)
            terrainItem.append(terraResPath[terrainId][0])
            terrainItem.append(terraResPath[terrainId][1])
            terrainItem.append(terrain.getRoot().getPos())
            terrainItem.append(terrain.getRoot().getHpr())
            terrainItem.append(terrain.getRoot().getScale())
            parentNode = terrain.getRoot().getParent()
            # NOTE(review): same fragile `is "render"` identity comparison.
            if parentNode.getName() is "render":
                terrainItem.append("render")
            else:
                parentId = self.get_resId(parentNode)
                if parentId is None:
                    terrainItem.append("render")
                else:
                    terrainItem.append(parentId)
            terraArcPkg.add_item(terrainItem)
        ##########
        # --- Camera data ---
        # Single item; index layout mirrors import_sceneArcPkg's camera read.
        camArcPkg = self.__camCtrlr.get_arcPkg()
        cam = self.__camCtrlr.get_camToCtrl()
        camItem = []
        camItem.append(cam.getPos())
        camItem.append(cam.getHpr())
        camItem.append(self.__camCtrlr.get_moveSpeed())
        camItem.append(self.__camCtrlr.get_rotateSpeed())
        focusObjId = self.get_resId(self.__camCtrlr.get_focusObj())
        camItem.append(focusObjId)
        camItem.append(self.__camCtrlr.get_rotateRadius())
        camItem.append(self.__camCtrlr.get_optsSwitch())
        camItem.append(self.__camCtrlr.get_toggleEventToOpts())
        #camItem.append(None)
        camArcPkg.add_item(camItem)
        ##########
        # --- Light data ---
        lightArcPkg = self.__lightCtrlr.get_arcPkg()
        lightTargetMap = self.__lightCtrlr.get_targetMap()
        lightSetorMap = self.__lightCtrlr.get_setorMap()
        for lightId, light in self.__lightCtrlr.get_lightMap().iteritems():
            lightItem = []
            lightItem.append(lightId)
            lightItem.append(light.node().getColor())
            lightItem.append(light.getPos())
            lightItem.append(light.getHpr())
            # has_key is Python-2 only; `is True` is redundant here.
            if lightTargetMap.has_key(lightId) is True:
                lightItem.append(lightTargetMap[lightId])
            else:
                lightItem.append(None)
            lightItem.append(lightSetorMap[lightId])
            parentNode = light.getParent()
            # NOTE(review): same fragile `is "render"` identity comparison.
            if parentNode.getName() is "render":
                lightItem.append("render")
            else:
                parentId = self.get_resId(parentNode)
                if parentId is None:
                    lightItem.append("render")
                else:
                    lightItem.append(parentId)
            lightArcPkg.add_item(lightItem)
        ##########
        # Package order matters only by convention; import scans by name.
        sceneArcPkg = [ actorArcPkg, modelArcPkg, terraArcPkg, camArcPkg, lightArcPkg ]
        print "in export : ", len(lightArcPkg.get_itemsData())
        return sceneArcPkg

    # Section: accessors for member variables
    """"""""""""""" 成员变量的get函数 """""""""""""""

    def set_render(self, render):
        self.__render = render

    def get_render(self):
        return self.__render

    def get_ActorMgr(self):
        return self.__actorMgr

    def get_ModelMgr(self):
        return self.__modelMgr

    def get_TerraMgr(self):
        return self.__terraMgr

    def get_CamCtrlr(self):
        return self.__camCtrlr

    def get_LightCtrlr(self):
        return self.__lightCtrlr
# NOTE(review): this function appears to be a byte-for-byte duplicate of
# SceneManager.import_sceneArcPkg defined earlier in this file — confirm
# which copy is live and delete the dead one.  It also references
# self.__-prefixed attributes, whose name-mangling only matches
# SceneManager's fields if this def actually sits inside that class body.
def import_sceneArcPkg(self, sceneArcPkg):
    """Rebuild the scene (actors, models, terrain, camera, lights) from a
    list of archive packages produced by export_sceneArcPkg."""
    # --- Actor data ---
    actorArcPkg = None
    for arcPkg in sceneArcPkg:
        if arcPkg.get_ArchivePackageName() == "actor":
            actorArcPkg = arcPkg
    # A fresh manager replaces the existing one wholesale.
    actorMgr = ActorManager()
    self.__actorMgr = actorMgr
    #actorArcPkg.print_metaData()
    # actorItem layout: [id, resPath, extraResPath, pos, hpr, scale, parentId]
    for actorItem in actorArcPkg.get_itemsData():
        actor = actorMgr.load_res(_resId = actorItem[0], resPath = actorItem[1], extraResPath = actorItem[2])
        #print "the sceneMgr get_res : ", self.get_res(actorItem[0])
        actor.setPos(actorItem[3])
        actor.setHpr(actorItem[4])
        actor.setScale(actorItem[5])
        parentNode = self.get_res(actorItem[6])
        actor.reparentTo(parentNode)
    # Re-register toggle-event -> action/effect bindings from metadata.
    eventActionRecord = actorArcPkg.get_metaData("eventActionRecord")
    eventEffertRecord = actorArcPkg.get_metaData("eventEffertRecord")
    #print "in import : eventActionRecord ", len(eventActionRecord)
    for actorId, record in eventActionRecord.iteritems():
        for toggleEvent, actionName in record.iteritems():
            actorMgr.add_toggle_to_actor(toggleEvent, actorId, actionName)
    for actorId, record in eventEffertRecord.iteritems():
        for toggleEvent, effertList in record.iteritems():
            for effert in effertList:
                print "import eventEffertRecord : ", toggleEvent, ", ", actorId, ", ", effert
                actorMgr.add_effert_to_actor(toggleEvent, actorId, effert[0])
    #actorMgr.set_clock(globalClock)
    actorMgr.print_eventActionRecord()
    actorMgr.print_eventEffertRecord()
    #self.__actorMgr = actorMgr
    # --- Model data ---
    modelArcPkg = None
    for arcPkg in sceneArcPkg:
        if arcPkg.get_ArchivePackageName() == "model":
            modelArcPkg = arcPkg
    modelMgr = ModelManager()
    self.__modelMgr = modelMgr
    # modelItem layout: [id, resPath, pos, hpr, scale, parentId]
    for modelItem in modelArcPkg.get_itemsData():
        model = modelMgr.load_res(_resId = modelItem[0], resPath = modelItem[1])
        model.setPos(modelItem[2])
        model.setHpr(modelItem[3])
        model.setScale(modelItem[4])
        parentNode = self.get_res(resId = modelItem[5])
        model.reparentTo(parentNode)
    # --- Terrain data ---
    terraArcPkg = None
    terraMgr = TerrainManager()
    for arcPkg in sceneArcPkg:
        if arcPkg.get_ArchivePackageName() == "terrain":
            terraArcPkg = arcPkg
    # terraItem layout: [id, resPath, extraResPath, pos, hpr, scale, parentId]
    for terraItem in terraArcPkg.get_itemsData():
        terrain = terraMgr.load_res(_resId = terraItem[0], resPath = terraItem[1], extraResPath = terraItem[2])
        terrain.getRoot().setPos(terraItem[3])
        terrain.getRoot().setHpr(terraItem[4])
        terrain.getRoot().setScale(terraItem[5])
        parentNode = self.get_res(resId = terraItem[6])
        terrain.getRoot().reparentTo(parentNode)
    terraMgr.set_currTerrain(terraArcPkg.get_metaData("currTerraId"))
    self.__terraMgr = terraMgr
    # --- Camera data ---
    cameraArcPkg = None
    for arcPkg in sceneArcPkg:
        if arcPkg.get_ArchivePackageName() == "camera":
            cameraArcPkg = arcPkg
    camCtrlr = CameraController()
    self.__camCtrlr = camCtrlr
    camCtrlr.bind_camera(self.__showbase.cam)
    camCtrlr.bind_ToggleHost(self.__showbase)
    # Single camera item; rebind the name to it.
    # Item layout: [pos, hpr, moveSpeed, rotateSpeed, focusObjId,
    #               rotateRadius, optsSwitch, toggleEventToOpts]
    cameraArcPkg = cameraArcPkg.get_itemsData()[0]
    #print "in import : ", cameraArcPkg[0]
    camCtrlr.get_camToCtrl().setPos(cameraArcPkg[0])
    camCtrlr.get_camToCtrl().setHpr(cameraArcPkg[1])
    camCtrlr.set_moveSpeed(cameraArcPkg[2])
    camCtrlr.set_rotateSpeed(cameraArcPkg[3])
    objToFocus = self.get_res(cameraArcPkg[4])
    camCtrlr.focus_on(objToFocus, cameraArcPkg[5])
    camCtrlr.set_optsSwitch(cameraArcPkg[6])
    #camCtrlr.set_toggleEventToOpts(cameraArcPkg[7])
    self.__actorMgr.bind_CameraController(camCtrlr)
    for toggleEvent, opt in cameraArcPkg[7].iteritems():
        camCtrlr.add_toggle_to_opt(toggleEvent, opt)
    # print "Camera Pos : ", camCtrlr.get_camToCtrl().getPos()
    # print "Camera Hpr : ", camCtrlr.get_camToCtrl().getHpr()
    # --- Light data ---
    lightArcPkg = None
    for arcPkg in sceneArcPkg:
        if arcPkg.get_ArchivePackageName() == "light":
            lightArcPkg = arcPkg
    lightCtrlr = LightController()
    self.__lightCtrlr = lightCtrlr
    lightCtrlr.bind_SceneManager(self)
    # lightItem layout: [id, color, pos, hpr, targetId, setorIds, parentId]
    for lightItem in lightArcPkg.get_itemsData():
        light = lightCtrlr.create_light(_lightId = lightItem[0], lightType = SeriousTools.extract_name_from_Id(lightItem[0]), lightColor = lightItem[1], lightPos = lightItem[2], lightHpr = lightItem[3], targetId = lightItem[4], parentId = lightItem[6])
        #print "in import : ", light
        for setorId in lightItem[5]:
            lightCtrlr.set_light_to(lightItem[0], setorId)