def get_weights():
    """
    Retrieve the weights from the generated DBN.

    @return: list of arrays, one weight matrix per DBN layer.
    """
    # Context manager closes the pickle file deterministically; the original
    # left the handle open until garbage collection.
    with open(env_paths.get_dbn_weight_path(), "rb") as f:
        return [array(w) for w in s.load(f)]
def check_for_data():
    """
    Check for DBN network data on disk.

    @return: True if the test-data file or the DBN weight file exists,
             False if neither exists.
    """
    # NOTE(review): this passes when EITHER file exists. If both the test
    # data and the trained weights are required before running, `or` should
    # probably be `and` — confirm with callers before changing.
    return os.path.exists(ep.get_test_data_path()) or os.path.exists(ep.get_dbn_weight_path())
def load_dbn_weights():
    """
    Load the weight matrices produced by finetuning from the serialized
    DBN weight file.

    @return: list of arrays, one weight matrix per DBN layer.
    """
    # The original docstring documented a `weight_matrices` parameter that
    # does not exist; this function takes no arguments.
    # Context manager ensures the pickle file is closed after loading.
    with open(env_paths.get_dbn_weight_path(), "rb") as f:
        return [array(w) for w in s.load(f)]
def compare_real_data_to_reconstructed_data():
    """
    Visualise up to 10 test data points (the first one seen for each class)
    alongside their DBN reconstructions.
    """
    # Context managers close each pickle file; the originals were never closed.
    with open(env_paths.get_dbn_weight_path(), "rb") as f:
        weights = s.load(f)
    with open(env_paths.get_batches_path(train=False), "rb") as f:
        batches = s.load(f)
    batch = batches[0]
    # The ".0" strip mirrors the original path mangling — presumably works
    # around a float-formatted batch id in the filename; verify against
    # env_paths.get_class_indices_path.
    with open(env_paths.get_class_indices_path(False, batch).replace(".0", ""), "rb") as f:
        class_indices = s.load(f)
    data = data_processing.get_bag_of_words_matrix(batch, training=False)

    # Collect the first data point per class, capped at 10 classes.
    # (renamed from `dict`, which shadowed the builtin)
    seen = {}
    for i in range(len(class_indices)):
        idx = class_indices[i]
        if idx in seen:
            continue
        seen[idx] = data[i]
        if len(seen) >= 10:
            break
    # Parenthesized print works on both Python 2 and 3 for a single argument;
    # the original used the Python-2-only statement form.
    print(seen.keys())

    data_points = seen.values()
    output_data_points = []
    for d in data_points:
        d = append(d, 1.)  # append the bias unit before propagating
        output_data_points.append(generate_output_data(d, weights))
    visualise_data_points(data_points, output_data_points)
def get_weights():
    """
    Retrieve the weights from the generated DBN.

    @return: list of arrays, one weight matrix per DBN layer.
    """
    # NOTE(review): this duplicates an identical get_weights defined earlier
    # in the file; consider removing one copy.
    # Context manager closes the pickle file instead of leaking the handle.
    with open(env_paths.get_dbn_weight_path(), "rb") as f:
        return [array(w) for w in s.load(f)]
def load_dbn_weights():
    """
    Load the weight matrices produced by finetuning from the serialized
    DBN weight file.

    @return: list of arrays, one weight matrix per DBN layer.
    """
    # NOTE(review): duplicates an identical load_dbn_weights defined earlier
    # in the file; consider removing one copy. The original docstring also
    # documented a nonexistent `weight_matrices` parameter.
    with open(env_paths.get_dbn_weight_path(), "rb") as f:
        return [array(w) for w in s.load(f)]
def save_dbn(weight_matrices, fine_tuning_error_train, fine_tuning_error_test, output=None):
    """
    Save the deep belief network into serialized files.

    @param weight_matrices: the weight matrices of the deep belief network.
    @param fine_tuning_error_train: training-error history from finetuning.
    @param fine_tuning_error_test: test-error history from finetuning.
    @param output: optional iterable of log lines written as plain text.
    """
    # Context managers guarantee each file is flushed and closed; the
    # original dumped into handles it never closed, risking truncated output.
    with open(env_paths.get_dbn_weight_path(), "wb") as f:
        s.dump([w.tolist() for w in weight_matrices], f)
    with open(env_paths.get_dbn_training_error_path(), "wb") as f:
        s.dump(fine_tuning_error_train, f)
    with open(env_paths.get_dbn_test_error_path(), "wb") as f:
        s.dump(fine_tuning_error_test, f)
    if output is not None:  # `is not None` instead of `not output == None`
        with open(env_paths.get_dbn_output_txt_path(), "w") as out:
            for elem in output:
                out.write(elem + "\n")
def compare_real_data_to_reconstructed_data_random():
    """
    Visualise 10 randomly chosen test data points from a randomly chosen
    batch alongside their DBN reconstructions.
    """
    # Context managers close the pickle files; the originals were never closed.
    with open(env_paths.get_dbn_weight_path(), "rb") as f:
        weights = s.load(f)
    with open(env_paths.get_batches_path(train=False), "rb") as f:
        batches = s.load(f)
    batch = choice(batches)  # pick a batch at random
    data = data_processing.get_bag_of_words_matrix(batch, training=False)

    # Choose 10 data points at random (randint here is presumably
    # numpy.random.randint, returning 10 indices — confirm import).
    indices = random.randint(0, len(data), 10)
    data_points = [data[idx] for idx in indices]

    output_data_points = []
    for d in data_points:
        d = append(d, 1.)  # append the bias unit before propagating
        output_data_points.append(generate_output_data(d, weights))
    visualise_data_points(data_points, output_data_points)
def save_dbn(weight_matrices, fine_tuning_error_train, fine_tuning_error_test, output=None):
    """
    Save the deep belief network into serialized files.

    @param weight_matrices: the weight matrices of the deep belief network.
    @param fine_tuning_error_train: training-error history from finetuning.
    @param fine_tuning_error_test: test-error history from finetuning.
    @param output: optional iterable of log lines written as plain text.
    """
    # NOTE(review): duplicates an identical save_dbn defined earlier in the
    # file; consider removing one copy.
    # Context managers guarantee each file is flushed and closed; the
    # original dumped into handles it never closed, risking truncated output.
    with open(env_paths.get_dbn_weight_path(), "wb") as f:
        s.dump([w.tolist() for w in weight_matrices], f)
    with open(env_paths.get_dbn_training_error_path(), "wb") as f:
        s.dump(fine_tuning_error_train, f)
    with open(env_paths.get_dbn_test_error_path(), "wb") as f:
        s.dump(fine_tuning_error_test, f)
    if output is not None:  # `is not None` instead of `not output == None`
        with open(env_paths.get_dbn_output_txt_path(), "w") as out:
            for elem in output:
                out.write(elem + "\n")