def __init__(self, weight_matrices, batches, fout, hidden_biases=None,
             visible_biases=None, binary_output=False, line_searches=3):
    """
    Initialize variables of the finetuning.

    @param weight_matrices: The list of weight matrices for the DBN.
    @param batches: The list of batch sizes.
    @param fout: The output function, used for progress monitoring of the training.
    @param hidden_biases: The hidden biases for the finetuning.
    @param visible_biases: The visible biases for the finetuning.
    @param binary_output: Whether the output of the DBN must be binary. If so,
        Gaussian noise is added to the bottleneck layer.
    @param line_searches: Number of line searches for the conjugate gradient optimization.
    """
    # Progress and info monitoring
    self.fout = fout
    self.binary_output = binary_output
    self.line_searches = line_searches

    # Sampled noise matrices (generated below when binary_output is set)
    self.sampled_noise = []
    self.current_sampled_noise = None

    self.batches = batches

    if hidden_biases is None:
        # Finetuning continues on weight matrices that already have the
        # bias rows appended.
        self.weight_matrices_added_biases = weight_matrices
    else:
        # Unfold the network to make a deep autoencoder: the decoder reuses
        # the encoder weights in reverse order (transposed), and the hidden
        # and visible biases are appended as the last row of each matrix.
        self.weight_matrices_added_biases = []
        weight_matrices = append(weight_matrices, weight_matrices[::-1])
        visible_hidden_biases = append(hidden_biases, visible_biases[::-1])

        # Add the visible and hidden biases to the weight matrices
        for i in range(len(weight_matrices)):
            if i < (len(weight_matrices) // 2):
                # Encoder half: append the hidden bias as a row matching the
                # weight matrix's column dimension.
                tmp = zeros((1, len(visible_hidden_biases[i])))
                tmp[0] = visible_hidden_biases[i]
                self.weight_matrices_added_biases.append(
                    append(weight_matrices[i], tmp, axis=0))
            else:
                # Decoder half: transpose the weight matrix and append the
                # visible bias as a row.
                tmp = zeros((1, len(visible_hidden_biases[i])))
                tmp[0] = visible_hidden_biases[i]
                self.weight_matrices_added_biases.append(
                    append(weight_matrices[i].T, tmp, axis=0))

    if not os.path.exists(env_paths.get_dbn_batches_lst_path()):
        self.generate_large_batch()
    self.large_batches_lst = load_large_batches_lst()

    if self.binary_output:
        self.generate_sampled_noise(self.weight_matrices_added_biases)
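
# Illustrative sketch (not part of the original class): it reproduces the
# unfolding step from __init__ above on toy layer sizes, so the shapes of the
# unfolded weight-plus-bias matrices are easy to check. The layer sizes, the
# plain-list handling and the helper name are assumptions for this example only.
def _unfold_shapes_demo():
    import numpy as np

    layer_sizes = [20, 10, 5, 2]  # toy sizes: visible layer down to the bottleneck
    weight_matrices = [np.random.randn(m, n)
                       for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
    hidden_biases = [np.zeros(n) for n in layer_sizes[1:]]
    visible_biases = [np.zeros(m) for m in layer_sizes[:-1]]

    # Mirror the encoder weights/biases to build the decoder, as in __init__.
    unfolded_weights = list(weight_matrices) + list(weight_matrices[::-1])
    unfolded_biases = list(hidden_biases) + list(visible_biases[::-1])

    unfolded = []
    for i, (w, b) in enumerate(zip(unfolded_weights, unfolded_biases)):
        if i >= len(unfolded_weights) // 2:
            w = w.T  # decoder half reuses the encoder weights transposed
        unfolded.append(np.append(w, b[np.newaxis, :], axis=0))  # bias as last row

    return [m.shape for m in unfolded]
    # -> [(21, 10), (11, 5), (6, 2), (3, 5), (6, 10), (11, 20)]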
def __init__(self, weight_matrices, batches, fout, fprogress,
             hidden_biases=None, visible_biases=None):
    """
    Initialize variables of the finetuning.

    @param weight_matrices: The list of weight matrices for the DBN.
    @param batches: The list of batch sizes.
    @param fout: The output function, used for progress monitoring of the training.
    @param fprogress: The incrementer, used for progress monitoring of the training.
    @param hidden_biases: The hidden biases for the finetuning.
    @param visible_biases: The visible biases for the finetuning.
    """
    # Progress and info monitoring
    self.fout = fout
    self.fprogress = fprogress

    self.batches = batches

    if hidden_biases is None:
        # Finetuning continues on weight matrices that already have the
        # bias rows appended.
        self.weight_matrices_added_biases = weight_matrices
    else:
        # Unfold the network to make a deep autoencoder
        self.weight_matrices_added_biases = []
        weight_matrices = append(weight_matrices, weight_matrices[::-1])
        visible_hidden_biases = append(hidden_biases, visible_biases[::-1])

        # Add the visible and hidden biases to the weight matrices
        for i in range(len(weight_matrices)):
            if i < (len(weight_matrices) // 2):
                # Encoder half: append the hidden bias as a row
                tmp = zeros((1, len(visible_hidden_biases[i])))
                tmp[0] = visible_hidden_biases[i]
                self.weight_matrices_added_biases.append(
                    append(weight_matrices[i], tmp, axis=0))
            else:
                # Decoder half: transpose the weight matrix and append the visible bias
                tmp = zeros((1, len(visible_hidden_biases[i])))
                tmp[0] = visible_hidden_biases[i]
                self.weight_matrices_added_biases.append(
                    append(weight_matrices[i].T, tmp, axis=0))

    if not os.path.exists(env_paths.get_dbn_batches_lst_path()):
        self.generate_large_batch()
    self.large_batches_lst = load_large_batches_lst()
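
# Sketch (not from the original module) of how weight matrices with the bias
# stored as the last row are commonly consumed in a forward pass: a column of
# ones is appended to the data so that data.dot(w) applies the bias implicitly.
# The function name, toy shapes and the sigmoid activation are assumptions for
# illustration only.
def _forward_with_bias_row_demo():
    import numpy as np

    w_plus_bias = np.vstack([np.random.randn(20, 10),   # weights (20 -> 10)
                             np.zeros((1, 10))])        # bias as the last row
    data = np.random.rand(5, 20)                        # a batch of 5 examples

    data_ones = np.append(data, np.ones((5, 1)), axis=1)  # append bias column
    activations = 1.0 / (1.0 + np.exp(-data_ones.dot(w_plus_bias)))
    return activations  # shape (5, 10)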
def load_large_batches_lst():
    # Load the list of large batches from the path given by env_paths;
    # 's' is the module's serializer (a pickle-style load/dump interface).
    with open(env_paths.get_dbn_batches_lst_path(), 'rb') as f:
        return s.load(f)


def save_large_batches_lst(lst):
    # Serialize the list of large batches to the same path.
    with open(env_paths.get_dbn_batches_lst_path(), 'wb') as f:
        s.dump(lst, f)
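
# Minimal round-trip sketch for the two helpers above, assuming 's' is the
# standard 'pickle' module and using a temporary file in place of
# env_paths.get_dbn_batches_lst_path(); both assumptions are for illustration only.
def _batches_lst_roundtrip_demo():
    import pickle as s
    import tempfile
    import os

    path = os.path.join(tempfile.mkdtemp(), 'dbn_batches.lst')
    batches_lst = [1000, 1000, 500]  # e.g. number of documents per large batch

    with open(path, 'wb') as f:
        s.dump(batches_lst, f)           # mirrors save_large_batches_lst
    with open(path, 'rb') as f:
        assert s.load(f) == batches_lst  # mirrors load_large_batches_lst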