def _train_impl(self, X, y):
    """Fit the underlying deepy network on (X, y) and return self.

    Wraps the samples in a SequentialDataset, batches them, and hands
    them to the configured trainer.  Returns self so calls can be
    chained (scikit-learn-style fluent interface).
    """
    if self.spec.is_convolution:
        # Collapse X down to its first three axes for the convolutional
        # front-end -- assumes the trailing axes are size 1; TODO confirm
        # the expected input rank against the spec.
        X = X.reshape(X.shape[:3])
    self.iterations = 0
    # Materialize the pairs: in Python 3, zip() returns a one-shot
    # iterator that would be exhausted after the first pass over the
    # dataset (i.e. the first epoch), silently yielding nothing after.
    data = list(zip(X, y))
    self.dataset = SequentialDataset(data)
    minibatches = MiniBatches(self.dataset, batch_size=20)
    self.trainer.run(minibatches, controllers=self.controllers)
    return self
        # One-hot encode the character: row 0 of a 1x26 matrix whose
        # single 1 sits at index (char_code - ord("a")).
        char_vectors.append(
            np.eye(1, 26, char_code - ord("a"), dtype=FLOATX)[0])
    # Drop words of 20+ characters -- they would not fit the
    # pad_left(20) width used below.  NOTE(review): this `continue`
    # must belong to the enclosing word loop (inside the character
    # loop it would be a no-op); verify against the full file.
    if len(char_vectors) >= 20:
        continue
    word_matrix = np.vstack(char_vectors)
    data.append((word_matrix, label))

# Shuffle the data (fixed seed 3 keeps the train/valid split reproducible)
random.Random(3).shuffle(data)

# Separate data: first 15% of the shuffled samples for validation
valid_size = int(len(data) * 0.15)
train_set = data[valid_size:]
valid_set = data[:valid_size]

dataset = SequentialDataset(train_set, valid=valid_set)
# Left-pad every sequence to length 20 so minibatches are rectangular
dataset.pad_left(20)
dataset.report()
batch_set = MiniBatches(dataset)

if __name__ == '__main__':
    # 26-dim one-hot characters; input_tensor=3 presumably means a
    # 3-D (batch, time, feature) input -- confirm against deepy docs.
    model = NeuralClassifier(input_dim=26, input_tensor=3)
    model.stack(
        RNN(hidden_size=30, input_type="sequence", output_type="sequence",
            vector_core=0.1),
        # NOTE: chunk is truncated here mid-call in the source view.
        RNN(hidden_size=30, input_type="sequence", output_type="sequence",
    # Build one sample of the "adding problem": each timestep emits a
    # pair (a, b) where a is a random value in [0, 1) and b marks
    # whether that value is included; the regression target is the sum
    # of the marked values.
    for i in range(SEQUENCE_LEN):
        a = rand.uniform(0, 1)
        b = 1 if i in selected_items else 0
        if b == 1:
            # NOTE(review): `sum` shadows the builtin; kept as-is here.
            sum += a
        sequence.append(np.array([a, b], dtype=FLOATX))
    sequence = np.vstack(sequence)
    sum = np.array([sum], dtype=FLOATX)
    data.append((sequence, sum))

# Separate data: first 1000 samples for validation, the rest for training
valid_size = int(1000)
train_set = data[valid_size:]
valid_set = data[:valid_size]

dataset = SequentialDataset(train_set, valid=valid_set)
dataset.report()
batch_set = MiniBatches(dataset, batch_size=32)

if __name__ == '__main__':
    ap = ArgumentParser()
    # Where to persist the trained model (gzip archive next to this file).
    ap.add_argument("--model",
                    default=os.path.join(os.path.dirname(__file__),
                                         "models", "sequence_adding_100_2.gz"))
    args = ap.parse_args()

    # Regressor over the 2-channel sequence: an IRNN layer that reduces
    # the sequence to one vector, followed by a single-unit Dense output.
    model = NeuralRegressor(input_dim=2, input_tensor=3)
    model.stack(IRNN(hidden_size=100, input_type="sequence", output_type="one"),
                Dense(1))
# Create one-hot vector char_vectors.append(np.eye(1, 26, char_code - ord("a"), dtype=FLOATX)[0]) if len(char_vectors) >= 20: continue word_matrix = np.vstack(char_vectors) data.append((word_matrix, label)) # Shuffle the data random.Random(3).shuffle(data) # Separate data valid_size = int(len(data) * 0.15) train_set = data[valid_size:] valid_set = data[:valid_size] dataset = SequentialDataset(train_set, valid=valid_set) dataset.pad_left(20) dataset.report() batch_set = MiniBatches(dataset) if __name__ == '__main__': model = NeuralClassifier(input_dim=26, input_tensor=3) model.stack(RNN(hidden_size=30, input_type="sequence", output_type="sequence", vector_core=0.1), RNN(hidden_size=30, input_type="sequence", output_type="sequence", vector_core=0.3), RNN(hidden_size=30, input_type="sequence", output_type="sequence", vector_core=0.6), RNN(hidden_size=30, input_type="sequence", output_type="one", vector_core=0.9), Dense(4), Softmax()) trainer = SGDTrainer(model)