def __init__(self, size=256, depth=3, batch_size=32, keep_prob=0.1,
             max_nsteps=1000, checkpoint_dir="checkpoint", forward_only=False):
    """Initialize the parameters for a Deep LSTM model.

    Args:
        size: int, dimensionality of the inputs into the Deep LSTM cell
            [32, 64, 256]
        depth: int, number of stacked LSTM layers (also scales output_size)
        batch_size: int, size of a batch [16, 32]
        keep_prob: float between 0 and 1, probability of *keeping* a unit
            active (TF-style keep probability) [0.0, 0.1, 0.2]
        max_nsteps: int, max time unit [1000]
        checkpoint_dir: str, directory where checkpoints are stored
        forward_only: bool, if True skip dropout (inference-only mode)
    """
    super(DeepLSTM, self).__init__()

    self.size = int(size)
    self.depth = int(depth)
    self.batch_size = int(batch_size)
    # Output concatenates all depth layers' hidden states (skip connections).
    self.output_size = self.depth * self.size
    self.keep_prob = float(keep_prob)
    self.max_nsteps = int(max_nsteps)
    self.checkpoint_dir = checkpoint_dir

    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for wall-clock interval timing.
    start = time.perf_counter()
    print(" [*] Building Deep LSTM...")
    self.cell = LSTMCell(size, forget_bias=0.0)

    if not forward_only and self.keep_prob < 1:
        # keep_prob is a *keep* probability (TF convention, see docstring),
        # while torch.nn.Dropout takes a *drop* probability — convert here.
        # NOTE(review): calling a Dropout module on another module only
        # works if Dropout/LSTMCell here are project-level wrappers;
        # stock torch.nn.Dropout operates on tensors — confirm.
        d = nn.Dropout(p=1.0 - self.keep_prob)
        self.cell = d(self.cell)

    self.stacked_cell = MultiRNNCellWithSkipConn([self.cell] * depth)
    self.initial_state = self.stacked_cell.zero_state(batch_size, torch.float32)