Example No. 1
import numpy as np

import layers  # project-local module providing NeuralNet
from optimizer import Optimizer  # project-local optimizer (assumed import path)


def optimizer_test():
    """
    Manually verify the functionality of the optimizer + neural network.
    """
    net = layers.NeuralNet(layers=[['input', 5], ['tanh', 20], ['linear', 3]])
    optimizer = Optimizer(net)
    X = []
    Y = []
    # Class A: all-zeros input -> all-ones target
    for i in range(10):
        X.append(np.matrix(np.zeros(5)))
        Y.append(np.matrix(np.ones(3)))
    # Class B: all-ones input -> one-hot target on index 1
    for i in range(10):
        X.append(np.matrix(np.ones(5)))
        y = np.matrix(np.zeros(3))
        y[:, 1] = 1
        Y.append(y)
                
    optimizer.start_error_plot()
    net.alpha_weight = 1e-4
    net.alpha_bias = 1e-4
    net.adagrad_decay = 0.99
    for i in range(10000):
        optimizer.run_minibatch(X, Y, batch_size=10)

        # Every 100 iterations, refresh the error plot and compare the
        # network's predictions against the targets for one sample per class.
        if i % 100 == 0:
            print()
            optimizer.update_error_plot()
            print(net.forward(X[13].ravel()))
            print(Y[13])
            print(net.forward(X[0].ravel()))
            print(Y[0])
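
The update rule behind run_minibatch is not shown; given the alpha_weight, alpha_bias, and adagrad_decay knobs above, it is presumably a decayed-accumulator (RMSProp-style) step. A minimal sketch, assuming per-parameter gradients are available (all names below are hypothetical):

import numpy as np

def rmsprop_step(w, grad, cache, alpha=1e-4, decay=0.99, eps=1e-8):
    # Decay the running average of squared gradients, then scale the step
    # by its root so frequently-updated parameters take smaller steps.
    cache = decay * cache + (1 - decay) * grad ** 2
    w = w - alpha * grad / (np.sqrt(cache) + eps)
    return w, cache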
Example No. 2
import pygame

import layers  # project-local
import optimizer  # project-local
# DeepQ, Game, WIDTH, and HEIGHT are also project-local (import paths not shown)


def run_deepq_script():
    """
    Runs the deep Q-learning script.
    """
    autoencoder = layers.NeuralNet([['input', {
        "size": (1, 220)
    }], ['relu', {
        "size": (1, 15)
    }], ['relu', {
        "size": (1, 220)
    }]])
    autoencoder.load_parameters(
        '/Users/wenqin/Documents/GitHub/grade-12-assignments-wenqinYe/Culminating/parameters/encoder'
    )

    q_net = layers.NeuralNet([['input', {
        "size": (1, 15)
    }], ['relu', {
        "size": (1, 8)
    }], ['relu', {
        "size": (1, 7)
    }]])
    opter = optimizer.Optimizer(q_net)
    deep_q = DeepQ(matris=None,
                   optimizer=opter,
                   neural_net=q_net,
                   encoder_net=autoencoder)

    # Preload the saved replay memory (episodes) from disk.
    episodes = deep_q.open_object(
        '/Users/wenqin/Documents/GitHub/grade-12-assignments-wenqinYe/Culminating/parameters/memory_v2.pkl'
    )

    def callback(matris, time):
        # Called each game tick: attach the current board, act, then learn.
        deep_q.matris = matris
        deep_q.play(time)
        deep_q.learn()

    # Note: this guard sits inside the function, so the game loop only starts
    # when run_deepq_script() is called from the top-level script.
    if __name__ == '__main__':
        pygame.init()
        screen = pygame.display.set_mode((WIDTH, HEIGHT))
        pygame.display.set_caption("MaTris")
        game = Game()
        while True:
            game.main(screen, callback)
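
How DeepQ wires the two networks together is not shown here; since the autoencoder maps 220 -> 15 -> 220 and q_net takes a 15-dim input, the 15-unit bottleneck activation is presumably the compressed state the Q-network scores. A minimal sketch, assuming the project-local NeuralNet can return an intermediate layer's activation (the up_to_layer keyword is hypothetical):

def encode_state(encoder_net, board_vector):
    # Run only the 220 -> 15 half of the autoencoder and keep the
    # bottleneck activation as the Q-network's input state.
    return encoder_net.forward(board_vector, up_to_layer=1)  # hypothetical API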
Example No. 3
def __init__(self):
    self.episodes = []
    self.X = []
    self.Y = []
    # Autoencoder: 220-dim board -> 15-dim bottleneck -> 220-dim reconstruction
    self.autoencoder = layers.NeuralNet([
        ['input', {"size": (1, 220)}],
        ['relu', {"size": (1, 15)}],
        ['relu', {"size": (1, 220)}]
    ])
    self.optimizer = optimizer.Optimizer(self.autoencoder)
    self.autoencoder.alpha_weight = 1e-3
    self.autoencoder.alpha_bias = 1e-3
    self.autoencoder.adagrad_decay = 0.9

    self.matris = Matris()
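
An autoencoder's training target is its own input, so the X and Y lists above are presumably filled with identical pairs. A minimal sketch, assuming the board flattens to the (1, 220) shape of the input layer (flatten_board is a hypothetical helper):

import numpy as np

def flatten_board(board):
    # 22 x 10 grid -> 1 x 220 row vector, matching the input layer above
    return np.matrix(np.asarray(board).reshape(1, 220))

def add_training_example(X, Y, board):
    x = flatten_board(board)
    X.append(x)
    Y.append(x)  # reconstruction target equals the input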
Example No. 4
    def __init__(self, args):
        super(TriAN, self).__init__()
        self.args = args
        self.embedding_dim = 300
        self.embedding = nn.Embedding(len(vocab),
                                      self.embedding_dim,
                                      padding_idx=0)
        self.embedding.weight.data.fill_(0)
        self.embedding.weight.data[:2].normal_(0, 0.1)
        self.pos_embedding = nn.Embedding(len(pos_vocab),
                                          args.pos_emb_dim,
                                          padding_idx=0)
        self.pos_embedding.weight.data.normal_(0, 0.1)
        self.ner_embedding = nn.Embedding(len(ner_vocab),
                                          args.ner_emb_dim,
                                          padding_idx=0)
        self.ner_embedding.weight.data.normal_(0, 0.1)
        self.rel_embedding = nn.Embedding(len(rel_vocab),
                                          args.rel_emb_dim,
                                          padding_idx=0)
        self.rel_embedding.weight.data.normal_(0, 0.1)
        self.RNN_TYPES = {'lstm': nn.LSTM, 'gru': nn.GRU}

        self.p_q_emb_match = layers.SeqAttnMatch(self.embedding_dim)

        # Input size to RNN: word emb + question-aligned emb + pos emb
        # + ner emb + 5 manual features + rel emb
        doc_input_size = 2 * self.embedding_dim + args.pos_emb_dim + args.ner_emb_dim + 5 + args.rel_emb_dim

        # Max passage size
        p_max_size = args.p_max_size
        self.p_max_size = p_max_size

        # Max question size
        q_max_size = args.q_max_size
        self.q_max_size = q_max_size

        # RNN document encoder
        self.doc_rnn = layers.StackedBRNN(
            input_size=doc_input_size,
            hidden_size=args.hidden_size,
            num_layers=args.doc_layers,
            dropout_rate=0,
            dropout_output=args.dropout_rnn_output,
            concat_layers=False,
            rnn_type=self.RNN_TYPES[args.rnn_type],
            padding=args.rnn_padding)

        # RNN question encoder: word emb + pos emb
        qst_input_size = self.embedding_dim + args.pos_emb_dim
        self.question_rnn = layers.StackedBRNN(
            input_size=qst_input_size,
            hidden_size=args.hidden_size,
            num_layers=1,
            dropout_rate=0,
            dropout_output=args.dropout_rnn_output,
            concat_layers=False,
            rnn_type=self.RNN_TYPES[args.rnn_type],
            padding=args.rnn_padding)

        # Output sizes of rnn encoders
        doc_hidden_size = 2 * args.hidden_size
        self.doc_hidden_size = doc_hidden_size
        question_hidden_size = 2 * args.hidden_size
        self.question_hidden_size = question_hidden_size

        # Attention over passage and question
        self.q_self_attn_start = layers.LinearSeqAttn(question_hidden_size,
                                                      q_max_size)
        self.p_q_attn_start = layers.BilinearSeqAttn(p_max_size, q_max_size,
                                                     p_max_size)

        self.q_self_attn_end = layers.LinearSeqAttn(question_hidden_size,
                                                    q_max_size)
        self.p_q_attn_end = layers.BilinearSeqAttn(p_max_size, q_max_size,
                                                   p_max_size)

        # Bilinear layers producing start/end scores (sigmoid yields probabilities)
        self.p_q_bilinear_start = nn.Bilinear(question_hidden_size,
                                              question_hidden_size, 1)
        self.p_q_bilinear_end = nn.Bilinear(question_hidden_size,
                                            question_hidden_size, 1)
        self.p_linear_start = nn.Linear(question_hidden_size, 1)
        self.p_linear_end = nn.Linear(question_hidden_size, 1)
        # Attention start end
        self.start_end_attn = layers.BilinearProbaAttn(p_max_size)
        self.end_start_attn = layers.BilinearProbaAttn(p_max_size)

        # Feed forward
        self.feedforward_start = layers.NeuralNet(p_max_size, p_max_size,
                                                  p_max_size)
        self.feedforward_end = layers.NeuralNet(p_max_size, p_max_size,
                                                p_max_size)
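
The forward pass is not part of this snippet, but the doc_input_size formula above implies the document RNN consumes a feature-wise concatenation of the embeddings and manual features. A minimal sketch of that step (tensor names are hypothetical; shapes are [batch, seq_len, dim]):

import torch

def build_doc_rnn_input(p_emb, p_q_weighted_emb, p_pos_emb, p_ner_emb,
                        p_features, p_rel_emb):
    # 2 * word emb (raw + question-aligned) + pos + ner + 5 manual + rel
    return torch.cat(
        [p_emb, p_q_weighted_emb, p_pos_emb, p_ner_emb, p_features, p_rel_emb],
        dim=2)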
Example No. 5
}
action_word = [
    'left', 'right', 'rotate0', 'rotate1', 'rotate2', 'rotate3', 'hard_drop'
]

# input: 22 * 10 grid + 7 tetrominoes * 4 rotations
# output: left, right, rotate0-rotate3, hard_drop (the 7 actions above)

column_neurons = []

#n.append(layers.NeuralNet(layers=[['input', 20], ['tanh', 10], ['tanh', 4]]))
n = layers.NeuralNet(layers=[['input', {
    "size": (1, 220)
}], ['tanh', {
    "size": (1, 50)
}], ['tanh', {
    "size": (1, 20)
}], ['linear', {
    "size": (1, 7)
}]])
o = optimizer.Optimizer(n)
n.alpha_weight = 1e-4
n.alpha_bias = 1e-4
n.adagrad_decay = 0.9
"""
Error = (Q(s', a') + reward) - Q(s, a))
Error is prediction of neural network subtracted from
the actual reward and then teh prediction from completing optimaly in the new state
"""
X = []
Y = []
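
To make the docstring's TD error concrete, here is a minimal sketch of how one transition could be turned into a training pair for the X and Y lists above. gamma and the transition fields are assumptions; n.forward follows the usage in Example No. 1:

gamma = 0.9  # assumed discount factor

def add_td_example(state, action, reward, next_state):
    q = n.forward(state)          # current estimates for all 7 actions
    target = q.copy()
    # Bellman target: reward plus the discounted best value in the next state
    target[0, action] = reward + gamma * n.forward(next_state).max()
    X.append(state)
    Y.append(target)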