Exemplo n.º 1
0
class ConvolutionalNN():
    """Wraps a CNN text classifier: dataset loading, training with early
    stopping on validation loss, and evaluation of the best saved checkpoint.
    """

    def __init__(self, embedding, batch_size):
        """Load the dataset and build the CNN model and its training handler.

        embedding  -- name of the pretrained embedding to use (also part of
                      the checkpoint file name)
        batch_size -- mini-batch size for the train/valid/test iterators
        """
        TEXT, vocab_size, word_embeddings, self.train_iter, self.valid_iter, self.test_iter = load_dataset.load(
            embedding=embedding, batch_size=batch_size)
        self.embedding = embedding

        # Fixed hyper-parameters for this experiment.
        output_size = 10           # number of target classes
        in_channel = 1
        out_channel = 16
        kernel_heights = [3, 5, 7]
        keep_probab = 0            # NOTE(review): keep-probability of 0 looks odd — confirm CNN's dropout semantics
        stride = 1
        padding = [1, 2, 3]
        embedding_length = 300

        self.model = CNN(batch_size, output_size, in_channel, out_channel,
                         kernel_heights, stride, padding, keep_probab,
                         vocab_size, embedding_length, word_embeddings)

        # Optimize only parameters that require gradients (frozen embeddings stay fixed).
        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                            self.model.parameters()),
                                     weight_decay=0.0005,
                                     lr=0.0001)
        loss_fn = F.cross_entropy
        self.training_handler = TrainingHandler(optimizer, loss_fn, batch_size)

    def train(self, numberOfEpochs):
        """Train for up to numberOfEpochs epochs with early stopping.

        The model is saved whenever validation loss improves by more than
        0.005; training stops after 3 consecutive epochs without such an
        improvement.
        """
        patience_threshold = 3
        patience = patience_threshold
        min_valid_loss = np.inf  # np.Inf was removed in NumPy 2.0; np.inf is the canonical spelling
        for epoch in range(numberOfEpochs):
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            train_loss, train_acc = self.training_handler.train_model(
                self.model, self.train_iter, epoch)
            val_loss, val_acc = self.training_handler.eval_model(
                self.model, self.valid_iter)
            # Bug fix: val_loss used '{:3f}' (minimum field width 3) instead of
            # '{:.3f}' (3 decimal places), inconsistent with train_loss.
            message = (f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, '
                       f'Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:.3f}, '
                       f'Val. Acc: {val_acc:.2f}%')
            print(message)
            output_handler.outputFileHandler.write(message)

            patience -= 1
            if val_loss < min_valid_loss and abs(min_valid_loss -
                                                 val_loss) > 0.005:
                patience = patience_threshold
                torch.save(self.model, "./saved_models/cnn-" + self.embedding)
                min_valid_loss = val_loss

            if patience == 0:
                break

    def test(self):
        """Reload the best checkpoint and return (loss, accuracy) on the test set."""
        self.model = torch.load("./saved_models/cnn-" + self.embedding)
        test_loss, test_acc = self.training_handler.eval_model(
            self.model, self.test_iter)
        return test_loss, test_acc
Exemplo n.º 2
0
 def __init__(self, *args, **kwargs):
     """Set up the model server's state, then start the underlying socket server.

     Initializes the learning model, the connection/training bookkeeping,
     the replay memory and CSV logging, then delegates to the SocketServer
     base constructor with the class HOST/PORT (presumably binding the
     listening socket — confirm in SocketServer).
     """
     self.model = BasketballModel()
     self.handler = TrainingHandler()
     self.status = 0
     self.last_connection_amount = 0
     # Server start time; elapsed seconds since this moment are used as timestamps.
     self.running_time = datetime.now()
     self.memory = ReplayMemory(100000)
     self.csv = CSVFile()
     super(ModelServer, self).__init__(self.HOST, self.PORT)
class LongShortTermMemoryAttention():
    """Wraps an attention-based LSTM text classifier: dataset loading,
    training with early stopping, and evaluation of the best checkpoint.
    """

    def __init__(self, embedding, batch_size):
        """Load the dataset and build the attention model and its training handler.

        embedding  -- name of the pretrained embedding to use (also part of
                      the checkpoint file name)
        batch_size -- mini-batch size for the train/valid/test iterators
        """
        TEXT, vocab_size, word_embeddings, self.train_iter, self.valid_iter, self.test_iter = load_dataset.load(
            embedding=embedding, batch_size=batch_size)
        self.embedding = embedding

        # Fixed hyper-parameters for this experiment.
        output_size = 10      # number of target classes
        hidden_size = 256
        embedding_length = 300

        self.model = AttentionModel(batch_size, output_size, hidden_size,
                                    vocab_size, embedding_length,
                                    word_embeddings)

        # Optimize only parameters that require gradients (frozen embeddings stay fixed).
        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                            self.model.parameters()),
                                     weight_decay=0.0005,
                                     lr=0.0001)
        loss_fn = F.cross_entropy
        self.training_handler = TrainingHandler(optimizer, loss_fn, batch_size)

    def train(self, numberOfEpochs):
        """Train for up to numberOfEpochs epochs with early stopping.

        The model is saved whenever validation loss improves by more than
        0.005; training stops after 3 consecutive epochs without such an
        improvement.
        """
        patience_threshold = 3
        patience = patience_threshold
        min_valid_loss = np.inf  # np.Inf was removed in NumPy 2.0; np.inf is the canonical spelling
        for epoch in range(numberOfEpochs):
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            train_loss, train_acc = self.training_handler.train_model(
                self.model, self.train_iter, epoch)
            val_loss, val_acc = self.training_handler.eval_model(
                self.model, self.valid_iter)
            # Bug fix: val_loss used '{:3f}' (minimum field width 3) instead of
            # '{:.3f}' (3 decimal places), inconsistent with train_loss.
            message = (f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, '
                       f'Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:.3f}, '
                       f'Val. Acc: {val_acc:.2f}%')
            print(message)
            output_handler.outputFileHandler.write(message)

            patience -= 1
            if val_loss < min_valid_loss and abs(min_valid_loss -
                                                 val_loss) > 0.005:
                patience = patience_threshold
                torch.save(self.model,
                           "./saved_models/lstm-attn-" + self.embedding)
                min_valid_loss = val_loss

            if patience == 0:
                break

    def test(self):
        """Reload the best checkpoint and return (loss, accuracy) on the test set."""
        self.model = torch.load("./saved_models/lstm-attn-" + self.embedding)
        test_loss, test_acc = self.training_handler.eval_model(
            self.model, self.test_iter)
        return test_loss, test_acc
    def __init__(self, embedding, batch_size):
        """Load the dataset and build a logistic-regression classifier and
        its training handler.

        embedding  -- name of the pretrained embedding to use
        batch_size -- mini-batch size for the train/valid/test iterators
        """
        TEXT, vocab_size, word_embeddings, self.train_iter, self.valid_iter, self.test_iter = load_dataset.load(
            embedding=embedding, batch_size=batch_size)
        self.embedding = embedding

        output_size = 10       # number of target classes
        embedding_length = 300

        # Removed unused local `learning_rate = 2e-5`: the optimizer below
        # actually uses lr=0.0001, consistent with the sibling classes.
        self.model = LogisticRegressionModel(output_size, vocab_size,
                                             embedding_length, word_embeddings)

        # Optimize only parameters that require gradients (frozen embeddings stay fixed).
        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                            self.model.parameters()),
                                     weight_decay=0.0005,
                                     lr=0.0001)
        loss_fn = F.cross_entropy
        self.training_handler = TrainingHandler(optimizer, loss_fn, batch_size)
    def __init__(self, embedding, batch_size):
        """Load the dataset and assemble an RCNN classifier together with
        the handler that trains and evaluates it.

        embedding  -- name of the pretrained embedding to use
        batch_size -- mini-batch size for the train/valid/test iterators
        """
        (TEXT, vocab_size, word_embeddings, self.train_iter,
         self.valid_iter, self.test_iter) = load_dataset.load(
            embedding=embedding, batch_size=batch_size)
        self.embedding = embedding

        # Fixed hyper-parameters for this experiment.
        num_classes = 10
        hidden_dim = 256
        embed_dim = 300

        self.model = RCNN(batch_size, num_classes, hidden_dim,
                          vocab_size, embed_dim, word_embeddings)

        # Only parameters that require gradients are optimized.
        trainable = (p for p in self.model.parameters() if p.requires_grad)
        optimizer = torch.optim.Adam(trainable, weight_decay=0.0005, lr=0.0001)
        self.training_handler = TrainingHandler(optimizer, F.cross_entropy,
                                                batch_size)
Exemplo n.º 6
0
    def setUp(self):
        """Build a fresh fixture: one player ("jerry") registered in a new
        in-memory database, attached to a mock protocol whose active handler
        is a TrainingHandler.
        """
        MockProtocol.set_handler_class(handler_class=LogonHandler)
        self.protocol = MockProtocol()
        # Fresh database per test so state cannot leak between tests.
        training_handler.player_database = PlayerDatabase()
        self.player = Player(28)
        self.player.name = "jerry"
        training_handler.player_database.add_player(self.player)

        # Replace the handler installed at construction (presumably a
        # LogonHandler — see set_handler_class above) with a TrainingHandler.
        self.protocol.remove_handler()
        self.player.protocol = self.protocol
        self.protocol.add_handler(TrainingHandler(self.protocol, self.player))
        self.handler = self.protocol.handler
        # Sanity check: exactly one player registered.
        self.assertEqual(len(list(training_handler.player_database.all())), 1)
        # Clear any output produced during setup so tests see only their own.
        self.protocol.send_data = []
        # Show full diffs on assertion failures.
        self.maxDiff = None
Exemplo n.º 7
0
    def __init__(self, embedding, batch_size):
        """Load the dataset and assemble a CNN classifier together with the
        handler that trains and evaluates it.

        embedding  -- name of the pretrained embedding to use
        batch_size -- mini-batch size for the train/valid/test iterators
        """
        (TEXT, vocab_size, word_embeddings, self.train_iter,
         self.valid_iter, self.test_iter) = load_dataset.load(
            embedding=embedding, batch_size=batch_size)
        self.embedding = embedding

        # Fixed hyper-parameters of the convolutional architecture.
        num_classes = 10
        input_channels = 1
        feature_maps = 16
        window_sizes = [3, 5, 7]
        keep_prob = 0
        conv_stride = 1
        conv_padding = [1, 2, 3]
        embed_dim = 300

        self.model = CNN(batch_size, num_classes, input_channels,
                         feature_maps, window_sizes, conv_stride,
                         conv_padding, keep_prob, vocab_size,
                         embed_dim, word_embeddings)

        # Only parameters that require gradients are optimized.
        trainable = (p for p in self.model.parameters() if p.requires_grad)
        optimizer = torch.optim.Adam(trainable, weight_decay=0.0005, lr=0.0001)
        self.training_handler = TrainingHandler(optimizer, F.cross_entropy,
                                                batch_size)
Exemplo n.º 8
0
class ModelServer(SocketServer):
    """Socket server that mediates between throw clients and the model.

    Clients send distance requests and throw results as JSON; the server
    batches them via a TrainingHandler, lets the model learn from completed
    results, and sends force/height predictions back to every client.
    """

    HOST = 'localhost'
    PORT = 5600

    def __init__(self, *args, **kwargs):
        """Set up server state, then start the underlying socket server."""
        self.model = BasketballModel()
        self.handler = TrainingHandler()
        # Toggled in on_step: 0 after learning from results, 1 after sending predictions.
        self.status = 0
        self.last_connection_amount = 0
        # Server start time; elapsed seconds since this moment timestamp CSV rows.
        self.running_time = datetime.now()
        self.memory = ReplayMemory(100000)
        self.csv = CSVFile()
        super(ModelServer, self).__init__(self.HOST, self.PORT)

    def on_message_received(self, sock: socket, data, received_data: str,
                            addr: Tuple[str, int]) -> None:
        """Parse an incoming JSON message and record a result or a request.

        A 'result' message carries throw/force/distance floats, which are
        logged to CSV, pushed to replay memory, and stored on the connection;
        a 'request' message stores the requested distance on the connection.
        """
        request = json.loads(received_data)
        print('Received {} from {}'.format(request, addr))
        if is_correct_message(request):
            # Only the port identifies the connection (host was unused).
            _, port = addr
            conn = self.handler.get_connection(port)
            if is_result(request):
                res_throw = float(request['throw'])
                res_force = float(request['force'])
                res_distance = float(request['distance'])
                self.csv.add_observation(res_throw, res_force, res_distance,
                                         (datetime.now() -
                                          self.running_time).total_seconds())
                self.memory.push(res_throw, res_force, res_distance)
                conn.result = res_distance
            elif is_request(request):
                conn.distance = float(request['distance'])

    def on_step(self):
        """One server tick: learn from completed throws, then predict new ones."""
        # If all the results from the throws are in, learn from them.
        if self.handler.all_results_are_in():
            self.model.learn(self.handler.predictions,
                             self.handler.get_all_results())
            # Clear results and predictions so fresh ones can accumulate.
            self.handler.clear_results()
            del self.handler.predictions
            self.status = 0
        # If all the distances are in, predict a throw for each client.
        if self.handler.all_distances_are_in():
            throws = self.model.throw(self.handler.get_all_distances())
            # PyTorch tries to be clever, but we need it in the right
            # dimensions: keep a 2-D batch even for a single prediction.
            if len(throws.shape) <= 1:
                throws = throws.unsqueeze(0)
            # Keep the predictions so we can learn from their results later.
            self.handler.predictions = throws
            for conn, throw in zip(self.handler.get_connections(), throws):
                # In order to send the tensor data over the network, we must
                # first convert the tensor to simple python data types.
                t = throw[0].tolist()
                # NOTE(review): the same value is sent as both force and
                # height — confirm this is intentional.
                self.send_prediction_to_connection(conn, t, t)
            # Clear distances afterwards.
            self.handler.clear_distances()
            self.status = 1

    def on_connection_closed(self, addr: Tuple[str, int]):
        """Forget the connection bookkeeping for a departed client."""
        _, port = addr  # connections are keyed by port only
        self.handler.remove_connection(port)

    def on_accept_connection(self, sock: socket, addr: Tuple[str, int],
                             data: SimpleNamespace):
        """Register a newly accepted client with the training handler."""
        host, port = addr
        self.handler.add_connection(Connection(sock, host, port, data))

    def send_prediction_to_connection(self, conn: Connection, force: float,
                                      height: float) -> None:
        """Send a force/height prediction message to one client."""
        prediction = {'Type': 'prediction', 'Force': force, 'Height': height}
        self.send_message(conn.data, prediction)

    def ask_for_distances(self, conn: Connection) -> None:
        """Ask one client to report its distance."""
        request = {'Type': 'request'}
        self.send_message(conn.data, request)
# ---- Training configuration -------------------------------------------
batch_size = 100
train_batches = 20000

charset = "data/charset.txt"
train_corpus = "data/train.txt"
tag_name = "2_256"

seq_length = 128
save_on_every = 100
epoches = 50

# Resolve the data files relative to the current working directory.
cwd = os.getcwd()
charset = os.path.join(cwd, charset)
train_corpus = os.path.join(cwd, train_corpus)

# Batch generator over the training corpus.
d = DataGen2(charset, batch_size, seq_length)
gen = d.generate_v1(train_corpus, batches=train_batches)

# One slot per known character plus one extra — presumably for an
# out-of-charset/padding symbol; confirm against DataGen2.
num_symbols = len(d.char2int) + 1
input_shape = (seq_length, num_symbols)
output_shape = num_symbols

model = get_model(input_shape, output_shape, lstm_cell=True)

model_name = "single_input_single_task"
trainer = TrainingHandler(model, model_name)
trainer.train(tag_name, gen, epoches, train_batches, save_on_every,
              save_model=True)


Exemplo n.º 10
0
 def goto_train(self):
     """Switch the player into stat-training mode.

     Marks the player inactive, hands the protocol over to a
     TrainingHandler, and broadcasts a departure message.
     """
     self.player.active = False
     self.protocol.add_handler(TrainingHandler(self.protocol, self.player))
     self.logout_message("%s leaves to edit stats" % self.player.name)