Example #1
import json
from os import path

# Logger, CapturingManager, WebSocketIO, FrameStreamer and the WS_TAG,
# WS_ID, WS_TIMEOUT constants are assumed to come from the surrounding
# project and are not shown in this example.


def main():
    conf_path = path.join(path.dirname(path.abspath(__file__)), 'config.json')
    with open(conf_path, 'r') as conf_file:
        conf = json.load(conf_file)

    Logger.print_info('Preparing for screen capture...')
    capturing_mgr = CapturingManager(
        display_num=conf['vnc']['number'],
        width=conf['vnc']['width'],
        height=conf['vnc']['height'],
        depth=conf['vnc']['depth'],
        loglevel=conf['ffmpeg']['loglevel'],
        fps=conf['ffmpeg']['record_fps'],
        comp_thre_num=conf['compression']['thread_num'],
        raw_queue_size=conf['compression']['raw_frame_queue'],
        comp_queue_size=conf['compression']['comp_frame_queue'],
        quality=conf['compression']['quality'])
    capturing_mgr.init()

    Logger.print_info('Preparing for server connection...')
    ws_io = WebSocketIO(ip=conf['server']['ip'],
                        port=conf['server']['port'],
                        ws_tag=WS_TAG,
                        ws_id=WS_ID,
                        ws_timeout=WS_TIMEOUT)

    streamer = FrameStreamer(ws_io=ws_io,
                             capturing_mgr=capturing_mgr,
                             width=conf['vnc']['width'],
                             height=conf['vnc']['height'])
    ws_io.open(streamer.on_open)
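
# Minimal usage sketch (an assumption, not part of the original example): a
# standard entry-point guard so the capture client can be run as a script.
if __name__ == '__main__':
    main()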
Example #2
import torch

# Logger, DataLoader, GCN and utils are assumed to come from the
# surrounding project and are not shown in this example.


class Trainer(object):
    def __init__(self, **kwargs):
        self.parameters = kwargs
        self.logger = Logger(**kwargs)
        self.data_loader = DataLoader(**kwargs)
        self.model = GCN(input_dim=self.data_loader.get_input_feat_size(),
                         hidden_dim=kwargs['hidden_dim'],
                         num_classes=self.data_loader.get_num_classes(),
                         dropout_prob=kwargs['dropout_prob'],
                         bias=kwargs['bias'])
        self.optimizer = torch.optim.Adam(params=self.model.parameters(),
                                          lr=kwargs['lr'])
        # NLLLoss applied to log-probabilities (the GCN output is assumed to
        # end in log_softmax), which together amount to cross entropy.
        self.cross_entropy = torch.nn.NLLLoss()

    def train(self):

        adj_matrix, feat_matrix, labels, _, val_indices, train_indices = \
            self.data_loader.get_data()

        for epoch in range(self.parameters['num_epochs']):
            # training
            train_acc, train_loss = self.train_step(adj_matrix, feat_matrix,
                                                    labels, train_indices)

            # validation
            val_loss, val_acc = self.inference_step(adj_matrix, feat_matrix,
                                                    labels, val_indices)

            # logging
            self.logger.print_info(epoch + 1, train_loss.detach(), train_acc,
                                   val_loss, val_acc)
            self.logger.push_early_stopping(val_loss, self.model)

            if self.logger.early_stopping.early_stop:
                print("Early stopping")
                break

        # if val_loss is still improving at the last epoch, save the weights
        # of the final model as the checkpoint
        self.logger.early_stopping.save_checkpoint(val_loss, self.model)

    def train_step(self, adj_matrix, feat_matrix, labels, train_indices):
        self.model.train()
        self.optimizer.zero_grad()
        out = self.model(adj_matrix, feat_matrix)
        train_loss = self.loss(out[train_indices], labels[train_indices])
        train_acc = utils.calculate_accuracy(out[train_indices],
                                             labels[train_indices])
        train_loss.backward()
        self.optimizer.step()
        return train_acc, train_loss

    def test(self):
        # load the model saved at early stopping point
        state_dict_agent = torch.load(self.logger.save_folder +
                                      '/checkpoint.pt',
                                      map_location='cpu')
        self.model.load_state_dict(state_dict_agent)
        # inference test set
        adj_matrix, feat_matrix, labels, test_indices, _, _ = \
            self.data_loader.get_data()
        test_loss, test_acc = self.inference_step(adj_matrix, feat_matrix,
                                                  labels, test_indices)
        print('\rTest Loss: {}, Test Acc: {}'.format(test_loss, test_acc))

    def inference_step(self, adj_matrix, feat_matrix, labels, indices):
        """
        Forward matrix and features and calculate loss and accuracy for the labels
        """
        self.model.eval()
        out = self.model(adj_matrix, feat_matrix)
        loss = self.loss(out[indices], labels[indices]).detach()
        acc = utils.calculate_accuracy(out[indices], labels[indices])
        return loss, acc

    def loss(self, predictions, labels):
        # calculate cross entropy loss with L2 regularization for the first layer parameters
        l2_reg = self.parameters['weight_decay'] * torch.sum(
            self.model.layer1.weights**2)
        loss = self.cross_entropy(predictions, labels) + l2_reg
        return loss
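
# Minimal usage sketch (an assumption: the hyperparameter values below are
# illustrative defaults, and Logger/DataLoader may expect additional
# project-specific keyword arguments).
if __name__ == '__main__':
    trainer = Trainer(hidden_dim=16,
                      dropout_prob=0.5,
                      bias=True,
                      lr=0.01,
                      weight_decay=5e-4,
                      num_epochs=200)
    trainer.train()
    trainer.test()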