# Example #1
    def train(self, batch_size=1024, epochs=1, initial_epoch=0, verbose=1):
        """Train the model on the adjacency matrix A and the matrix L.

        If the requested batch covers the whole graph, training is delegated
        to ``model.fit`` in a single full-batch call; otherwise a manual
        mini-batch loop is run and per-epoch losses are recorded in a
        ``History`` object.

        Parameters
        ----------
        batch_size : int, optional (default: 1024)
            Rows of A/L per batch; clamped to ``node_size`` when larger.
        epochs : int, optional (default: 1)
        initial_epoch : int, optional (default: 0)
        verbose : int, optional (default: 1)
            When > 0, per-epoch losses are printed.

        Returns
        -------
        History
            Keras training history.
        """
        if batch_size >= self.node_size:
            # Full-batch path: clamp an oversized batch and let Keras drive.
            if batch_size > self.node_size:
                print('batch_size({0}) > node_size({1}),set batch_size = {1}'.format(
                    batch_size, self.node_size))
                batch_size = self.node_size
            full_inputs = [self.A, self.L]
            return self.model.fit(
                full_inputs, full_inputs,
                batch_size=batch_size,
                epochs=epochs,
                initial_epoch=initial_epoch,
                verbose=verbose,
                shuffle=False,
            )

        # Mini-batch path: iterate row-blocks of A and the matching
        # principal submatrix of L, accumulating the three reported losses.
        n_steps = (self.node_size - 1) // batch_size + 1
        history = History()
        history.on_train_begin()
        epoch_logs = {}
        for epoch in range(initial_epoch, epochs):
            tic = time.time()
            running = np.zeros(3)
            for step in range(n_steps):
                lo = step * batch_size
                hi = min(lo + batch_size, self.node_size)
                rows = np.arange(lo, hi)
                batch_A = self.A[rows, :]  # batch_size x node_size
                # matching principal submatrix of L for this row block
                batch_L = self.L[rows][:, rows]
                pair = [batch_A, batch_L]
                running += self.model.train_on_batch(pair, pair)
            running = running / n_steps
            epoch_logs['loss'] = running[0]
            epoch_logs['2nd_loss'] = running[1]
            epoch_logs['1st_loss'] = running[2]
            elapsed = int(time.time() - tic)
            history.on_epoch_end(epoch, epoch_logs)
            if verbose > 0:
                print('Epoch {0}/{1}'.format(epoch + 1, epochs))
                print('{0}s - loss: {1: .4f} - 2nd_loss: {2: .4f} - 1st_loss: {3: .4f}'.format(
                    elapsed, running[0], running[1], running[2]))
        return history
# Example #2
    def train(self, batch_size=1024, epochs=2, initial_epoch=0, verbose=1):
        """Train SDNE model.

        Parameters
        ----------
        batch_size : int, optional (default : 1024)
            Rows per batch; clamped to ``node_size`` when larger.

        epochs : int, optional (default : 2)

        initial_epoch : int, optional (default : 0)
            Epoch to resume from.

        verbose : int, optional (default : 1)
            When > 0, per-epoch losses are printed on the mini-batch path.

        Returns
        -------
        History
            Keras ``History`` with per-epoch 'loss', '2nd_loss', '1st_loss'.
        """
        if batch_size >= self.node_size:
            # Full-batch path: clamp an oversized batch and delegate to fit().
            if batch_size > self.node_size:
                print('batch_size({0}) > node_size({1}),set batch_size = {1}'.format(
                    batch_size, self.node_size))
                batch_size = self.node_size
            return self.model.fit([self.A.todense(), self.L.todense()],
                                  [self.A.todense(), self.L.todense()],
                                  batch_size=batch_size, epochs=epochs, initial_epoch=initial_epoch, verbose=verbose,
                                  shuffle=False, )
        else:
            # Imported lazily: only the mini-batch path needs History, so the
            # full-batch path no longer pays (or fails on) the TF import.
            from tensorflow.python.keras.callbacks import History

            steps_per_epoch = (self.node_size - 1) // batch_size + 1
            hist = History()
            hist.on_train_begin()
            logs = {}
            for epoch in range(initial_epoch, epochs):
                start_time = time.time()
                losses = np.zeros(3)
                for i in range(steps_per_epoch):
                    index = np.arange(
                        i * batch_size, min((i + 1) * batch_size, self.node_size))
                    A_train = self.A[index, :].todense()
                    # principal submatrix of L matching this row block
                    L_mat_train = self.L[index][:, index].todense()
                    inp = [A_train, L_mat_train]
                    batch_losses = self.model.train_on_batch(inp, inp)
                    losses += batch_losses
                losses = losses / steps_per_epoch

                logs['loss'] = losses[0]
                logs['2nd_loss'] = losses[1]
                logs['1st_loss'] = losses[2]
                epoch_time = int(time.time() - start_time)
                # hist.on_epoch_end(epoch, logs) triggers a bug under TF2, so
                # record the epoch into the History object manually instead of
                # leaving hist empty (on_train_begin initialised .epoch/.history).
                hist.epoch.append(epoch)
                for name, value in logs.items():
                    hist.history.setdefault(name, []).append(value)
                if verbose > 0:
                    print('Epoch {0}/{1}'.format(epoch + 1, epochs))
                    print('{0}s - loss: {1: .4f} - 2nd_loss: {2: .4f} - 1st_loss: {3: .4f}'.format(
                        epoch_time, losses[0], losses[1], losses[2]))
            return hist
# Example #3
    def train(self, batch_size=1024, epochs=1, initial_epoch=0, verbose=1):
        """Train the SDNE model and return its training history.

        Parameters
        ----------
        batch_size : int, optional (default: 1024)
            Rows per batch; clamped to ``node_size`` when larger.
        epochs : int, optional (default: 1)
        initial_epoch : int, optional (default: 0)
        verbose : int, optional (default: 1)
            When > 0, per-epoch losses are printed on the mini-batch path.

        Returns
        -------
        History
            Keras ``History`` with per-epoch 'loss', '2nd_loss', '1st_loss'.
        """
        print(self.model.summary())
        if batch_size >= self.node_size:
            # Full-batch path: clamp an oversized batch and delegate to fit().
            if batch_size > self.node_size:
                print('batch_size({0}) > node_size({1}), set batch_size = {1}'.
                      format(batch_size, self.node_size))
                batch_size = self.node_size
            return self.model.fit(
                [self.A.todense(), self.L.todense()],
                [self.A.todense(), self.L.todense()],
                batch_size=batch_size,
                epochs=epochs,
                initial_epoch=initial_epoch,
                verbose=verbose,
                shuffle=False,
            )

        else:
            steps_per_epoch = (self.node_size - 1) // batch_size + 1
            hist = History()
            hist.on_train_begin()
            logs = {}
            for epoch in range(initial_epoch, epochs):
                start_time = time.time()
                losses = np.zeros(3)
                for i in range(steps_per_epoch):
                    index = np.arange(
                        i * batch_size,
                        min((i + 1) * batch_size, self.node_size))
                    A_train = self.A[index, :].todense()
                    # principal submatrix of L matching this row block
                    L_mat_train = self.L[index][:, index].todense()
                    # (removed leftover debug prints that dumped the full
                    # dense A/L batches to stdout on every step)
                    inp = [A_train, L_mat_train]
                    batch_losses = self.model.train_on_batch(inp, inp)
                    losses += batch_losses
                losses = losses / steps_per_epoch

                logs['loss'] = losses[0]
                logs['2nd_loss'] = losses[1]
                logs['1st_loss'] = losses[2]

                epoch_time = int(time.time() - start_time)
                # BUG FIX: was on_epoch_begin, which never records `logs`,
                # leaving the returned History empty.
                hist.on_epoch_end(epoch, logs)
                if verbose > 0:
                    print('Epoch {0}/{1}'.format(epoch + 1, epochs))
                    # BUG FIX: this print was outside the verbose guard and
                    # misspelled the last field as '1st_lost'.
                    print(
                        '{0}s - loss: {1: .4f} - 2nd_loss: {2: .4f} - 1st_loss: {3: .4f}'
                        .format(epoch_time, losses[0], losses[1], losses[2]))
            return hist