Example #1
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import resample

def train(model, epoch=1000, learning_rate=1e-3, steps_per_epoch=steps_per_epoch):
    # start training
    losses = []
    loss_min = np.inf
    graph_sort = toplogical_sort(model.feed_dict)  # topological sort of the graph
    optim = Nadam(graph_sort)
    update_lr = Auto_update_lr(lr=learning_rate, alpha=0.1, patiences=500, print_=True)
    for e in range(epoch):
        loss = 0
        for b in range(steps_per_epoch):
            # x_, y_ and batch_size are globals prepared outside this function
            X_batch, y_batch = resample(x_, y_, n_samples=batch_size)
            model.x.value = X_batch  # feed the batch into the graph's input nodes
            model.y.value = y_batch[:, None]
            run_steps(graph_sort, monitor=False)

            optim.update(learning_rate=learning_rate)
            Visual_gradient(model)
            Grad_Clipping_Disappearance(model, 5)

            loss += model.MSE_loss.value
        update_lr.updata(loss / steps_per_epoch)  # plateau check on the mean epoch loss
        print("epoch:{}/{},loss:{:.6f}".format(e, epoch, loss / steps_per_epoch))
        losses.append(loss / steps_per_epoch)
        if loss / steps_per_epoch < loss_min:
            print('loss {:.6f} is decreasing, saving model'.format(loss / steps_per_epoch))
            save_model("model/mlp.xhp", model)
            loss_min = loss / steps_per_epoch
    print('The min loss:', loss_min)
    plt.plot(losses)
    plt.savefig("image/many_vectoy.png")
    plt.show()
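
Every example on this page starts by flattening the computation graph with toplogical_sort(model.feed_dict) (the library's own, misspelled name) so that run_steps can evaluate nodes in dependency order. Its implementation is not shown here; below is a minimal Kahn-style sketch of what such a sort might look like, assuming each node object exposes inputs (producers) and outputs (consumers), which is an assumption about the library's node interface.

from collections import deque

def topo_sort_sketch(feed_dict):
    """Kahn's algorithm over a computation graph (a sketch, not the
    library's actual toplogical_sort). Assumes node.inputs/node.outputs."""
    # Discover every node reachable from the fed input nodes.
    nodes, stack = set(), list(feed_dict)
    while stack:
        n = stack.pop()
        if n not in nodes:
            nodes.add(n)
            stack.extend(n.outputs)
    # Count how many producers each node is still waiting on.
    pending = {n: sum(p in nodes for p in n.inputs) for n in nodes}
    ready = deque(n for n in nodes if pending[n] == 0)
    order = []
    while ready:
        n = ready.popleft()
        order.append(n)
        for consumer in n.outputs:
            pending[consumer] -= 1
            if pending[consumer] == 0:
                ready.append(consumer)
    return order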
Example #2
def train(model, train_data, epoch=6, learning_rate=1e-3):
    # start training
    losses = []
    loss_min = np.inf
    graph_sort_lstm = toplogical_sort(model.feed_dict)  # topological sort
    optim = Adam(graph_sort_lstm)
    update_lr = Auto_update_lr(lr=learning_rate,
                               alpha=0.1,
                               patiences=20,
                               print_=True)
    for e in range(epoch):
        epoch_losses = []  # reset per epoch so the mean covers this epoch only
        for X, Y in train_data:
            X, Y = X.numpy(), Y.numpy()
            model.x.value = X
            model.y.value = Y
            run_steps(graph_sort_lstm)
            learning_rate = update_lr.lr
            optim.update(learning_rate=learning_rate)
            Visual_gradient(model)
            Grad_Clipping_Disappearance(model, 5)
            epoch_losses.append(model.MSE_loss.value)
        losses.extend(epoch_losses)  # keep the full per-batch history for the plot
        epoch_loss = np.mean(epoch_losses)
        update_lr.updata(epoch_loss)
        print("epoch:{}/{},loss:{:.6f}".format(e, epoch, epoch_loss))
        if epoch_loss < loss_min:
            print('loss {:.6f} is decreasing, saving model'.format(epoch_loss))
            save_model("model/lstm.xhp", model)
            loss_min = epoch_loss
    print('min loss:', loss_min)
    plt.plot(losses)
    plt.savefig("image/lstm_loss.png")
    plt.show()
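
The loop above feeds Auto_update_lr the mean epoch loss and reads the possibly-reduced rate back from update_lr.lr, i.e. it behaves like a reduce-on-plateau scheduler. Its source is not included on this page; the sketch below shows the assumed contract (decay lr by alpha after patiences calls without improvement), keeping the library's updata spelling. The class name and internals are illustrative only.

class AutoUpdateLRSketch:
    """Reduce-on-plateau scheduler sketch; the real Auto_update_lr
    may differ in details."""
    def __init__(self, lr, alpha=0.1, patiences=20, print_=True):
        self.lr = lr
        self.alpha = alpha          # multiplicative decay factor
        self.patiences = patiences  # calls tolerated without improvement
        self.print_ = print_
        self.best = float("inf")
        self.bad = 0

    def updata(self, loss):  # spelling kept from the library
        if loss < self.best:
            self.best, self.bad = loss, 0
        else:
            self.bad += 1
            if self.bad >= self.patiences:
                self.lr *= self.alpha
                self.bad = 0
                if self.print_:
                    print("plateau: lr reduced to", self.lr)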
Example #3
def predict(x, model):
    graph = toplogical_sort(model.feed_dict)
    model.x.value = x
    run_steps(graph, train=False, valid=False)
    y = graph[-2].value  # output node just before the loss
    result = np.argmax(y, axis=1)  # index of the highest-scoring class

    return result
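
Every training loop calls Grad_Clipping_Disappearance(model, 5) right after Visual_gradient(model); by its name and argument it looks like gradient clipping with a threshold of 5. Below is a self-contained global-norm clipping sketch; how the node gradients are stored (node.gradients[node]) and the trainable_nodes list are assumptions, not the library's confirmed API.

import numpy as np

def clip_grad_norm_sketch(trainable_nodes, max_norm=5.0):
    """Scale all gradients so their global L2 norm is at most max_norm.
    Assumes each node stores its own gradient in node.gradients[node]."""
    grads = [node.gradients[node] for node in trainable_nodes]
    total_norm = np.sqrt(sum(np.sum(g ** 2) for g in grads))
    if total_norm > max_norm:
        scale = max_norm / (total_norm + 1e-12)
        for g in grads:
            g *= scale  # in place, so the optimizer sees the clipped gradients
    return total_norm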
Example #4
def train(model, train_data, epoch=4000, learning_rate=0.0128):
    # start training
    accuracies = []
    losses = []
    losses_valid = []
    accuracies_valid = []
    loss_min = np.inf
    graph_sort_class = toplogical_sort(model.feed_dict)  # topological sort
    optim = Adam(graph_sort_class)
    update_lr = Auto_update_lr(lr=learning_rate,
                               alpha=0.1,
                               patiences=200,
                               print_=True)
    for e in range(epoch):
        epoch_losses, epoch_accuracies = [], []  # reset per epoch
        for X, Y in train_data:
            X, Y = X.unsqueeze(1).numpy(), Y.numpy()
            model.x.value = X
            model.y.value = Y
            run_steps(graph_sort_class)
            learning_rate = update_lr.lr
            optim.update(learning_rate=learning_rate)
            Visual_gradient(model)
            Grad_Clipping_Disappearance(model, 5)
            epoch_losses.append(model.cross_loss.value)
            epoch_accuracies.append(model.cross_loss.accuracy * 100)
        losses.extend(epoch_losses)
        accuracies.extend(epoch_accuracies)
        epoch_losses_valid, epoch_accuracies_valid = [], []
        for x, y in valid_loader:
            x, y = x.unsqueeze(1).numpy(), y.numpy()
            model.x.value = x
            model.y.value = y
            run_steps(graph_sort_class, train=False, valid=True)
            epoch_losses_valid.append(model.cross_loss.value)
            epoch_accuracies_valid.append(model.cross_loss.accuracy * 100)
        losses_valid.extend(epoch_losses_valid)
        accuracies_valid.extend(epoch_accuracies_valid)
        valid_loss = np.mean(epoch_losses_valid)
        update_lr.updata(valid_loss)
        print(
            "epoch:{}/{},train loss:{:.8f},train accuracy:{:.6f}%,valid loss:{:.8f},valid accuracy:{:.6f}%"
            .format(e, epoch, np.mean(epoch_losses), np.mean(epoch_accuracies),
                    valid_loss, np.mean(epoch_accuracies_valid)))
        if valid_loss < loss_min:
            print('loss {:.6f} is decreasing, saving model'.format(valid_loss))
            save_model("model/lstm_class.xhp", model)
            loss_min = valid_loss
    plt.plot(losses)
    plt.savefig("image/lstm_class_loss.png")
    plt.show()
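
The classification loop reads both a loss and an accuracy off the same node (model.cross_loss.value / .accuracy). A minimal numpy version of such a combined softmax cross-entropy node, assuming raw logits and integer class labels, might look like this; it is a sketch of the computation, not the library's implementation.

import numpy as np

def cross_entropy_with_accuracy(logits, labels):
    """Mean softmax cross-entropy plus accuracy, the two quantities the
    examples read from model.cross_loss."""
    shifted = logits - logits.max(axis=1, keepdims=True)  # numerical stability
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    loss = -log_probs[np.arange(labels.size), labels].mean()
    accuracy = (log_probs.argmax(axis=1) == labels).mean()
    return loss, accuracy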
Example #5
def evaluator(test_loader, model):
    graph = toplogical_sort(model.feed_dict)
    accuracies = []
    losses = []
    for x, y in test_loader:
        x, y = x.numpy(), y.numpy()
        model.x.value = x
        model.y.value = y
        run_steps(graph, train=False, valid=True)
        loss_test = model.cross_loss.value
        accuracy_test = model.cross_loss.accuracy
        losses.append(loss_test)
        accuracies.append(accuracy_test)
    print("test loss:{},test accuracy:{}".format(np.mean(losses),np.mean(accuracies)))
Example #6
# train() is the same function shown in Example #1


train(mlp)

load_model("model/mlp.xhp", mlp)

def predict(x_rm, graph, model):
    model.x.value = x_rm
    run_steps(graph, monitor=False, train=False, valid=False)

    return model.y_pre.value * std_y + mean_y  # undo the target standardization

graph_sort = toplogical_sort(mlp.feed_dict)
print("prediction:", predict(x_[17:50], graph_sort, mlp),
      "ground truth:", y_[17:50] * std_y + mean_y)