Example 1
def main():
    x_train, y_train, x_test, y_test = data.mnist(one_hot=True)

    # Define Deep Neural Network structure (input_dim, num_of_nodes)
    layers = [[x_train.shape[1], 256], [256, 128], [128, 64]]

    # Initialize a deep neural network
    dnn = DNN(MODEL_FOLDER, os_slash, layers, params)

    pre_epochs = 100
    train_epochs = 100

    # Create autoencoders and train them one by one, stacking them into the DNN
    pre_trained_weights = dnn.pre_train(x_train, pre_epochs)

    # Then use the pre-trained weights of these layers as initial weight values for the MLP
    history = dnn.train(x_train,
                        y_train,
                        train_epochs,
                        init_weights=pre_trained_weights)

    plot.plot_loss(history, loss_type='MSE')

    predicted, score = dnn.test(x_test, y_test)

    print("Test accuracy: ", score[1])

    dnn.model.save_weights(MODEL_FOLDER + os_slash + "final_weights.h5")
    dnn.model.save(MODEL_FOLDER + os_slash + "model.h5")
    save_results(score[1])
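
For context, dnn.pre_train above presumably builds one autoencoder per layer and trains them greedily, then hands the learned weights to dnn.train as initial values. A minimal sketch of that pattern in Keras (the function and its name are illustrative, not the DNN class's actual API):

# Sketch of greedy layer-wise autoencoder pretraining; illustrative only,
# the DNN class's real pre_train implementation is not shown here.
from tensorflow import keras

def pretrain_layers(x, layer_dims, epochs=10):
    """Train one autoencoder per (input_dim, hidden_dim) pair in layer_dims."""
    weights, current = [], x
    for input_dim, hidden_dim in layer_dims:
        inp = keras.Input(shape=(input_dim,))
        code = keras.layers.Dense(hidden_dim, activation='relu')(inp)
        out = keras.layers.Dense(input_dim, activation='sigmoid')(code)
        autoencoder = keras.Model(inp, out)
        autoencoder.compile(optimizer='adam', loss='mse')
        autoencoder.fit(current, current, epochs=epochs, verbose=0)  # reconstruct the input
        encoder = keras.Model(inp, code)
        weights.append(encoder.get_weights())          # saved as initial weights for fine-tuning
        current = encoder.predict(current, verbose=0)  # this layer's codes feed the next layer

    return weights

A layer_dims of [[784, 256], [256, 128], [128, 64]] would match the structure built in Example 1 for MNIST (784 input features).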
Example 2
def main():
    options = {
        'learning_rate': 0.1,
        'beta1': 0.9,
        'optimizer': 'gd',
        'loss': 'crossentropy'
    }

    train_x, test_x, train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes = load_data()

    # toy arrays (defined but unused below)
    X = np.array([[1, 2], [1, 2], [4, 2]])
    Y = np.array([[0], [0], [0]])

    print(train_x.shape)
    print(test_x.shape)
    print(train_set_y_orig.shape)

    print(train_set_y_orig[0, 0:10])

    layers = [
        Dense(32, activation='relu'),
        Dense(5, activation='relu'),
        Dense(1, activation='sigmoid')
    ]

    print(len(layers))

    dnn = DNN(train_x, train_set_y_orig, layers, options)

    print(dnn.params.keys())

    # for param in sorted(dnn.params):
    #     print(param, dnn.params[param].shape)

    print(dnn)
    print(dnn.loss(dnn.predict(test_x), test_set_y_orig))

    dnn.train()
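
The options above request a 'crossentropy' loss, and the final Dense(1, activation='sigmoid') layer implies binary cross-entropy. A plain NumPy sketch of what dnn.loss plausibly computes (the real implementation is not shown):

# Binary cross-entropy sketch; dnn.loss's actual code may differ.
import numpy as np

def binary_crossentropy(y_pred, y_true, eps=1e-12):
    """Mean binary cross-entropy: -mean(y*log(p) + (1-y)*log(1-p))."""
    p = np.clip(y_pred, eps, 1 - eps)  # avoid log(0)
    return float(np.mean(-(y_true * np.log(p) + (1 - y_true) * np.log(1 - p))))

# e.g. binary_crossentropy(np.array([[0.9], [0.2]]), np.array([[1], [0]]))
# -> ~0.164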
Example 3
        W_init.append(w_rbm)  # collect each pre-trained RBM's weights

    opts.W_init = W_init  # used to initialise the DNN layers

print "Build DNN structure..."
dnn = DNN(opts)

# training
print "Start DNN training...(lr_decay = %s)" % (str(opts.lr_decay))

acc_all = []
lr = opts.learning_rate
ts = time.time()
for i in range(opts.epoch):

    dnn.train(X_train, Y_train, opts.batch_size, lr)

    acc = np.mean(np.argmax(Y_valid, axis=1) == dnn.predict(X_valid))
    acc_all.append(acc)

    print "Epoch %d, lr = %.4f, accuracy = %f" % (i + 1, lr, acc)

    # dump an intermediate model and the accuracy log every 100 epochs
    if (i + 1) % 100 == 0:
        model_filename = os.path.join(opts.model_dir,
                                      "epoch%d.model" % (i + 1))
        dnn_save_model(model_filename, dnn)

        log_filename = '../log/%s.log' % parameters
        print "Save %s" % log_filename
        np.savetxt(log_filename, acc_all, fmt='%.7f')
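
One caveat: the loop prints lr and the banner mentions opts.lr_decay, but lr is never updated inside the loop. A typical per-epoch exponential decay would look like the following (hypothetical, since the snippet does not show how opts.lr_decay is meant to be applied):

# Hypothetical exponential schedule: lr_i = base_lr * decay**i
def decay_lr(base_lr, decay, epoch):
    return base_lr * (decay ** epoch)

# inside the training loop one might write:
#     lr = decay_lr(opts.learning_rate, opts.lr_decay, i)
# e.g. decay_lr(0.1, 0.95, 10) ~= 0.0599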
Example 4
    def parse_individual(self, indi):
        torch_device = torch.device('cuda')

        train_data, test_data, user_num, item_num, train_mat = data_loader.load_dataset()
        train_dataset = Data(train_data,
                             item_num,
                             train_mat,
                             num_ng=4,
                             is_training=True)  # 4 negative samples per positive
        test_dataset = Data(test_data,
                            item_num,
                            train_mat,
                            num_ng=0,
                            is_training=False)  # no sampling: 100 pre-built candidates per user

        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=256,
                                                   shuffle=True,
                                                   num_workers=4)

        dnn = DNN(int(user_num), int(item_num), factor_num=8, indi=indi)
        dnn.cuda()
        print(dnn)
        # complexity = get_total_params(dnn.cuda(), (220, 30, 30))  # TODO: change the input size

        # 3. Define the loss function: BCEWithLogitsLoss
        criterion = nn.BCEWithLogitsLoss()
        criterion = criterion.to(torch_device)

        # 4. Define the optimizer: Adam
        learning_rate = 0.002
        optimizer = torch.optim.Adam(dnn.parameters(), lr=learning_rate)
        loss_dict = []
        num_batches = len(train_loader)
        # 5. Train the model
        dnn.train()
        train_loader.dataset.ng_sample()  # draw fresh negative samples for this pass
        for i, data in enumerate(train_loader, 0):
            # 5.1 Move the batch tensors and labels to the GPU
            user = data[0].cuda()
            item = data[1].cuda()
            label = data[2].float().cuda()

            # 5.2 Forward pass: compute the network output
            optimizer.zero_grad()
            output = dnn(user, item)
            # 5.3 Compute the loss
            loss = criterion(output, label)

            # 5.4 Backward pass and parameter update
            loss.backward()
            optimizer.step()

            # 5.5 (optional) Record the loss and print progress
            loss_dict.append(loss.item())
            if (i + 1) % 5000 == 0:
                print('Step [{}/{}], Loss: {:.4f}'.format(
                    i + 1, num_batches, loss.item()))

        # evaluate
        dnn.eval()
        # test_loss_dict = []
        # each user is ranked on 99 negative items plus one positive, so batch_size = 100
        test_loader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=99 + 1,
                                                  shuffle=False,
                                                  num_workers=2)
        # test_loader.dataset.ng_sample()
        # for i, data in enumerate(test_loader, 0):
        #     user = data[0].cuda()
        #     item = data[1].cuda()
        #     label = data[2].float().cuda()
        #     output = dnn(user, item)
        #     loss = criterion(output, label)
        #     test_loss_dict.append(loss.item())

        HR = utils.metricsHR(dnn, test_loader, top_k=10)
        NDCG = utils.metricsNDCG(dnn, test_loader, top_k=10)

        #mean_test_loss = np.mean(test_loss_dict)
        #std_test_loss = np.std(test_loss_dict)
        print("HR:{},NDCG:{}".format(HR, NDCG))
        return HR, NDCG
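
utils.metricsHR and utils.metricsNDCG are not shown here. Under the leave-one-out protocol above (each test batch holds one positive and 99 negatives), HR@k and NDCG@k are conventionally computed per user as below; this is a sketch of the standard definitions, not the project's actual utils code:

# Standard HR@k / NDCG@k for a single relevant item; illustrative helper,
# not the utils module's real implementation.
import numpy as np

def hr_ndcg_at_k(scores, pos_index, k=10):
    """scores: model scores for one user's 100 candidate items;
    pos_index: position of the single positive item within `scores`."""
    top_k = np.argsort(scores)[::-1][:k]  # indices of the k highest-scored items
    if pos_index not in top_k:
        return 0.0, 0.0                   # miss: both metrics are zero
    rank = int(np.where(top_k == pos_index)[0][0])  # 0-based rank of the positive
    return 1.0, 1.0 / np.log2(rank + 2)   # hit; ideal DCG is 1/log2(2) = 1

The per-user values are then averaged over all test users, which is presumably what the two utils calls return before being printed above.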