Example #1
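Trains a small fully connected network (32 → 5 → 1) on a dataset returned by the project's `load_data` helper, using a custom `DNN` class.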
import numpy as np

# load_data, Dense, and DNN are assumed to be defined in the surrounding project.
def main():
    # training hyperparameters for the custom DNN
    options = {
        'learning_rate': 0.1,
        'beta1': 0.9,
        'optimizer': 'gd',
        'loss': 'crossentropy'
    }

    (train_x, test_x, train_set_x_orig, train_set_y_orig,
     test_set_x_orig, test_set_y_orig, classes) = load_data()

    # small toy arrays; defined here but unused in the rest of the example
    X = np.array([[1, 2], [1, 2], [4, 2]])
    Y = np.array([[0], [0], [0]])

    print(train_x.shape)
    print(test_x.shape)
    print(train_set_y_orig.shape)

    print(train_set_y_orig[0, 0:10])

    # three fully connected layers ending in a sigmoid, i.e. a binary classifier
    layers = [
        Dense(32, activation='relu'),
        Dense(5, activation='relu'),
        Dense(1, activation='sigmoid')
    ]

    print(len(layers))

    dnn = DNN(train_x, train_set_y_orig, layers, options)

    print(dnn.params.keys())

    # for param in sorted(dnn.params):
    #     print(param, dnn.params[param].shape)

    print(dnn)
    # loss on the test set before any training has happened
    print(dnn.loss(dnn.predict(test_x), test_set_y_orig))

    dnn.train()
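The `DNN` class itself is project-specific. Judging only from the calls above, its interface looks roughly like the sketch below; the names and signatures are inferred, not the project's actual code:

class Dense:
    # a layer description consumed by DNN; holds the size and activation name
    def __init__(self, units, activation='relu'):
        self.units = units
        self.activation = activation

class DNN:
    # X, Y: training data; layers: list of Dense descriptions; options: hyperparameters
    def __init__(self, X, Y, layers, options): ...
    def predict(self, X): ...            # forward pass
    def loss(self, predictions, Y): ...  # e.g. cross-entropy, per options['loss']
    def train(self): ...                 # runs the optimizer named in options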
Example #2
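Trains a TensorFlow 1.x model on windowed sequence data with Adam, logging summaries and checkpointing whenever the average epoch loss improves.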
import os
import random
import time

import tensorflow as tf  # TensorFlow 1.x API (tf.placeholder, tf.Session)

# input_dim, iter_num, batch_num, next_batch, and DNN are assumed to be
# defined at module level elsewhere in the project.
def train(data, path):
    os.makedirs(path, exist_ok=True)  # equivalent of "mkdir -p"
    x_train, y_train = data['x'], data['y']
    # inputs are 11-frame context windows; targets are single frames
    x = tf.placeholder(tf.float32, [None, 11, input_dim], name='x')

    y = tf.placeholder(tf.float32, [None, input_dim], name='y')

    model = DNN()
    loss = model.loss(x, y)
    pred = model.predict(x)
    # store the ops in collections so they can be fetched after restoring the graph
    tf.add_to_collection('pred', pred)
    tf.add_to_collection('loss', loss)

    optimize = tf.train.AdamOptimizer(learning_rate=5e-5,
                                      beta1=0.9,
                                      beta2=0.999).minimize(loss)
    merged = tf.summary.merge_all()

    with tf.Session() as sess:
        start = time.time()
        init = tf.global_variables_initializer()
        sess.run(init)
        saver = tf.train.Saver()
        writer = tf.summary.FileWriter(path + 'logs', sess.graph)

        err_old = float('inf')  # best average epoch loss seen so far
        for i in range(iter_num):
            err_new = 0
            count = 0
            for j in range(len(x_train)):
                idx = random.randint(0, len(x_train) - 1)  # pick a random training sequence
                xt = next_batch(x_train[idx])
                # drop 5 frames at each end so targets line up with the 11-frame windows
                yt = y_train[idx % len(y_train)][5:-5, :]
                k = 0  # keep k defined even if the sequence is shorter than one batch
                for k in range(0, len(xt), batch_num):
                    xb = xt[k:k + batch_num]
                    yb = yt[k:k + batch_num]
                    _ = sess.run([optimize], feed_dict={x: xb, y: yb})
                # measure the loss on the final slice of this sequence
                xb = xt[k:]
                yb = yt[k:]
                err, result = sess.run([loss, merged], feed_dict={x: xb, y: yb})
                err_new += err
                count += 1
                if j % 100 == 0:
                    writer.add_summary(result, len(x_train) * i + j)
            # checkpoint only when the average epoch loss improves
            if err_new / count < err_old:
                err_old = err_new / count
                saver.save(sess, path + 'test_best_model')
                print('Epoch [%4d] Time [%10.4f] Loss: [%.4f]: Saved ' %
                      (i + 1, time.time() - start, err_new / count))
            else:
                print('Epoch [%4d] Time [%5.4f] Loss: [%.4f]: No save ' %
                      (i + 1, time.time() - start, err_new / count))
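The helpers `next_batch`, `input_dim`, `batch_num`, and `iter_num` are not defined in this example. Given the `[None, 11, input_dim]` placeholder and the `[5:-5]` trimming of the targets, `next_batch` most plausibly slides an 11-frame window over a sequence; a minimal sketch under that assumption:

import numpy as np

def next_batch(sequence):
    # sequence: array of shape [num_frames, input_dim].
    # Returns [num_frames - 10, 11, input_dim]: one 11-frame window centered
    # on every frame except the 5 at each edge, matching yt = y[5:-5, :].
    windows = [sequence[t - 5:t + 6] for t in range(5, len(sequence) - 5)]
    return np.stack(windows)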