Example No. 1
def backward():
    # placeholders for the input features and labels
    x = tf.placeholder(tf.float32, shape=(None, 2))
    y_ = tf.placeholder(tf.float32, shape=(None, 1))

    # generate the dataset
    X, Y_, Y_c = opt4_8_generateds.generateds()

    # rebuild the network structure and infer the output y
    y = forward.forward(x, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)

    # define the loss; mean squared error is used here
    loss_mse = tf.reduce_mean(tf.square(y - y_))
    # add the regularization losses
    loss = loss_mse + tf.add_n(tf.get_collection('losses'))

    # use an exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               300 / BATCH_SIZE,
                                               LEARNING_RATE_DECAY,
                                               staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # create the session and initialize the parameters
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        for i in range(STEPS):
            start = (i * BATCH_SIZE) % 300
            end = start + BATCH_SIZE
            sess.run(train_step,
                     feed_dict={
                         x: X[start:end],
                         y_: Y_[start:end]
                     })
            if i % 2000 == 0:
                print("经过%d轮训练,损失为%f" %
                      (i, sess.run(loss, feed_dict={
                          x: X,
                          y_: Y_
                      })))

        # plot the result
        # build a grid over [-3, 3] x [-3, 3] with step 0.01
        xx, yy = np.mgrid[-3:3:.01, -3:3:0.01]
        grid = np.c_[xx.ravel(), yy.ravel()]
        probs = sess.run(y, feed_dict={x: grid})
        probs = probs.reshape(xx.shape)
    # plot the data points
    plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
    plt.contour(xx, yy, probs, levels=[.5])
    plt.show()

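These snippets all assume module-level imports and hyperparameter constants defined earlier in the same file (Example No. 1 refers to the forward module simply as forward rather than opt4_8_forward). A minimal sketch of that setup follows; the concrete values are assumptions for illustration, not taken from the original sources.

# Assumed module-level setup for the backward() examples on this page.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

import opt4_8_generateds
import opt4_8_forward

BATCH_SIZE = 30              # assumed mini-batch size
STEPS = 40000                # assumed number of training steps
LEARNING_RATE_BASE = 0.001   # assumed base learning rate
LEARNING_RATE_DECAY = 0.999  # assumed decay rate
REGULARIZER = 0.01           # assumed L2 regularization weight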
Example No. 2
def backward():
    x = tf.placeholder(tf.float32, shape=(None, 2))
    y_ = tf.placeholder(tf.float32, shape=(None, 1))

    # generate the dataset
    X, Y_, Y_c = opt4_8_generateds.generateds()

    # forward propagation
    y = opt4_8_forward.forward(x, REGULARIZER)

    # iteration counter, defined as non-trainable; used by the exponentially decaying learning rate
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               300 / BATCH_SIZE,
                                               LEARNING_RATE_DECAY,
                                               staircase=True)

    # loss function with regularization
    loss_mse = tf.reduce_mean(tf.square(y - y_))
    loss_total = loss_mse + tf.add_n(
        tf.get_collection('losses'))  # add the regularization losses of all weights w

    # define the training step, with regularization included
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_total)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        for i in range(STEPS):
            start = (i * BATCH_SIZE) % 300
            end = start + BATCH_SIZE
            sess.run(train_step,
                     feed_dict={
                         x: X[start:end],
                         y_: Y_[start:end]
                     })
            if i % 2000 == 0:
                loss_value = sess.run(loss_total, feed_dict={x: X, y_: Y_})
                print("After %d train steps, loss is %f :" % (i, loss_value))

        xx, yy = np.mgrid[-3:3:0.01, -3:3:0.01]
        grid = np.c_[xx.ravel(), yy.ravel()]
        probs = sess.run(y, feed_dict={x: grid})
        probs = probs.reshape(xx.shape)

    plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
    plt.contour(xx, yy, probs, levels=[0.5])
    plt.show()
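Every example fetches its training data from opt4_8_generateds.generateds(). Based on the comment in Example No. 4 (X is a 300x2 matrix, and the label is 1 when the sum of squared coordinates is below 2), a hypothetical sketch of that generator could look like this:

# Hypothetical sketch of opt4_8_generateds.generateds(): 300 random 2-D points,
# labeled 1 when x0^2 + x1^2 < 2, plus a per-point color list for plotting.
import numpy as np

def generateds(seed=2):
    rng = np.random.RandomState(seed)
    X = rng.randn(300, 2)                                   # 300 rows, 2 columns
    Y_ = [[int(x0 * x0 + x1 * x1 < 2)] for (x0, x1) in X]   # 1 inside the circle, else 0
    Y_c = ['red' if y[0] else 'blue' for y in Y_]           # colors for plt.scatter
    return X, np.array(Y_), Y_c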
Example No. 3
def backward():
    x = tf.placeholder(tf.float32, shape=(None, 2))
    y_ = tf.placeholder(tf.float32, shape=(None, 1))

    X, Y_, Y_c = opt4_8_generateDS.generateds()

    y = opt4_8_forward.forward(x, REGULARIZER)

    global_step = tf.Variable(0, trainable=False)

    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               300 / BATCH_SIZE,
                                               LEARNING_RATE_DECAY,
                                               staircase=True)

    loss_mse = tf.reduce_mean(tf.square(y - y_))
    loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))

    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_step = optimizer.minimize(loss_total)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        for i in range(STEPS):
            start = (i * BATCH_SIZE) % 300
            end = start + BATCH_SIZE
            sess.run(train_step,
                     feed_dict={
                         x: X[start:end],
                         y_: Y_[start:end]
                     })
            if i % 2000 == 0:
                loss_v = sess.run(loss_total, feed_dict={x: X, y_: Y_})
                print("After {} steps, loss is {}".format(i, loss_v))
        xx, yy = np.mgrid[-3:3:.01, -3:3:.01]
        # flatten xx and yy and stack them into a two-column matrix of grid coordinates
        grid = np.c_[xx.ravel(), yy.ravel()]
        # feed the grid coordinates into the network to get the predictions
        probs = sess.run(y, feed_dict={x: grid})
        # reshape probs to match the shape of xx
        probs = probs.reshape(xx.shape)

    plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
    plt.contour(xx, yy, probs, levels=[.5])
    plt.show()
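loss_total adds tf.get_collection('losses') to the mean squared error, so the forward module is expected to register a regularization penalty for every weight it creates. A hedged sketch of what opt4_8_forward.forward(x, REGULARIZER) might look like (the hidden-layer width of 11 and the use of tf.contrib.layers.l2_regularizer are assumptions):

# Hypothetical sketch of opt4_8_forward: a two-layer network whose weights
# contribute L2 penalties to the 'losses' collection read by backward().
import tensorflow as tf

def get_weight(shape, regularizer):
    w = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    if regularizer is not None:
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def forward(x, regularizer):
    w1 = get_weight([2, 11], regularizer)
    b1 = tf.Variable(tf.constant(0.01, shape=[11]))
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)

    w2 = get_weight([11, 1], regularizer)
    b2 = tf.Variable(tf.constant(0.01, shape=[1]))
    return tf.matmul(y1, w2) + b2   # no activation on the output layer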
Example No. 4
def backward():  # backpropagation (training)
	x = tf.placeholder(tf.float32, shape=(None, 2))  # placeholder for the inputs
	y_ = tf.placeholder(tf.float32, shape=(None, 1))  # placeholder for the labels
	# X: 300x2 matrix; Y_: 1 when the sum of squared coordinates is less than 2, otherwise 0
	X, Y_, Y_c = opt4_8_generateds.generateds()
	
	y = opt4_8_forward.forward(x, REGULARIZER)  # forward propagation computes the output y
	
	global_step = tf.Variable(0, trainable=False)  # training-step counter
	# exponentially decaying learning rate
	learning_rate = tf.train.exponential_decay(
		LEARNING_RATE_BASE,   # base learning rate
		global_step,          # current step count
		300/BATCH_SIZE,       # decay steps
		LEARNING_RATE_DECAY,  # learning rate decay rate
		staircase=True)       # decay in discrete steps


	# define the loss function
	loss_mse = tf.reduce_mean(tf.square(y-y_))  # mean squared error
	loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))  # add the regularization losses
	
	# define the training step, with regularization included
	train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_total)

	with tf.Session() as sess:
		init_op = tf.global_variables_initializer()  # build the variable-initialization op
		sess.run(init_op)  # initialize all variables
		for i in range(STEPS):
			start = (i*BATCH_SIZE) % 300
			end = start + BATCH_SIZE  # end index of the current batch
			sess.run(train_step, feed_dict={x: X[start:end], y_:Y_[start:end]})
			if i % 2000 == 0:
				loss_v = sess.run(loss_total, feed_dict={x:X,y_:Y_})
				print("After %d steps, loss is: %f" %(i, loss_v))

		xx, yy = np.mgrid[-3:3:.01, -3:3:.01]  # grid of coordinate points
		grid = np.c_[xx.ravel(), yy.ravel()]
		probs = sess.run(y, feed_dict={x:grid})
		probs = probs.reshape(xx.shape)
	
	plt.scatter(X[:,0], X[:,1], c=np.squeeze(Y_c))  # plot the data points
	plt.contour(xx, yy, probs, levels=[.5])  # draw the decision boundary
	plt.show()  # display the figure
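Each snippet defines backward() as its training entry point; a typical way to run one of them (a usage sketch, not part of the originals) is:

# Usage sketch: train and plot by calling backward() when the file is run directly.
if __name__ == '__main__':
    backward()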
Example No. 5
def backward():
	x = tf.placeholder(tf.float32, shape=(None, 2))
	y_ = tf.placeholder(tf.float32, shape=(None, 1))

	X, Y_, Y_c = opt4_8_generateds.generateds()

	y = opt4_8_forward.forward(x, REGULARIZER)
	
	global_step = tf.Variable(0,trainable=False)	

	learning_rate = tf.train.exponential_decay(
		LEARNING_RATE_BASE,
		global_step,
		300/BATCH_SIZE,
		LEARNING_RATE_DECAY,
		staircase=True)


	# define the loss function
	loss_mse = tf.reduce_mean(tf.square(y-y_))
	loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))
	
	# define the training step, with regularization included
	train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_total)

	with tf.Session() as sess:
		init_op = tf.global_variables_initializer()
		sess.run(init_op)
		for i in range(STEPS):
			start = (i*BATCH_SIZE) % 300
			end = start + BATCH_SIZE
			sess.run(train_step, feed_dict={x: X[start:end], y_:Y_[start:end]})
			if i % 2000 == 0:
				loss_v = sess.run(loss_total, feed_dict={x:X,y_:Y_})
				print("After %d steps, loss is: %f" %(i, loss_v))

		xx, yy = np.mgrid[-3:3:.01, -3:3:.01]
		grid = np.c_[xx.ravel(), yy.ravel()]
		probs = sess.run(y, feed_dict={x:grid})
		probs = probs.reshape(xx.shape)
	
	plt.scatter(X[:,0], X[:,1], c=np.squeeze(Y_c)) 
	plt.contour(xx, yy, probs, levels=[.5])
	plt.show()
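With staircase=True, tf.train.exponential_decay lowers the learning rate in discrete steps. The value used at a given global_step is equivalent to the computation below (shown purely to illustrate the schedule, reusing the same constant names):

# Equivalent form of the staircase-decayed learning rate used in these examples.
def decayed_learning_rate(global_step):
    decay_steps = 300 / BATCH_SIZE
    return LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // decay_steps)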
Example No. 6
def backward():
    # placeholders
    x = tf.placeholder(tf.float32, shape=(None, 2))
    y_ = tf.placeholder(tf.float32, shape=(None, 1))
    # get the dataset and build the forward pass
    X, Y_, Y_c = opt4_8_generateds.generateds()
    y = opt4_8_forward.forward(x, REGULARIZER)
    # initialize the step counter
    global_step = tf.Variable(0, trainable=False)
    # exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               300 / BATCH_SIZE,
                                               LEARNING_RATE_DECAY,
                                               staircase=True)

    # define the loss function
    loss_mse = tf.reduce_mean(tf.square(y_ - y))
    loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))
    # train with the Adam optimizer
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_total)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        for i in range(STEPS):
            start = (i * BATCH_SIZE) % 300
            end = start + BATCH_SIZE
            sess.run(train_step,
                     feed_dict={
                         x: X[start:end],
                         y_: Y_[start:end]
                     })
            if i % 2000 == 0:
                loss_v = sess.run(loss_total, feed_dict={x: X, y_: Y_})
                print("After %d steps, loss is %f" % (i, loss_v))
        xx, yy = np.mgrid[-3:3:.01, -3:3:.01]
        grid = np.c_[xx.ravel(), yy.ravel()]
        probs = sess.run(y, feed_dict={x: grid})
        probs = probs.reshape(xx.shape)

    plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
    plt.contour(xx, yy, probs, levels=[.5])
    plt.show()
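All of the examples use the TensorFlow 1.x graph API (tf.placeholder, tf.Session, tf.train.*). On a TensorFlow 2.x installation they only run through the v1 compatibility layer, roughly as follows:

# Compatibility shim for running the TF1-style code above under TensorFlow 2.x.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores graph mode so placeholders and Session work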
Example No. 7
def backward():

    x = tf.placeholder(tf.float32, shape=(None, 2))
    y_ = tf.placeholder(tf.float32, shape=(None, 1))

    X, Y_, Y_c = opt4_8_generateds.generateds()
    y = opt4_8_forward.forward(x, REGULARIZER)
    
    global_step = tf.Variable(0, trainable=False)

    learning_rate = tf.train.exponential_decay(
            LEARNING_RATE_BASE,
            global_step, 
            300/BATCH_SIZE,
            LEARNING_RATE_DECAY,
            staircase=True)

    loss_mse = tf.reduce_mean(tf.square(y-y_))
    loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))

    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_total)