Example #1
# Reference: http://www.deepideas.net/deep-learning-from-scratch-i-computational-graphs/
import tensorflux.graph as tfg
import tensorflux.networks as tfn
import tensorflux.enums as tfe
import datasource.simple as simple_data

n = tfn.Single_Neuron_Network(input_size=1, output_size=1)

x = tfg.Placeholder(name="x")
target = tfg.Placeholder(name="target")
n.set_data(x, target)
n.initialize_scalar_param(5.0, -1.0)
n.layering(activator=tfe.Activator.ReLU.value)
n.set_optimizer(optimizer=tfe.Optimizer.SGD.value, learning_rate=0.01)

#n.draw_and_show()

data = simple_data.Simple_Function_Data()

n.print_feed_forward(num_data=data.num_train_data,
                     input_data=data.train_input,
                     target_data=data.train_target,
                     verbose=False)

#n.learning(max_epoch=500, data=data, bp=False, print_period=10, verbose=False)
n.learning(max_epoch=500, data=data, bp=True, print_period=10, verbose=False)

n.print_feed_forward(num_data=data.num_test_data,
                     input_data=data.test_input,
                     target_data=data.test_target,
                     verbose=False)
Example #2
from tensorflux import graph as tfg
from tensorflux import session as tfs
import networkx as nx
import matplotlib.pyplot as plt

# g= graph.Graph()
g = tfg.Graph()
g.initialize()

# Create variables
a = tfg.Variable(5.0, name='a')
b = tfg.Variable(1.0, name='b')

# Create placeholder
x = tfg.Placeholder(name='x')

# Create hidden node y
y = tfg.Mul(a, x, name="y")

# Create output node z
z = tfg.Add(y, b, name="z")

#nx.draw_networkx(g, with_labels=True)
#plt.show(block=True)

session = tfs.Session()
output = session.run(z, {x: 1.0})
print(output)

print(z.input_nodes[0], z.input_nodes[1])
print(z.output)
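
The snippet above builds the graph z = a*x + b and evaluates it with a Session. Below is a minimal, illustrative sketch of how such Placeholder/Variable/operation nodes and Session.run could be implemented, in the spirit of the referenced deepideas.net article; the class and attribute names (input_nodes, output) mirror the snippet, but this is not the actual tensorflux source.

class Node:
    def __init__(self, input_nodes=(), name=None):
        self.input_nodes = list(input_nodes)
        self.name = name
        self.output = None


class Variable(Node):
    def __init__(self, value, name=None):
        super().__init__(name=name)
        self.value = value


class Placeholder(Node):
    pass


class Operation(Node):
    def __init__(self, *inputs, name=None):
        super().__init__(input_nodes=inputs, name=name)


class Mul(Operation):
    def forward(self, a, b):
        return a * b


class Add(Operation):
    def forward(self, a, b):
        return a + b


class Session:
    def run(self, target, feed_dict):
        # Collect nodes in post-order so every input is computed before its consumer.
        order, visited = [], set()

        def visit(node):
            if node in visited:
                return
            visited.add(node)
            for n in node.input_nodes:
                visit(n)
            order.append(node)

        visit(target)
        for node in order:
            if isinstance(node, Placeholder):
                node.output = feed_dict[node]      # value supplied at run time
            elif isinstance(node, Variable):
                node.output = node.value           # stored parameter value
            else:                                  # operation node
                node.output = node.forward(*(n.output for n in node.input_nodes))
        return target.output


a = Variable(5.0, name="a")
b = Variable(1.0, name="b")
x = Placeholder(name="x")
z = Add(Mul(a, x, name="y"), b, name="z")
print(Session().run(z, {x: 1.0}))  # 5.0 * 1.0 + 1.0 -> 6.0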
Example #3
# Reference: http://www.deepideas.net/deep-learning-from-scratch-i-computational-graphs/
import tensorflux.graph as tfg
import tensorflux.networks as tfn
import tensorflux.enums as tfe
import datasource.simple as simple_data

n = tfn.Single_Neuron_Network(input_size=1, output_size=1)

# ctrl + B : jump to the definition of the selected function or class
# ctrl + / : comment out / uncomment the selected lines
x = tfg.Placeholder(name="x")  # placeholder whose value is supplied later
target = tfg.Placeholder(name="target")
n.set_data(x, target)   # register the input and target nodes

n.initialize_scalar_param(5.0, -1.0)    # (weight, bias)
n.layering(activator=tfe.Activator.ReLU.value)  # stack the layer -> u operation: Affine, f(u) operation: ReLU
#     (x) -> (affine) -> (Relu) -> (SGD) -> (target)
# (weight)-^-(bias)                 ^-(optimizer)
n.set_optimizer(optimizer=tfe.Optimizer.SGD.value, learning_rate=0.01)  # the optimizer itself is applied during n.learning below
# set the optimizer: use SGD

data = simple_data.Simple_Function_Data()

# evaluate on the training data
n.print_feed_forward(num_data=data.num_train_data,
                     input_data=data.training_input,
                     target_data=data.training_target,
                     x=x)

n.learning(max_epoch=100,   # epoch: number of passes over the training data
           data=data,       # data: the dataset to train on
           bp=True, print_period=10, verbose=False)  # remaining arguments assumed, mirroring Example #1
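
As the comments above describe, the layered single neuron computes an affine transform u = w*x + b followed by ReLU, and SGD updates w and b during n.learning. A rough, standalone sketch of that computation (plain Python, illustrative only; a squared-error loss is assumed, since the snippet does not show the loss explicitly):

w, b = 5.0, -1.0      # values passed to initialize_scalar_param above
learning_rate = 0.01

def forward(x):
    u = w * x + b              # affine: u = w*x + b
    return max(u, 0.0)         # ReLU: f(u) = max(u, 0)

def sgd_step(x, target):
    """One SGD update on the assumed squared-error loss 0.5 * (f(u) - target)**2."""
    global w, b
    u = w * x + b
    y = max(u, 0.0)
    grad_u = (y - target) * (1.0 if u > 0.0 else 0.0)  # dL/du through the ReLU gate
    w -= learning_rate * grad_u * x                    # dL/dw = dL/du * x
    b -= learning_rate * grad_u                        # dL/db = dL/du
    return 0.5 * (y - target) ** 2

print(forward(2.0))  # 5.0*2.0 - 1.0 = 9.0 before any training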
Example #4
from tensorflux import graph as tfg
from tensorflux import session as tfs
import networkx as nx
import matplotlib.pyplot as plt

g = tfg.Graph()
g.initialize()

"""
Scalar version, kept commented out:

# Create hidden node y
y = tfg.Mul(a, x, name="y")

# Create output node z
z = tfg.Add(y, b, name="z")  # z : operation

session = tfs.Session()
output = session.run(z, {x: 1.0})
print(output)
"""

# Create variables
A = tfg.Variable([[1, 0], [0, -1]], name="a")
b = tfg.Variable([1, 1], name="b")

# Create placeholder
x = tfg.Placeholder(name="x")

# Create hidden node y
y = tfg.Matmul(A, x, name="y")  # Matmul rather than Mul, because A, x, and b are matrices/vectors

# Create output node z
z = tfg.Add(y, b, name="z")

nx.draw_networkx(g, with_labels=True)
plt.show(block=True)

session = tfs.Session()
output = session.run(z, {x: [1, 2]})
print(output)
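
The expected output of the matrix example can be checked by hand; here is a quick NumPy verification (NumPy is used only for the check, not by the snippet itself):

import numpy as np

A = np.array([[1, 0], [0, -1]])
b = np.array([1, 1])
x = np.array([1, 2])

y = A @ x      # [1*1 + 0*2, 0*1 + (-1)*2] = [1, -2]
z = y + b      # [2, -1]
print(z)       # the session.run call above should produce the same values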
Example #5
from tensorflux import graph as tfg
from tensorflux import session as tfs

#import networkx as nx
#import matplotlib.pyplot as plt

g = tfg.Graph()  # create the computational-graph object
#g.initialize() # initialize the computational graph
#"""
# Create variables
a = tfg.Variable(5.0, name="a")  # create and initialize variable node a
b = tfg.Variable(-1.0, name="b")  # create and initialize variable node b

# Create placeholder
x = tfg.Placeholder(name="x")  # placeholder: its value is fed in later, so it is not initialized here

# Create hidden node y
y = tfg.Mul(a, x, name="y")  # create the multiplication operation node

# Create output node z
z = tfg.Add(y, b, name="z")  # create the addition operation node

#nx.draw_networkx(g, with_labels=True)
#plt.show(block=True)

# Run the graph
session = tfs.Session()
output = session.run(z, {x: 1.0})
print(output)
output = session.run(z, {x: 2.0})
print(output)
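
With a = 5.0 and b = -1.0, both runs above evaluate z = a*x + b, so they should print 4.0 (5.0*1.0 - 1.0) and 9.0 (5.0*2.0 - 1.0).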