Example #1
# Fragment of a GraphDef-copying helper; its def line is missing from the snippet, so the
# signature below is inferred from the body (the name rename_nodes is an assumption).
def rename_nodes(graph_def, rename_func):
    res_def = tf.GraphDef()
    for n0 in graph_def.node:
        n = res_def.node.add()
        n.MergeFrom(n0)
        n.name = rename_func(n.name)
        for i, s in enumerate(n.input):
            # Control-dependency inputs start with '^'; keep the prefix while renaming.
            n.input[i] = rename_func(s) if s[0] != '^' else '^' + rename_func(s[1:])
    return res_def
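
# A minimal usage sketch (not part of the original snippet): prefix every node name so the
# copied graph can sit next to another graph without name collisions. `some_graph_def` is a
# hypothetical tf.GraphDef instance.
renamed_def = rename_nodes(some_graph_def, lambda name: 'd_copy/' + name)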




DF = 32             # Dimension of D filters in first conv layer. default [64]
dlist = []
loadFromFile = ConvNet.openTextFileR('faceTrain.txt')
dcv0 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = IMAGE_CHANNEL,outDepth = DF,filterSize = 5,loadFromFile = loadFromFile))#64out
dcv1 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF,outDepth = DF*2,filterSize = 5,loadFromFile = loadFromFile))#64out
dcv2 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*2,outDepth = DF*4,filterSize = 5,loadFromFile = loadFromFile))#32out
dcv3 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*4,outDepth = DF*8,filterSize = 5,loadFromFile = loadFromFile))#16out
dcv4 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*8,outDepth = DF*16,filterSize = 3,loadFromFile = loadFromFile))#8out
dcv5 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*16,outDepth = DF*16,filterSize = 3,loadFromFile = loadFromFile))#4out
dfc0 = ConvNet.addlist(dlist,ConvNet.FC(inDepth = DF*16*3*4,outDepth = 128,loadFromFile = loadFromFile))
dfc1 = ConvNet.addlist(dlist,ConvNet.FC(inDepth = 128,outDepth = 1,loadFromFile = loadFromFile))
if loadFromFile:loadFromFile.close()

imagesT = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
l0 = dcv0.getLayer(imagesT, convStride = 1, poolSize = 2,isRelu=True, fixed = False)
l1 = dcv1.getLayer(l0, convStride = 1, poolSize = 2,isRelu=True, fixed = False)
l2 = dcv2.getLayer(l1, convStride = 1, poolSize = 2,isRelu=True, fixed = False)
l3 = dcv3.getLayer(l2, convStride = 1, poolSize = 2,isRelu=True, fixed = False)
l4 = dcv4.getLayer(l3, convStride = 1, poolSize = 2,isRelu=True, fixed = False)
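
# The snippet is cut off here; a hedged sketch (variable names are illustrative) of how the
# remaining layers defined above (dcv5, dfc0, dfc1) could be wired with the same getLayer pattern:
l5 = dcv5.getLayer(l4, convStride = 1, poolSize = 2, isRelu=True, fixed = False)
flat = ConvNet.Conv2FC_Reshape(l5)                      # DF*16 channels at 3x4 -> flat vector
f0 = dfc0.getLayer(flat, isRelu=True, fixed = False)
score = dfc1.getLayer(f0, isRelu=False, fixed = False)  # single scalar output per image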
Example #2
BATCH_SIZE = 100

# Verification data; here it is the same as the training data, though different data could be used for verification

# Input layer
inputlayer = tf.placeholder(tf.float32, [BATCH_SIZE, MNISTData.IMAGE_H, MNISTData.IMAGE_W, MNISTData.IMAGE_CHANNEL])
testOne = tf.placeholder(tf.float32, [1, MNISTData.IMAGE_H, MNISTData.IMAGE_W, MNISTData.IMAGE_CHANNEL])

# Expected (label) data
labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE))

# Network definition
plist = []
loadFromFile = ConvNet.openEmptyFileR('MNIST.txt')  # Load previously trained weights from the file; if the file does not exist, the network is randomly initialized by default
cv0 = ConvNet.addlist(plist,ConvNet.Conv(inDepth = MNISTData.IMAGE_CHANNEL,outDepth = 16,filterSize = 5,loadFromFile = loadFromFile))
cv1 = ConvNet.addlist(plist,ConvNet.Conv(inDepth = 16,outDepth = 32,filterSize = 5,loadFromFile = loadFromFile))
fc0 = ConvNet.addlist(plist,ConvNet.FC(inDepth = 7*7*32,outDepth = 64,loadFromFile = loadFromFile))
fc1 = ConvNet.addlist(plist,ConvNet.FC(inDepth = 64,outDepth = 10,loadFromFile = loadFromFile))
if loadFromFile:loadFromFile.close()   

def net(inputT):
    _ret = cv0.getLayer(inputT, convStride = 1, poolSize = 2,isRelu=True, fixed = False)
    _ret = cv1.getLayer(_ret, convStride = 1, poolSize = 2,isRelu=True, fixed = False)
    _ret = ConvNet.Conv2FC_Reshape(_ret)
    _ret = fc0.getLayer(_ret, isRelu=True, fixed = False)
    _ret = fc1.getLayer(_ret, isRelu=False, fixed = False)
    return _ret

fc2 = net(inputlayer)
_net = tf.nn.softmax(fc2)
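
# A hedged sketch (not part of the original snippet) of how the logits and the labels
# placeholder defined above are typically combined for training and evaluation:
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_node, logits=fc2))
predictions = tf.argmax(_net, 1)                                        # predicted class per sample
accuracy = tf.reduce_mean(tf.cast(tf.equal(predictions, labels_node), tf.float32))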
Example #3
verifydata2 = np.ndarray([batchSize, 4], np.float32)
verifydata2[0] = [2.0, 1.0, 2.0, 1.0]
verifydata3 = np.ndarray([batchSize, 4], np.float32)
verifydata3[0] = [0.0, 0.5, 1.0, 1.5]

# Input layer
inputlayer = tf.placeholder(tf.float32, shape=(batchSize, 4))

# Expected output data
finaldata = tf.placeholder(tf.float32, shape=(batchSize, 4))

# Network definition
plist = []
loadFromFile = ConvNet.openEmptyFileR('test.txt')  # Load previously trained weights from the file; if the file does not exist, the network is randomly initialized by default
# The number 5 can be adjusted to see its effect on the final result; if it is too small, the network lacks the capacity to learn more complex cases
fc0 = ConvNet.addlist(plist,ConvNet.FC(inDepth = 4,outDepth = 5,loadFromFile = loadFromFile))
fc1 = ConvNet.addlist(plist,ConvNet.FC(inDepth = 5,outDepth = 4,loadFromFile = loadFromFile))
if loadFromFile:loadFromFile.close()   

def net(inputT):
    _ret = fc0.getLayer(inputT, isRelu=True, fixed = False)
    _ret = fc1.getLayer(_ret, isRelu=False, fixed = False)
    return _ret

_net = net(inputlayer)

# Loss function
# loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=train_labels_node, logits=logits))  # This loss is for classification; this is a regression problem, so it is not used
loss = tf.reduce_sum(tf.square(_net - finaldata))

# Trainer (optimizer)
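# The snippet ends at the trainer; a minimal sketch of what could follow, assuming a plain
# Adam optimizer (the learning rate and the feed arrays input_batch / target_batch, both
# hypothetical [batchSize, 4] float32 numpy arrays, are assumptions, not from the original code):
trainer = tf.train.AdamOptimizer(0.001).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        _, l = sess.run([trainer, loss],
                        feed_dict={inputlayer: input_batch, finaldata: target_batch})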
Example #4
BATCH_SIZE = 500
testBATCH_SIZE = 40
saveSize = [4, 10]

GF = 32             # Dimension of G filters in first conv layer. default [64]
DF = 32             # Dimension of D filters in first conv layer. default [64]
Z_DIM = 10

LR = 0.0001         # Learning rate

t2, t4 = MNISTData.IMAGE_H//2, MNISTData.IMAGE_H//4
s2, s4 = MNISTData.IMAGE_W//2, MNISTData.IMAGE_W//4
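
# t2/s2 and t4/s4 are the half- and quarter-resolution spatial sizes (14 and 7 for 28x28
# MNIST): gfc1 produces GF*2*t4*s4 values that are reshaped to a t4 x s4 feature map and
# upsampled back to full resolution by the two DeConv layers, while the discriminator's
# final FC consumes DF*2 channels at the same t4 x s4 resolution.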

glist = []
loadFromFile = ConvNet.openEmptyFileR('gan0g.txt')
gfc0 = ConvNet.addlist(glist,ConvNet.FC(inDepth = Z_DIM,outDepth = Z_DIM,loadFromFile = loadFromFile))
gfc1 = ConvNet.addlist(glist,ConvNet.FC(inDepth = Z_DIM,outDepth = GF*2*t4*s4,loadFromFile = loadFromFile))
gdc2 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*2,outDepth = GF*1,filterSize = 5,loadFromFile = loadFromFile))
gdc3 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*1,outDepth = MNISTData.IMAGE_CHANNEL,filterSize = 5,loadFromFile = loadFromFile))
if loadFromFile:loadFromFile.close()

dlist = []
loadFromFile = ConvNet.openEmptyFileR('gan0d.txt')
dcva = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = MNISTData.IMAGE_CHANNEL,outDepth = DF*1,filterSize = 1,loadFromFile = loadFromFile))
dcv0 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*1,outDepth = DF*1,filterSize = 5,loadFromFile = loadFromFile))
dcv1 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*1,outDepth = DF*2,filterSize = 5,loadFromFile = loadFromFile))
dfc0 = ConvNet.addlist(dlist,ConvNet.FC(inDepth = DF*2*t4*s4,outDepth = 1,loadFromFile = loadFromFile))
if loadFromFile:loadFromFile.close()
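
# Usage note (an assumption, not shown in this snippet): the generator below consumes a
# noise batch of shape [BATCH_SIZE, Z_DIM], e.g. sampled uniformly from [-1, 1] as
# np.random.uniform(-1.0, 1.0, size=[BATCH_SIZE, Z_DIM]).astype(np.float32).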

def generator(z):
Example #5
        bytestream.close()
        bytestream = open(filePath + str(file_index)+".bin","br")
        content_index = 0

    buf = bytestream.read(BATCH_SIZE * IMAGE_H * IMAGE_W * IMAGE_CHANNEL)
    data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
    data = data / 256.0 - 0.5          # scale uint8 pixels from [0, 255] to roughly [-0.5, 0.5)
    data = data.reshape(BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL)
    return data



print("startload")
glist = []
loadFromFile = ConvNet.openEmptyFileR('gan4g.txt')
gfc0 = ConvNet.addlist(glist,ConvNet.FC(inDepth = Z_DIM,outDepth = Z_DIM*4,loadFromFile = loadFromFile))
gfc1 = ConvNet.addlist(glist,ConvNet.FC(inDepth = Z_DIM*4,outDepth = GF*4*3*4,loadFromFile = loadFromFile))
gdc0 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*4,outDepth = GF*4,filterSize = 3,loadFromFile = loadFromFile))#4in
gdc1 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*4,outDepth = GF*4,filterSize = 3,loadFromFile = loadFromFile))#8in
gdc2 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*4,outDepth = GF*2,filterSize = 5,loadFromFile = loadFromFile))#16in
gdc3 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*2,outDepth = GF*2,filterSize = 5,loadFromFile = loadFromFile))#32in
gdc4 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*2,outDepth = GF*1,filterSize = 5,loadFromFile = loadFromFile))#32in
gdc5 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*1,outDepth = IMAGE_CHANNEL,filterSize = 5,loadFromFile = loadFromFile))#64in
if loadFromFile:loadFromFile.close()

dlist = []
loadFromFile = ConvNet.openEmptyFileR('gan4d.txt')
dcv0 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = IMAGE_CHANNEL,outDepth = DF*1,filterSize = 7,loadFromFile = loadFromFile))#64out
dcv1 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*1,outDepth = DF*2,filterSize = 5,loadFromFile = loadFromFile))#32out
dcv2 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*2,outDepth = DF*2,filterSize = 5,loadFromFile = loadFromFile))#16out
dcv3 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*2,outDepth = DF*4,filterSize = 3,loadFromFile = loadFromFile))#8out
Example #6
def resblock():
    gcva = ConvNet.addlist(glist,ConvNet.Conv(inDepth = IMAGE_CHANNEL,outDepth = GF*1,filterSize = 5,biasInitVal=0,loadFromFile = loadFromFile))
    gcvb = ConvNet.addlist(glist,ConvNet.Conv(inDepth = GF*1         ,outDepth = GF*1,filterSize = 5,biasInitVal=0,loadFromFile = loadFromFile))
    gcvc = ConvNet.addlist(glist,ConvNet.Conv(inDepth = GF*1         ,outDepth = GF*1,filterSize = 5,biasInitVal=0,loadFromFile = loadFromFile))
    gcvd = ConvNet.addlist(glist,ConvNet.Conv(inDepth = GF*1,outDepth = IMAGE_CHANNEL,filterSize = 5,biasInitVal=0,loadFromFile = loadFromFile))
    return gcva,gcvb,gcvc,gcvd
Example #7
    gcvd = ConvNet.addlist(glist,ConvNet.Conv(inDepth = GF*1,outDepth = IMAGE_CHANNEL,filterSize = 5,biasInitVal=0,loadFromFile = loadFromFile))
    return gcva,gcvb,gcvc,gcvd

gcva0,gcvb0,gcvc0,gcvd0 = resblock()
gcva1,gcvb1,gcvc1,gcvd1 = resblock()
gcva2,gcvb2,gcvc2,gcvd2 = resblock()
gcva3,gcvb3,gcvc3,gcvd3 = resblock()
gcva4,gcvb4,gcvc4,gcvd4 = resblock()
gcva5,gcvb5,gcvc5,gcvd5 = resblock()

if loadFromFile:loadFromFile.close()

dlist = []
DF = 64             # Dimension of D filters in first conv layer. default [64]
loadFromFile = ConvNet.openEmptyFileR('gan12srd.txt')
dcv0 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = IMAGE_CHANNEL,outDepth = DF*1,filterSize = 5,loadFromFile = loadFromFile))#128out
dcv1 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*1,outDepth = DF*2,filterSize = 5,loadFromFile = loadFromFile))#64out
dcv2 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*2,outDepth = DF*2,filterSize = 5,loadFromFile = loadFromFile))#32out
dcv3 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*2,outDepth = DF*2,filterSize = 3,loadFromFile = loadFromFile))#16out
dcv4 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*2,outDepth = DF*1,filterSize = 3,loadFromFile = loadFromFile))#8out
dcv5 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*1,outDepth = 1,filterSize = 1,loadFromFile = loadFromFile))#8out

if loadFromFile:loadFromFile.close()

print("endload")
def generator(inputT):
    net = inputT
    
    def getLayer(net,gcva,gcvb,gcvc,gcvd):
        inter = gcva.getLayer(net,   convStride = 1, poolSize = 1,isRelu=True, fixed = False)
        inter = gcvb.getLayer(inter, convStride = 1, poolSize = 1,isRelu=True, fixed = False)
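        # The snippet is cut off here; a hedged guess at the rest of the residual wiring,
        # based on the resblock layers defined above (gcvc with ReLU, gcvd without, then a
        # skip connection back onto the block input):
        inter = gcvc.getLayer(inter, convStride = 1, poolSize = 1, isRelu=True, fixed = False)
        inter = gcvd.getLayer(inter, convStride = 1, poolSize = 1, isRelu=False, fixed = False)
        return net + inter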
Example #8
        bytestream.close()
        bytestream = open(filePath + str(file_index)+".bin","br")
        content_index = 0

    buf = bytestream.read(BATCH_SIZE * IMAGE_H * IMAGE_W * IMAGE_CHANNEL)
    data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
    data = data / 256.0 - 0.5          # scale uint8 pixels from [0, 255] to roughly [-0.5, 0.5)
    data = data.reshape(BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL)
    return data

t2, t4, t8, t16 = IMAGE_H//2, IMAGE_H//4, IMAGE_H//8, IMAGE_H//16
s2, s4, s8, s16 = IMAGE_W//2, IMAGE_W//4, IMAGE_W//8, IMAGE_W//16

glist = []
loadFromFile = ConvNet.openEmptyFileR('gan10g.txt')
gfc0 = ConvNet.addlist(glist,ConvNet.FC(inDepth = Z_DIM,outDepth = GF*8*t16*s16,loadFromFile = loadFromFile))
gdc0 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*8,outDepth = GF*4,filterSize = 5,loadFromFile = loadFromFile))
gdc1 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*4,outDepth = GF*2,filterSize = 5,loadFromFile = loadFromFile))
gdc2 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*2,outDepth = GF*1,filterSize = 5,loadFromFile = loadFromFile))
gdc3 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*1,outDepth = IMAGE_CHANNEL,filterSize = 5,loadFromFile = loadFromFile))
if loadFromFile:loadFromFile.close()

dlist = []
loadFromFile = ConvNet.openEmptyFileR('gan10d.txt')
dcv0 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = IMAGE_CHANNEL,outDepth = DF*1,filterSize = 5,loadFromFile = loadFromFile))
dcv1 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*1,outDepth = DF*2,filterSize = 5,loadFromFile = loadFromFile))
dcv2 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*2,outDepth = DF*4,filterSize = 5,loadFromFile = loadFromFile))
dcv3 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*4,outDepth = DF*8,filterSize = 5,loadFromFile = loadFromFile))
dfc0 = ConvNet.addlist(dlist,ConvNet.FC(inDepth = DF*8*t16*s16,outDepth = 1,loadFromFile = loadFromFile))
if loadFromFile:loadFromFile.close()
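
# A hedged sketch (not in the original snippet) of how this DCGAN-style discriminator could
# be assembled from the layers above, following the getLayer pattern used in the earlier
# examples; each pooled conv halves the spatial size, ending at t16 x s16 before the FC:
def discriminator(imageT):
    d = dcv0.getLayer(imageT, convStride = 1, poolSize = 2, isRelu=True, fixed = False)
    d = dcv1.getLayer(d, convStride = 1, poolSize = 2, isRelu=True, fixed = False)
    d = dcv2.getLayer(d, convStride = 1, poolSize = 2, isRelu=True, fixed = False)
    d = dcv3.getLayer(d, convStride = 1, poolSize = 2, isRelu=True, fixed = False)
    d = ConvNet.Conv2FC_Reshape(d)                          # DF*8 channels at t16 x s16 -> flat
    return dfc0.getLayer(d, isRelu=False, fixed = False)    # single real/fake logit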