Exemplo n.º 1
0
            def save(
                bgfc0, wgfc0,
                bgdc0, wgdc0,
                bgdc1, wgdc1,
                bgdc2, wgdc2,
                bgdc3, wgdc3,
                bdcv0, wdcv0,
                bdcv1, wdcv1,
                bdcv2, wdcv2,
                bdcv3, wdcv3,
                bdfc0, wdfc0
                    ):
                """Persist every layer's bias/weight pair to its network's file.

                Generator layers go to 'gan9g.txt', discriminator layers to
                'gan9d.txt' (layer objects are taken from the enclosing scope).
                """
                print("start save")
                out = ConvNet.openEmptyFileW('gan9g.txt')
                for layer, bias, weight in (
                        (gfc0, bgfc0, wgfc0),
                        (gdc0, bgdc0, wgdc0),
                        (gdc1, bgdc1, wgdc1),
                        (gdc2, bgdc2, wgdc2),
                        (gdc3, bgdc3, wgdc3)):
                    layer.save_ToFile(bias, weight, out)
                out.flush()
                out.close()

                out = ConvNet.openEmptyFileW('gan9d.txt')
                for layer, bias, weight in (
                        (dcv0, bdcv0, wdcv0),
                        (dcv1, bdcv1, wdcv1),
                        (dcv2, bdcv2, wdcv2),
                        (dcv3, bdcv3, wdcv3),
                        (dfc0, bdfc0, wdfc0)):
                    layer.save_ToFile(bias, weight, out)
                out.flush()
                out.close()
                print("end save")
Exemplo n.º 2
0
            def save(idx, gSaver, dSaver):
                """Run each saver callable against an indexed output file.

                Generator savers write to 'gan0g<idx>.txt', discriminator
                savers to 'gan0d<idx>.txt'.
                """
                print("start save")
                for prefix, savers in (("gan0g", gSaver), ("gan0d", dSaver)):
                    out = ConvNet.openEmptyFileW(prefix + str(idx) + ".txt")
                    for writer in savers:
                        writer(out)
                    out.flush()
                    out.close()
                print("end save")
Exemplo n.º 3
0
def train():
    """Train the network (CPU-only session) and dump its weights to test.txt."""
    with tf.Session(config=tf.ConfigProto(device_count = {'GPU': 0})) as sess:
        # Initialize all variables.
        sess.run(tf.global_variables_initializer())

        for epoch in range(20):
            # Print the current outputs for the three verification inputs;
            # they converge toward the inputs as training proceeds.
            outs = [sess.run(_net, feed_dict={inputlayer: v})
                    for v in (verifydata1, verifydata2, verifydata3)]
            print(epoch, outs[0][0], outs[1][0], outs[2][0])

            # Run the optimizer.
            for _ in range(100):
                sess.run(optimizer, feed_dict={finaldata: indata, inputlayer: indata})

        # Save the trained network: collect one saver callable per layer.
        savers = [item.getSaver(sess) for item in plist]

        out = ConvNet.openEmptyFileW("test.txt")
        for writer in savers:
            writer(out)
        out.flush()
        out.close()
Exemplo n.º 4
0
def train():
    """Train on MNIST, printing test accuracy and total loss per epoch,
    then dump the learned weights to MNIST.txt."""
    #with tf.Session(config=tf.ConfigProto(device_count = {'GPU': 0})) as sess:
    with tf.Session() as sess:
        # Initialize all variables.
        sess.run(tf.global_variables_initializer())

        for epoch in xrange(100):
            # Estimate accuracy over 100 single-sample test runs
            # (each correct prediction adds 1/100).
            accurate = 0
            for _ in xrange(100):
                testData, testLabel = MNISTData.extract_testdata()
                lbl = sess.run(_test, feed_dict={testOne: testData})
                if np.argmax(lbl, 1) == testLabel:
                    accurate = accurate + 0.01

            # Run 10 training batches and accumulate their losses.
            totalLoss = 0.0
            for _ in xrange(10):
                trainData, trainLabel = MNISTData.extract_traindata(BATCH_SIZE)
                _, batchLoss = sess.run(
                    [optimizer, loss],
                    feed_dict={labels_node: trainLabel, inputlayer: trainData})
                totalLoss = totalLoss + batchLoss

            print(epoch, accurate, totalLoss)

        # Save the trained network: collect one saver callable per layer.
        savers = [item.getSaver(sess) for item in plist]

        out = ConvNet.openEmptyFileW("MNIST.txt")
        for writer in savers:
            writer(out)
        out.flush()
        out.close()
Exemplo n.º 5
0
def train():
    """Train a CIFAR autoencoder, saving reconstruction images and a full
    weight checkpoint ('cifar<j>.txt') after every outer epoch.

    Fix: the final `if testfile:testfile.flush(),testfile.close()` abused a
    tuple expression as a statement; replaced with explicit statements.
    The eight-fold copy-pasted saveimg/verifydata/save calls are collapsed
    into loops with identical arguments.
    """
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Fixed verification samples: 4 from the start and 4 from near the
        # end of the dataset; save the originals once as base<slot>.bmp.
        sample_indices = (0, 1, 2, 3, 59110, 59111, 59112, 59113)
        for slot, idx in enumerate(sample_indices):
            saveimg("base" + str(slot) + ".bmp", CifarData, idx)
            verifydata[slot] = CifarData[idx]

        for j in xrange(0, 1000):

            for k in xrange(0, 10):  # train times
                print(str(k) + ' ', end='')
                sys.stdout.flush()
                for i in xrange(0, 800):  # train range
                    for dj in xrange(0, BATCH_SIZE):
                        inputData[dj] = CifarData[dj + i * BATCH_SIZE]
                    sess.run(optimizer, feed_dict={finaldata: inputData, inputLayer: inputData})

            print()
            print(j)

            # Dump reconstructions of the 8 verification samples as <j><slot>.bmp.
            _out = sess.run(regeneratedImg, feed_dict={inputLayer: verifydata})
            for slot in xrange(0, 8):
                saveimg(str(j) + str(slot) + ".bmp", _out, slot)

            print("saving")
            testfile = ConvNet.openEmptyFileW("cifar" + str(j) + ".txt")
            for saver in (conv1save, conv2save, conv3save, fc1saver, fc2saver,
                          uconv1save, uconv2save, uconv3save):
                saver(sess, testfile)
            if testfile:
                testfile.flush()
                testfile.close()
            print("saved")
Exemplo n.º 6
0
def train():
    """Train the CNN with momentum SGD and an exponentially decaying learning
    rate, printing evaluation metrics every EVAL_FREQUENCY steps, then dump
    the learned weights to conv.txt.

    Fix: the final `if testfile:testfile.flush(),testfile.close()` abused a
    tuple expression as a statement; replaced with explicit statements.
    """
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_node, logits=fc2))
    # `batch` counts optimizer steps; it drives the learning-rate decay below.
    batch = tf.Variable(0, dtype=tf.float32)
    learning_rate = tf.train.exponential_decay(
        0.01,                # Base learning rate.
        batch * BATCH_SIZE,  # Current index into the dataset.
        train_size,          # Decay step.
        0.95,                # Decay rate.
        staircase=True)
    # Use simple momentum for the optimization.
    optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(loss, global_step=batch)
    # Create a local session to run the training.
    start_time = time.time()
    with tf.Session() as sess:
        # Run all the initializers to prepare the trainable parameters.
        tf.global_variables_initializer().run()
        print('Initialized!')
        # Loop through training steps.
        for step in xrange(0, 100):
            # Compute the offset of the current minibatch in the data.
            # Note that we could use better randomization across epochs.
            offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
            batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
            batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
            # This dictionary maps the batch data (as a numpy array) to the
            # node in the graph it should be fed to.
            feed_dict = {inputLayer: batch_data, labels_node: batch_labels}
            # Run the optimizer to update weights.
            sess.run(optimizer, feed_dict=feed_dict)
            # Print some extra information once we reach the evaluation frequency.
            if step % EVAL_FREQUENCY == 0:
                # Fetch some extra nodes' data.
                l, predictions = sess.run([loss, train_prediction],
                                          feed_dict=feed_dict)
                elapsed_time = time.time() - start_time
                start_time = time.time()
                print(elapsed_time, l,
                      error_rate(predictions, batch_labels),
                      error_rate(eval_in_batches(validation_data, sess), validation_labels))

        print("save start")
        testfile = ConvNet.openEmptyFileW('conv.txt')
        conv1save(sess, testfile)
        conv2save(sess, testfile)
        fc1save(sess, testfile)
        fc2save(sess, testfile)
        if testfile:
            testfile.flush()
            testfile.close()
        print("save done")
Exemplo n.º 7
0
def train():
    """Train the network and dump fc1/fc2 parameters to test.txt.

    Fix: the final `if testfile:testfile.flush(),testfile.close()` abused a
    tuple expression as a statement; replaced with explicit statements.
    """
    with tf.Session() as sess:
        # Initialize all variables.
        sess.run(tf.global_variables_initializer())

        for j in range(0, 20):
            # Print the current outputs for the three verification inputs;
            # they converge toward the inputs as training proceeds.
            a1 = sess.run(fc2, feed_dict={inputlayer: verifydata1})
            a2 = sess.run(fc2, feed_dict={inputlayer: verifydata2})
            a3 = sess.run(fc2, feed_dict={inputlayer: verifydata3})
            print(j, a1[0], a2[0], a3[0])

            # Run the optimizer.
            for i in range(0, 100):
                sess.run(optimizer, feed_dict={finaldata: indata, inputlayer: indata})

        # Save the trained network.
        testfile = ConvNet.openEmptyFileW('test.txt')
        fc1saver(sess, testfile)
        fc2saver(sess, testfile)
        if testfile:
            testfile.flush()
            testfile.close()
Exemplo n.º 8
0
# Debug script: print each layer's activations for the sample input `ind1`,
# then dump all layer weights to conv.txt.
# Fix: the final `if testfile:testfile.flush(), testfile.close()` abused a
# tuple expression as a statement; replaced with explicit statements. The
# repeated outprint calls are collapsed into a loop over the graph nodes.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # One forward pass per node of interest, printed with a blank separator.
    for node in (inputLayer, conv1, conv2, fc1, fc2, uconv1, uconv2):
        ConvNet.outprint(sess.run(node, feed_dict={inputLayer: ind1}))
        print()


    testfile = ConvNet.openEmptyFileW('conv.txt')
    conv1save(sess, testfile)
    conv2save(sess, testfile)
    fc1save(sess, testfile)
    fc2save(sess, testfile)
    uconv1save(sess, testfile)
    uconv2save(sess, testfile)
    if testfile:
        testfile.flush()
        testfile.close()