Example #1
# NOTE: imports assumed by this example; LoadSample and Model are helper
# classes from the example's own project and are not part of TensorFlow.
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.contrib.learn.python.learn.datasets import base

def main():

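    # Number of mini-batch training steps (batch size 50; see the loop below)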
    Ntrain = 20000

    # Input data from file
    fname_train_image = "./data/train-images.idx3-ubyte"
    fname_train_label = "./data/train-labels.idx1-ubyte"

    fname_test_image = "./data/t10k-images.idx3-ubyte"
    fname_test_label = "./data/t10k-labels.idx1-ubyte"

    train_sample, valid_sample = LoadSample(fname_train_image,
                                            fname_train_label,
                                            validate=True)
    test_sample = LoadSample(fname_test_image, fname_test_label)

    print(train_sample.images.shape)

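    # Preview random training digits until the user enters 'q'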
    while input("q: quit, other : continue ") != 'q':
        i = np.random.randint(train_sample.images.shape[0])
        arr = train_sample.images[i, :]
        lab = train_sample.labels[i, :]
        print("Number =", np.argmax(lab))  # labels are one-hot encoded
        # Invert the grayscale so the digit shows dark on a white background
        arr = 255 * np.ones([28, 28]) - arr.reshape(28, 28)
        plt.gray()
        plt.imshow(arr)
        plt.show()

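    # Bundle the splits into a Datasets(train, validation, test) namedtuple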
    mnist = base.Datasets(train=train_sample,
                          validation=valid_sample,
                          test=test_sample)

    model = Model("model")

    model.build()

    model.define_loss()

    train_step = model.Get_train_step()

    ## save graph and TensorBoard summaries
    saver = model.Get_Saver()
    graph_location = "./logs/"
    sum_accuracy = tf.summary.scalar("accuracy", model.accuracy)
    sum_xentropy = tf.summary.scalar("cross_entropy", model.cross_entropy)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())
    print('Saving graph to: %s' % graph_location)
    ##

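    # Train, evaluate on the test set, and save a checkpoint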
    with tf.Session() as sess:
        init = model.Get_initializer()
        sess.run(init)
        for i in range(Ntrain):
            batch = mnist.train.next_batch(50)
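            # Every 100 steps, report accuracy on the current batch and log summaries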
            if i % 100 == 0:
                feed_dict = model.Get_feed_dict(batch[0], batch[1], 1)
                train_accuracy = model.accuracy.eval(feed_dict=feed_dict)
                tmp_sum_accuracy = sum_accuracy.eval(feed_dict=feed_dict)
                tmp_sum_xentropy = sum_xentropy.eval(feed_dict=feed_dict)
                train_writer.add_summary(tmp_sum_accuracy, i)
                train_writer.add_summary(tmp_sum_xentropy, i)
                print('step %d, training accuracy %g' % (i, train_accuracy))

            # Train with dropout keep probability 0.5 on the 1024->10 layer
            feed_dict = model.Get_feed_dict(batch[0], batch[1], 0.5)
            train_step.run(feed_dict=feed_dict)

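        # Final evaluation on the full test set (keep probability 1: dropout off)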
        feed_dict = model.Get_feed_dict(mnist.test.images, mnist.test.labels,
                                        1)
        print('test accuracy %g' % model.accuracy.eval(feed_dict=feed_dict))

        saver.save(sess, "./out/Model_out")  # write the final checkpoint

    train_writer.flush()
    train_writer.close()
Example #2
# NOTE: imports assumed by this example (LoadSample and Model as in Example #1):
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

def main():

  fname_test_image = "./data/t10k-images.idx3-ubyte"
  fname_test_label = "./data/t10k-labels.idx1-ubyte"

  test_sample = LoadSample(fname_test_image, fname_test_label)
  print(test_sample.images.shape)

  model = Model("model")

  model.build()

  model.define_loss()

  train_step = model.Get_train_step()  # not used for training here; kept so the graph matches the saved one

  # restore
  modeldir = "./out/"

  with tf.Session() as sess:

    model.Restore(sess, modeldir)

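    # Fetch the trained kernels and biases from the restored graph as NumPy arrays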
    W_conv1 = sess.run(model.weight["conv1"])
    W_conv2 = sess.run(model.weight["conv2"])
    b_conv1 = sess.run(model.bias["conv1"])
    b_conv2 = sess.run(model.bias["conv2"])

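    # Sanity check: run a uniform gray image through conv1 + pool1 and print shapes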
    x = tf.cast(np.array([128.] * 28 * 28).reshape([1, -1]), tf.float32)
    x = tf.reshape(x, [-1, 28, 28, 1])
    filter_1 = tf.nn.relu(tf.nn.conv2d(x, W_conv1, strides=[1, 1, 1, 1], padding="SAME") + b_conv1)
    pool_1 = tf.nn.max_pool(filter_1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    print("filter_1: ", filter_1.shape)
    print("pool_1  : ", pool_1.shape)
    print("Finished loading model\n")
   
    # convolution 1 
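    # For each of the 32 conv1 filters: scan 100 test images and accumulate the
    # 5x5 input patch at every position, weighted by the filter's ReLU response
    # there. The sum approximates the input pattern the filter responds to.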
    image_1 = np.zeros([32, 5, 5])
    for i in range(32):
      tmp_image = np.zeros([5, 5])

      for im in range(100):
        tmp_pict = test_sample.images[im, :].reshape([28, 28])
        for ix in range(2, 26):
          for iy in range(2, 26):
            tmp_weight = 0

            # 5x5 kernel centered at (ix, iy); offset -2..2 maps to kernel index 0..4
            for ipx in range(-2, 3):
              for ipy in range(-2, 3):
                tmp_weight += W_conv1[ipx + 2, ipy + 2, 0, i] * tmp_pict[ix + ipx, iy + ipy]

            # ReLU on the conv1 pre-activation
            tmp_weight = max(tmp_weight + b_conv1[i], 0)

            # Accumulate the input patch, weighted by the filter's response
            tmp_image = tmp_image + tmp_pict[ix - 2:ix + 3, iy - 2:iy + 3] * tmp_weight

      max_pt = np.max(tmp_image)
      min_pt = np.min(tmp_image)
      print("conv1 filter %d:  max: %.2f  min: %.2f" % (i, max_pt, min_pt))

      # Map the largest response to 128, then shift into [0, 255] for display
      if max_pt <= 0:
        scale = -1. / min_pt
      else:
        scale = 1. / max_pt

      tmp_image = tmp_image * scale * 128 + 128
      image_1[i, :, :] = tmp_image.astype(np.int32)

      plt.imshow(image_1[i, :, :], interpolation='none', cmap=plt.cm.gray_r)
      plt.colorbar()
      plt.savefig("filter/filter1_" + str(i) + ".png")
      plt.clf()
      
    # convolution 2
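    # Deconvnet-style back-projection: record the argmax offset inside every 2x2
    # pool window, then paste each conv1 visualization at its unpooled position,
    # weighted by the conv2 filter's response.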
    image_2 = np.zeros([64, 14, 14])

    # Build the conv1 op once, outside the loop, so the graph does not keep growing
    x_ph = tf.placeholder(tf.float32, [1, 28, 28, 1])
    conv1_op = tf.nn.relu(tf.nn.conv2d(x_ph, W_conv1, strides=[1, 1, 1, 1], padding="SAME") + b_conv1)

    for im in range(100):
      tmp_pict = test_sample.images[im, :].reshape([28, 28])
      tmp_conv1 = sess.run(conv1_op, feed_dict={x_ph: tmp_pict.reshape([1, 28, 28, 1])})
      tmp_conv1 = tmp_conv1.reshape([28, 28, 32])

      pool_val = np.zeros([32,14,14])
      pool_x   = np.zeros([32,14,14])
      pool_y   = np.zeros([32,14,14])
      # 2x2 max-pool by hand, also recording each window's argmax offset
      # (pool_x, pool_y), needed to unpool during the back-projection below
      for ifilter in range(32):
        for ipoolx in range(14):
          for ipooly in range(14):
            tmp_pool_val = -9999
            tmp_pool_x = 0
            tmp_pool_y = 0

            for tmp_i in range(2):
              for tmp_j in range(2):
                tmp_val = tmp_conv1[ipoolx * 2 + tmp_i, ipooly * 2 + tmp_j, ifilter]
                if tmp_val > tmp_pool_val:
                  tmp_pool_val = tmp_val
                  tmp_pool_x = tmp_i
                  tmp_pool_y = tmp_j

            pool_val[ifilter, ipoolx, ipooly] = tmp_pool_val
            pool_x[ifilter, ipoolx, ipooly] = tmp_pool_x
            pool_y[ifilter, ipoolx, ipooly] = tmp_pool_y

      print("make pool: image ",im)
      # convolution 2
      for ix in range(2, 12):
        for iy in range(2, 12):
          tmp_weight = np.zeros([32, 64])

          for i in range(64):
            tmp_image = np.zeros([14, 14])
            # Reset per output filter: tmp_sum is filter i's conv2 pre-activation here
            tmp_sum = 0

            for ifilter in range(32):
              Wx = np.sum(W_conv2[:, :, ifilter, i] * pool_val[ifilter, ix - 2:ix + 3, iy - 2:iy + 3])
              tmp_sum += Wx
              tmp_weight[ifilter, i] += Wx

            # ReLU: skip positions where filter i does not activate
            if tmp_sum + b_conv2[i] <= 0: continue

            for ipx in range(-2, 3):
              for ipy in range(-2, 3):
                for ifilter in range(32):
                  # Paste image_1 at the unpooled (argmax) position of this window
                  tmp_px = int(4 + ipx * 2 + pool_x[ifilter, ix + ipx, iy + ipy])
                  tmp_py = int(4 + ipy * 2 + pool_y[ifilter, ix + ipx, iy + ipy])
                  tmp_image[tmp_px:tmp_px + 5, tmp_py:tmp_py + 5] += image_1[ifilter, :, :] * tmp_weight[ifilter, i]

            image_2[i, :, :] += tmp_image

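    # Scale each accumulated conv2 visualization into [0, 255] and save it to filter/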
    for i in range(64):
      tmp_image = image_2[i, :, :]
      max_pt = np.max(tmp_image)
      min_pt = np.min(tmp_image)
      print("conv2 filter %d:  max: %.2f  min: %.2f" % (i, max_pt, min_pt))

      # Same scaling scheme as conv1; skip filters that never activated
      if max_pt == 0 and min_pt == 0:
        continue
      if max_pt <= 0:
        scale = -1. / min_pt
      else:
        scale = 1. / max_pt

      tmp_image = tmp_image * scale * 128 + 128
      image_2[i, :, :] = tmp_image.astype(np.int32)

      plt.imshow(image_2[i, :, :], interpolation='none', cmap=plt.cm.gray_r)
      plt.colorbar()
      plt.savefig("filter/filter2_" + str(i) + ".png")
      plt.clf()