Example #1
                 train=train_conv)
l1 = Convolution(input_sizes=[batch_size, 224, 224, 64],
                 filter_sizes=[3, 3, 64, 64],
                 num_classes=num_classes,
                 init_filters=args.init,
                 strides=[1, 1, 1, 1],
                 padding="SAME",
                 alpha=learning_rate,
                 activation=Relu(),
                 bias=0.0,
                 last_layer=False,
                 name="conv2",
                 load=weights_conv,
                 train=train_conv)
l2 = MaxPool(size=[batch_size, 224, 224, 64],
             ksize=[1, 2, 2, 1],
             strides=[1, 2, 2, 1],
             padding="VALID")

l3 = Convolution(input_sizes=[batch_size, 112, 112, 64],
                 filter_sizes=[3, 3, 64, 128],
                 num_classes=num_classes,
                 init_filters=args.init,
                 strides=[1, 1, 1, 1],
                 padding="SAME",
                 alpha=learning_rate,
                 activation=Relu(),
                 bias=0.0,
                 last_layer=False,
                 name="conv3",
                 load=weights_conv,
                 train=train_conv)
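
The feature-map sizes declared above can be verified with simple arithmetic. The sketch below is not part of the original example; it only confirms that a SAME-padded 3x3 convolution with stride 1 preserves the 224x224 resolution and that the VALID 2x2/stride-2 max pool reduces it to the 112x112 input declared for l3.

# Added sanity check (not from the original code).
def conv_same_out(size, stride):
    return -(-size // stride)        # ceil(size / stride)

def pool_valid_out(size, ksize, stride):
    return (size - ksize) // stride + 1

h = conv_same_out(224, 1)            # conv2: 224 -> 224
h = pool_valid_out(h, 2, 2)          # l2 pool: 224 -> 112
assert h == 112                      # matches l3's input_sizes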
Example #2
'''
optimizer = tf.train.AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999, epsilon=1).minimize(loss)
'''

###############################################################

l0 = Convolution(input_sizes=[batch_size, 256, 256, 3],
                 filter_sizes=[3, 3, 3, 16],
                 num_classes=num_classes,
                 init_filters=args.init,
                 strides=[1, 1, 1, 1],
                 padding="SAME",
                 alpha=ALPHA,
                 activation=Relu(),
                 last_layer=False)
l1 = MaxPool(size=[batch_size, 256, 256, 16],
             ksize=[1, 2, 2, 1],
             strides=[1, 2, 2, 1],
             padding="SAME")
l2 = FeedbackConv(size=[batch_size, 128, 128, 16],
                  num_classes=num_classes,
                  sparse=sparse,
                  rank=rank)

l3 = Convolution(input_sizes=[batch_size, 128, 128, 16],
                 filter_sizes=[3, 3, 16, 16],
                 num_classes=num_classes,
                 init_filters=args.init,
                 strides=[1, 1, 1, 1],
                 padding="SAME",
                 alpha=ALPHA,
                 activation=Relu(),
                 last_layer=False)
Example #3
l1 = FeedbackConv(size=[batch_size, 28, 28, 32],
                  num_classes=10,
                  sparse=sparse,
                  rank=args.rank)

l2 = Convolution(input_sizes=[batch_size, 28, 28, 32],
                 filter_sizes=[3, 3, 32, 64],
                 num_classes=10,
                 init_filters=args.init,
                 strides=[1, 1, 1, 1],
                 padding="SAME",
                 alpha=ALPHA,
                 activation=Tanh(),
                 last_layer=False)
l3 = MaxPool(size=[batch_size, 28, 28, 64],
             ksize=[1, 2, 2, 1],
             strides=[1, 2, 2, 1],
             padding="VALID")
l4 = FeedbackConv(size=[batch_size, 14, 14, 64],
                  num_classes=10,
                  sparse=sparse,
                  rank=args.rank)

l5 = ConvToFullyConnected(shape=[14, 14, 64])
l6 = FullyConnected(size=[14 * 14 * 64, 128],
                    num_classes=10,
                    init_weights=args.init,
                    alpha=ALPHA,
                    activation=Tanh(),
                    last_layer=False)
l7 = FeedbackFC(size=[14 * 14 * 64, 128],
                num_classes=10,
                sparse=sparse,
                rank=args.rank)  # closing arguments inferred from the feedback layers above
Example #4
    get_conv5_bias = tf.get_default_graph().get_tensor_by_name(
        os.path.split(conv5.name)[0] + '/bias:0')

else:
    l0 = Convolution(input_sizes=[batch_size, 227, 227, 3],
                     filter_sizes=[11, 11, 3, 96],
                     num_classes=num_classes,
                     init_filters=args.init,
                     strides=[1, 4, 4, 1],
                     padding="VALID",
                     alpha=ALPHA,
                     activation=Relu(),
                     bias=0.0,
                     last_layer=False)
    l1 = MaxPool(size=[batch_size, 55, 55, 96],
                 ksize=[1, 3, 3, 1],
                 strides=[1, 2, 2, 1],
                 padding="VALID")

    l2 = Convolution(input_sizes=[batch_size, 27, 27, 96],
                     filter_sizes=[5, 5, 96, 256],
                     num_classes=num_classes,
                     init_filters=args.init,
                     strides=[1, 1, 1, 1],
                     padding="SAME",
                     alpha=ALPHA,
                     activation=Relu(),
                     bias=0.0,
                     last_layer=False)
    l3 = MaxPool(size=[batch_size, 27, 27, 256],
                 ksize=[1, 3, 3, 1],
                 strides=[1, 2, 2, 1],
                 padding="VALID")  # closing argument inferred from the l1 pool above
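
The AlexNet-style sizes in this snippet follow from the VALID convolution and pooling arithmetic. The short check below is an addition for clarity, not part of the original example.

# Added dimension check: VALID 11x11/stride-4 conv, then VALID 3x3/stride-2 pool.
conv_out = (227 - 11) // 4 + 1       # l0: 227 -> 55
pool_out = (55 - 3) // 2 + 1         # l1: 55 -> 27
assert (conv_out, pool_out) == (55, 27)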
Example #5
##############################################

tf.reset_default_graph()
tf.set_random_seed(0)  # seed the new default graph (setting it before the reset would have no effect)

batch_size = tf.placeholder(tf.int32, shape=())
XTRAIN = tf.placeholder(tf.float32, [None, 32, 32, 3])
YTRAIN = tf.placeholder(tf.float32, [None, 100])
XTRAIN = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), XTRAIN)

XTEST = tf.placeholder(tf.float32, [None, 32, 32, 3])
YTEST = tf.placeholder(tf.float32, [None, 100])
XTEST = tf.map_fn(lambda frame1: tf.image.per_image_standardization(frame1), XTEST)

l0 = Convolution(input_sizes=[batch_size, 32, 32, 3], filter_sizes=[5, 5, 3, 96], num_classes=100, init_filters=args.init, strides=[1, 1, 1, 1], padding="SAME", alpha=ALPHA, activation=Tanh(), last_layer=False)
l1 = MaxPool(size=[batch_size, 32, 32, 96], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME")
l2 = FeedbackConv(size=[batch_size, 16, 16, 96], num_classes=100, sparse=sparse, rank=rank)

l3 = Convolution(input_sizes=[batch_size, 16, 16, 96], filter_sizes=[5, 5, 96, 128], num_classes=100, init_filters=args.init, strides=[1, 1, 1, 1], padding="SAME", alpha=ALPHA, activation=Tanh(), last_layer=False)
l4 = MaxPool(size=[batch_size, 16, 16, 128], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME")
l5 = FeedbackConv(size=[batch_size, 8, 8, 128], num_classes=100, sparse=sparse, rank=rank)

l6 = Convolution(input_sizes=[batch_size, 8, 8, 128], filter_sizes=[5, 5, 128, 256], num_classes=100, init_filters=args.init, strides=[1, 1, 1, 1], padding="SAME", alpha=ALPHA, activation=Tanh(), last_layer=False)
l7 = MaxPool(size=[batch_size, 8, 8, 256], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME")
l8 = FeedbackConv(size=[batch_size, 4, 4, 256], num_classes=100, sparse=sparse, rank=rank)

l9 = ConvToFullyConnected(shape=[4, 4, 256])
l10 = FullyConnected(size=[4*4*256, 2048], num_classes=100, init_weights=args.init, alpha=ALPHA, activation=Tanh(), last_layer=False)
l11 = FeedbackFC(size=[4*4*256, 2048], num_classes=100, sparse=sparse, rank=rank)

l12 = FullyConnected(size=[2048, 2048], num_classes=100, init_weights=args.init, alpha=ALPHA, activation=Tanh(), last_layer=False)
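
The flattened size fed into the fully connected stack follows from the pooling schedule. The bookkeeping below is an added illustration, not original code: three SAME 3x3/stride-2 pools reduce 32 -> 16 -> 8 -> 4, so ConvToFullyConnected hands 4*4*256 = 4096 features to the first FullyConnected layer.

# Added shape bookkeeping (illustrative only).
h = 32
for _ in range(3):
    h = -(-h // 2)                   # SAME pooling with stride 2: ceil(h / 2)
assert h == 4
assert 4 * 4 * 256 == 4096           # input width of l10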
Example #6
XTEST = tf.placeholder(tf.float32, [None, 32, 32, 3])
YTEST = tf.placeholder(tf.float32, [None, 10])
XTEST = tf.map_fn(lambda frame1: tf.image.per_image_standardization(frame1),
                  XTEST)

l0 = Convolution(input_sizes=[batch_size, 32, 32, 3],
                 filter_sizes=[5, 5, 3, 96],
                 num_classes=10,
                 init_filters=args.init,
                 strides=[1, 1, 1, 1],
                 padding="SAME",
                 alpha=ALPHA,
                 activation=Tanh(),
                 last_layer=False)
l1 = MaxPool(size=[batch_size, 32, 32, 96],
             ksize=[1, 3, 3, 1],
             strides=[1, 2, 2, 1],
             padding="VALID")
l2 = FeedbackConv(size=[batch_size, 15, 15, 96],
                  num_classes=10,
                  sparse=sparse,
                  rank=rank)

l3 = Convolution(input_sizes=[batch_size, 15, 15, 96],
                 filter_sizes=[5, 5, 96, 128],
                 num_classes=10,
                 init_filters=args.init,
                 strides=[1, 1, 1, 1],
                 padding="SAME",
                 alpha=ALPHA,
                 activation=Tanh(),
                 last_layer=False)
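
Unlike the CIFAR-100 example above, the pool in this snippet uses VALID padding, which is why the FeedbackConv sizes are 15x15 rather than 16x16. The check below is an added note, not original code.

# Added check: VALID vs. SAME output size for a 3x3/stride-2 pool on a 32x32 map.
assert (32 - 3) // 2 + 1 == 15       # VALID: matches l2/l3 above
assert -(-32 // 2) == 16             # SAME would give 16 (as in the CIFAR-100 example)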
Example #7
XTEST = tf.placeholder(tf.float32, [None, 28, 28, 1])
YTEST = tf.placeholder(tf.float32, [None, 10])
XTEST = tf.map_fn(lambda frame1: tf.image.per_image_standardization(frame1),
                  XTEST)

l0 = Convolution(input_sizes=[batch_size, 28, 28, 1],
                 filter_sizes=[3, 3, 1, 32],
                 num_classes=10,
                 init_filters=args.init,
                 strides=[1, 1, 1, 1],
                 padding="SAME",
                 alpha=ALPHA,
                 activation=Tanh(),
                 last_layer=False)
l1 = MaxPool(size=[batch_size, 28, 28, 32],
             ksize=[1, 2, 2, 1],
             strides=[1, 2, 2, 1],
             padding="SAME")
l2 = FeedbackConv(size=[batch_size, 14, 14, 32],
                  num_classes=10,
                  sparse=sparse,
                  rank=rank)

l3 = Convolution(input_sizes=[batch_size, 14, 14, 32],
                 filter_sizes=[3, 3, 32, 64],
                 num_classes=10,
                 init_filters=args.init,
                 strides=[1, 1, 1, 1],
                 padding="SAME",
                 alpha=ALPHA,
                 activation=Tanh(),
                 last_layer=False)