def model(x, is_train, reuse):
    """Two-stage spatial-transformer regression network.

    A localization CNN (``ln1``) predicts 3 affine parameters (rotation,
    x translation, y translation), which are used by ``stn_affine`` to warp
    the original input; a second CNN (``ln2``) re-estimates the parameters
    on the warped image.  The final prediction is the element-wise sum of
    both heads' outputs.

    Args:
        x: input image tensor/placeholder.
        is_train: bool, switches batch-norm between train and inference mode.
        reuse: bool, reuse variables inside the "STN" scope.

    Returns:
        (ln1, ln2, cost, final_out, loss1, loss2, loss3, ln1.outputs) where
        ``cost`` is the overall MSE plus L2 weight decay and ``loss1``..
        ``loss3`` are the per-parameter MSEs.

    NOTE(review): relies on module-level ``y_``, ``dim1``, ``dim2``,
    ``gamma_init`` and ``stn_affine`` defined elsewhere in this file.
    """
    with tf.variable_scope("STN", reuse=reuse):
        tl.layers.set_name_reuse(reuse)
        nin = InputLayer(x, name='in')

        # Localization network: regresses the 3 affine parameters.
        ln1 = Conv2d(nin, 16, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', name='tc01')
        ln1 = Conv2d(ln1, 32, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc02')
        ln1 = Conv2d(ln1, 64, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc03')
        ln1 = BatchNormLayer(ln1, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='batch_norm1')
        ln1 = Conv2d(ln1, 128, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc04')
        ln1 = BatchNormLayer(ln1, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='batch_norm2')
        ln1 = FlattenLayer(ln1, name='f01')
        ln1 = DenseLayer(ln1, n_units=64, act=tf.identity, name='d01')
        ln1 = DenseLayer(ln1, n_units=3, act=tf.identity, name='d02')

        # Warp the ORIGINAL input with the predicted transform.
        stn = stn_affine(nin, ln1, out_size=[dim1, dim2], name='ST1')

        # Refinement network on the warped image.
        ln2 = Conv2d(stn, 16, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', name='tc05')
        ln2 = Conv2d(ln2, 32, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc06')
        ln2 = BatchNormLayer(ln2, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='batch_norm3')
        ln2 = Conv2d(ln2, 64, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc07')
        ln2 = BatchNormLayer(ln2, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='batch_norm4')
        ln2 = Conv2d(ln2, 128, (2, 2), (2, 2), act=tf.nn.relu, padding='SAME', name='tc08')
        ln2 = FlattenLayer(ln2, name='f02')
        ln2 = DenseLayer(ln2, n_units=64, act=tf.identity, name='d03')
        ln2 = DenseLayer(ln2, n_units=3, act=tf.identity, name='d04')

        final_out = tf.add(ln1.outputs, ln2.outputs)

        # Overall mean error of all the three parameters.
        # (The original comment line here was tab-indented, which raises
        # TabError under Python 3 — fixed to spaces.)
        ce = tl.cost.mean_squared_error(final_out, y_, 'cost')

        # L2 weight decay on the conv kernels.  BUG FIX: the original
        # pattern 'relu/W' matched no variables (no layer in this scope is
        # named 'relu'), so the regularizer was silently always 0.
        # TensorLayer names Conv2d kernels 'W_conv2d'.
        l2_loss = 0
        for w in tl.layers.get_variables_with_name('W_conv2d', True, True):
            l2_loss += tf.contrib.layers.l2_regularizer(0.004)(w)
        cost = ce + l2_loss
        tf.summary.scalar('cost', cost)

        # Individual errors for rotation, x translation and y translation.
        loss1 = tf.losses.mean_squared_error(final_out[:, 0], y_[:, 0])
        tf.summary.scalar('loss1', loss1)

        loss2 = tf.losses.mean_squared_error(final_out[:, 1], y_[:, 1])
        tf.summary.scalar('loss2', loss2)

        loss3 = tf.losses.mean_squared_error(final_out[:, 2], y_[:, 2])
        tf.summary.scalar('loss3', loss3)

    return ln1, ln2, cost, final_out, loss1, loss2, loss3, ln1.outputs
# Beispiel #2 (example separator from extraction; commented out so the file parses)
# 0
def model(x, is_train, reuse):
    """Single spatial-transformer regression network.

    A localization CNN (``ln``) predicts 3 affine parameters, ``stn_affine``
    warps the input with them, and a second CNN (``nt``) regresses the final
    3 parameters from the warped image.  Only the second head's output is
    used as the prediction.

    Args:
        x: input image tensor/placeholder.
        is_train: bool, training-mode flag (unused by these layers but kept
            for interface consistency with the sibling models).
        reuse: bool, reuse variables inside the "STN" scope.

    Returns:
        (nt, cost, final_out, loss1, loss2, loss3, localization_out) where
        ``cost`` is the overall MSE and ``loss1``..``loss3`` are the
        per-parameter MSEs (rotation, x translation, y translation).

    NOTE(review): relies on module-level ``y_``, ``dim1``, ``dim2`` and
    ``stn_affine`` defined elsewhere in this file.
    """
    with tf.variable_scope("STN", reuse=reuse):
        tl.layers.set_name_reuse(reuse)
        nin = InputLayer(x, name='in')

        # Localization network.
        ln = Conv2d(nin,
                    16, (3, 3), (1, 1),
                    act=tf.nn.relu,
                    padding='SAME',
                    name='tc01')
        # BUG FIX: the original fed this layer from `nin`, silently
        # discarding the first conv layer 'tc01'.  Chain from `ln` instead.
        ln = Conv2d(ln,
                    64, (3, 3), (2, 2),
                    act=tf.nn.relu,
                    padding='SAME',
                    name='tc02')
        ln = Conv2d(ln,
                    128, (3, 3), (2, 2),
                    act=tf.nn.relu,
                    padding='SAME',
                    name='tc03')
        ln = FlattenLayer(ln, name='f01')
        ln = DenseLayer(ln, n_units=64, act=tf.identity, name='d01')
        ln = DenseLayer(ln, n_units=3, act=tf.identity, name='d02')

        # STN: warp the input with the predicted affine parameters.
        stn = stn_affine(nin, ln, out_size=[dim1, dim2], name='ST')

        # Regression network on the warped image.
        nt = Conv2d(stn,
                    16, (3, 3), (1, 1),
                    act=tf.nn.relu,
                    padding='SAME',
                    name='tc1')
        nt = Conv2d(nt,
                    32, (3, 3), (2, 2),
                    act=tf.nn.relu,
                    padding='SAME',
                    name='tc2')
        # 32x32x16

        nt = Conv2d(nt,
                    64, (3, 3), (2, 2),
                    act=tf.nn.relu,
                    padding='SAME',
                    name='tc3')
        nt = Conv2d(nt,
                    128, (3, 3), (2, 2),
                    act=tf.nn.relu,
                    padding='SAME',
                    name='tc4')

        nt = FlattenLayer(nt, name='f')
        nt = DenseLayer(nt, n_units=64, act=tf.identity, name='d1')
        nt = DenseLayer(nt, n_units=3, act=tf.identity, name='do')

        y = nt.outputs
        localization_out = ln.outputs
        final_out = y  #tf.add(localization_out, y)

        # Overall mean error of all the three parameters.
        cost = tl.cost.mean_squared_error(final_out, y_, 'cost')
        tf.summary.scalar('cost', cost)

        # Individual errors for rotation, x translation and y translation.
        loss1 = tf.losses.mean_squared_error(final_out[:, 0], y_[:, 0])
        tf.summary.scalar('loss1', loss1)

        loss2 = tf.losses.mean_squared_error(final_out[:, 1], y_[:, 1])
        tf.summary.scalar('loss2', loss2)

        loss3 = tf.losses.mean_squared_error(final_out[:, 2], y_[:, 2])
        tf.summary.scalar('loss3', loss3)

    return nt, cost, final_out, loss1, loss2, loss3, localization_out
# Beispiel #3 (example separator from extraction; commented out so the file parses)
# 0
def model(x, is_train, reuse):
    """Four-stage cascaded spatial-transformer regression network.

    Four identical localization heads each regress 3 affine parameters
    (rotation, x translation, y translation).  After each of the first
    three heads, ``stn_affine`` warps the current image with that head's
    prediction and the next head works on the warped result.  The final
    prediction is the sum of all four heads' outputs.

    Args:
        x: input image tensor/placeholder.
        is_train: bool, enables the fixed-keep dropout layers in training.
        reuse: bool, reuse variables inside the "STN" scope.

    Returns:
        (ln1, ln2, ln3, ln4, cost, final_out, loss1, loss2, loss3,
        ln1.outputs) — ``cost`` is the overall MSE, ``loss1``..``loss3``
        the per-parameter MSEs.

    NOTE(review): relies on module-level ``y_``, ``dim1``, ``dim2`` and
    ``stn_affine`` defined elsewhere in this file.
    """
    with tf.variable_scope("STN", reuse=reuse):
        tl.layers.set_name_reuse(reuse)
        nin = InputLayer(x, name='in')

        def _loc_head(inp, stage):
            """One localization head: 3 convs -> flatten -> (dropout+dense) x2.

            Layer names follow the original fixed scheme so variable scopes
            are identical: tc01..tc12, f01..f04, drop1..drop8, d01..d08.
            """
            c = 3 * (stage - 1)   # conv name offset for this stage
            d = 2 * (stage - 1)   # dense/dropout name offset for this stage
            net = Conv2d(inp, 16, (3, 3), (1, 1), act=tf.nn.relu,
                         padding='SAME', name='tc%02d' % (c + 1))
            net = Conv2d(net, 64, (3, 3), (2, 2), act=tf.nn.relu,
                         padding='SAME', name='tc%02d' % (c + 2))
            net = Conv2d(net, 128, (3, 3), (2, 2), act=tf.nn.relu,
                         padding='SAME', name='tc%02d' % (c + 3))
            net = FlattenLayer(net, name='f%02d' % stage)
            net = DropoutLayer(net, 0.5, True, is_train, name='drop%d' % (d + 1))
            net = DenseLayer(net, n_units=64, act=tf.identity, name='d%02d' % (d + 1))
            net = DropoutLayer(net, 0.5, True, is_train, name='drop%d' % (d + 2))
            net = DenseLayer(net, n_units=3, act=tf.identity, name='d%02d' % (d + 2))
            return net

        # Build the cascade: each stage warps the previous stage's image
        # with its own predicted transform (no warp after the last head).
        heads = []
        warped = nin
        for stage in range(1, 5):
            head = _loc_head(warped, stage)
            heads.append(head)
            if stage < 4:
                warped = stn_affine(warped, head, out_size=[dim1, dim2],
                                    name='ST%d' % stage)
        ln1, ln2, ln3, ln4 = heads

        # Sum of all four heads' parameter predictions.
        final_out = tf.add(ln1.outputs, ln2.outputs)
        final_out = tf.add(final_out, ln3.outputs)
        final_out = tf.add(final_out, ln4.outputs)

        # Overall mean error of all the three parameters.
        cost = tl.cost.mean_squared_error(final_out, y_, 'cost')
        tf.summary.scalar('cost', cost)

        # Individual errors for rotation, x translation and y translation.
        loss1 = tf.losses.mean_squared_error(final_out[:, 0], y_[:, 0])
        tf.summary.scalar('loss1', loss1)

        loss2 = tf.losses.mean_squared_error(final_out[:, 1], y_[:, 1])
        tf.summary.scalar('loss2', loss2)

        loss3 = tf.losses.mean_squared_error(final_out[:, 2], y_[:, 2])
        tf.summary.scalar('loss3', loss3)

    return ln1, ln2, ln3, ln4, cost, final_out, loss1, loss2, loss3, ln1.outputs