# NOTE: requires TensorFlow 1.x (graph mode) and the project's layer helpers
# (weight_xavier_init, bias_variable, conv2d, deconv2d, normalizationlayer, resnet_Add).
import tensorflow as tf


def AGModel(x, signal, kernalshape, phase, height=None, width=None, scope=None):
    with tf.name_scope(scope):
        # gating signal branch: convolve the gating signal
        Wg = weight_xavier_init(shape=kernalshape, n_inputs=kernalshape[0] * kernalshape[1] * kernalshape[2],
                                n_outputs=kernalshape[-1], activefunction='relu', variable_name=str(scope) + 'Wg')
        Bg = bias_variable([kernalshape[-1]], variable_name=str(scope) + 'Bg')
        convg = conv2d(signal, Wg) + Bg
        convg = normalizationlayer(convg, phase, height=height, width=width, norm_type='group',
                                   scope=str(scope) + 'normg')
        # input feature branch: convolve the input features
        Wf = weight_xavier_init(shape=kernalshape, n_inputs=kernalshape[0] * kernalshape[1] * kernalshape[2],
                                n_outputs=kernalshape[-1], activefunction='relu', variable_name=str(scope) + 'Wf')
        Bf = bias_variable([kernalshape[-1]], variable_name=str(scope) + 'Bf')
        convf = conv2d(x, Wf) + Bf
        convf = normalizationlayer(convf, phase, height=height, width=width, norm_type='group',
                                   scope=str(scope) + 'normf')
        # add the two branches and apply ReLU
        convadd = resnet_Add(x1=convg, x2=convf)
        convadd = tf.nn.relu(convadd)

        # generate the attention gate coefficient (1x1 conv to a single-channel sigmoid map)
        attencoekernalshape = (1, 1, kernalshape[-1], 1)
        Wpsi = weight_xavier_init(shape=attencoekernalshape,
                                  n_inputs=attencoekernalshape[0] * attencoekernalshape[1] * attencoekernalshape[2],
                                  n_outputs=attencoekernalshape[-1], activefunction='sigomd',
                                  variable_name=str(scope) + 'Wpsi')
        Bpsi = bias_variable([attencoekernalshape[-1]], variable_name=str(scope) + 'Bpsi')
        convpsi = conv2d(convadd, Wpsi) + Bpsi
        convpsi = normalizationlayer(convpsi, phase, height=height, width=width, norm_type='group',
                                     scope=str(scope) + 'normpsi')
        convpsi = tf.nn.sigmoid(convpsi)
        # gate the input features with the attention coefficient
        attengatx = tf.multiply(x, convpsi)
        return attengatx
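

# Hedged usage sketch (not from the original source): AGModel is typically used to
# gate a U-Net-style skip connection, with the encoder feature map as `x` and the
# (resized) decoder feature map as the gating `signal`. The helper name, channel
# count and scope below are illustrative assumptions.
def attention_skip_example(skip_feat, gate_feat, channels, phase, height, width):
    # Suppress irrelevant spatial locations in the encoder features, then
    # concatenate the gated features with the gating features for the decoder.
    gated = AGModel(skip_feat, gate_feat, kernalshape=(1, 1, channels, channels),
                    phase=phase, height=height, width=width, scope='attn_gate')
    return tf.concat([gated, gate_feat], axis=-1)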
def deconv_relu_drop(x, kernalshape, samefeature=False, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernalshape, n_inputs=kernalshape[0] * kernalshape[1] * kernalshape[-1],
                               n_outputs=kernalshape[-2], activefunction='relu', variable_name=str(scope) + 'W')
        B = bias_variable([kernalshape[-2]], variable_name=str(scope) + 'B')
        dconv = tf.nn.relu(deconv2d(x, W, samefeature=samefeature) + B)
        return dconv
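# Note (assumption): the transposed-convolution kernel above is assumed to follow
# the tf.nn.conv2d_transpose layout (h, w, out_channels, in_channels), which is
# why n_outputs and the bias use kernalshape[-2] rather than kernalshape[-1].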
def conv_relu(x, kernalshape, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernalshape, n_inputs=kernalshape[0] * kernalshape[1] * kernalshape[2],
                               n_outputs=kernalshape[-1], activefunction='relu', variable_name=str(scope) + 'W')
        B = bias_variable([kernalshape[-1]], variable_name=str(scope) + 'B')
        conv = conv2d(x, W) + B
        conv = tf.nn.relu(conv)
        return conv
def down_sampling(x, kernalshape, phase, drop_conv, height=None, width=None, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernalshape, n_inputs=kernalshape[0] * kernalshape[1] * kernalshape[2],
                               n_outputs=kernalshape[-1], activefunction='relu', variable_name=str(scope) + 'W')
        B = bias_variable([kernalshape[-1]], variable_name=str(scope) + 'B')
        conv = conv2d(x, W, 2) + B  # stride-2 convolution halves the spatial resolution
        conv = normalizationlayer(conv, phase, height=height, width=width, norm_type='group', scope=scope)
        conv = tf.nn.dropout(tf.nn.relu(conv), drop_conv)  # drop_conv is the keep probability (TF1 dropout semantics)
        return conv
def conv_sigmod(x, kernalshape, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernalshape, n_inputs=kernalshape[0] * kernalshape[1] * kernalshape[2],
                               n_outputs=kernalshape[-1], activefunction='sigomd', variable_name=scope + 'W')
        B = bias_variable([kernalshape[-1]], variable_name=scope + 'B')
        conv = conv2d(x, W) + B
        conv = tf.nn.sigmoid(conv)
        return conv
def full_connected_relu_drop(x, kernal, drop, activefunction='relu', scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1],
                               n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'B')
        FC = tf.matmul(x, W) + B
        if activefunction == 'relu':
            FC = tf.nn.relu(FC)
            FC = tf.nn.dropout(FC, drop)
        elif activefunction == 'softmax':
            FC = tf.nn.softmax(FC)
        return FC
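

# Hedged composition sketch (not from the original source): one way these helpers
# can be chained into a single down-/up-sampling step that ends in a per-pixel
# probability map. Channel counts, spatial sizes, the keep probability and the
# scope names are illustrative assumptions; deconv2d is assumed to follow the
# tf.nn.conv2d_transpose filter layout (h, w, out_channels, in_channels).
def mini_encoder_decoder_example(x, phase, drop_conv, height, width):
    enc = conv_relu(x, kernalshape=(3, 3, 1, 16), scope='enc1')                # H x W x 16
    down = down_sampling(enc, (3, 3, 16, 32), phase, drop_conv,
                         height=height // 2, width=width // 2, scope='down1')  # H/2 x W/2 x 32
    up = deconv_relu_drop(down, (3, 3, 16, 32), scope='up1')                   # back to H x W x 16
    merged = tf.concat([enc, up], axis=-1)                                     # H x W x 32
    return conv_sigmod(merged, kernalshape=(1, 1, 32, 1), scope='out')         # H x W x 1 probabilities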