Example #1
# Bottleneck residual block (1x1 -> 3x3 -> 1x1) applied frame-wise to a
# 5-D input of shape [batch, seq_len, width, width, channels].
def res_layer(inp, num_features1, stride):
    num_features2 = num_features1 * 4
    shape = inp.get_shape()
    [seq_len, inp_width, num_channels] = [int(shape[i]) for i in [1, 2, 4]]
    #[_, seq_len, inp_width, _, num_channels] = [int(i) for i in list(inp.get_shape())]

    # Fold the sequence dimension into the batch so 2-D convs run per frame.
    inputs = tf.reshape(inp, [-1, inp_width, inp_width, num_channels])

    # Identity shortcut when the channel counts already match (assumes
    # stride == 1 in that case); otherwise project with a strided 1x1 conv.
    if num_channels == num_features2:
        o_l = inputs
    else:
        b_l = bias(num_features2, 0.2)
        w_l = weights([1, 1, num_channels, num_features2], 0.04)
        o_l = conv2d(inputs, b_l, w_l, stride)

    b1_r = bias(num_features1, 0.2)
    w1_r = weights([1, 1, num_channels, num_features1], 0.04)
    conv1_r = tf.nn.relu(batch_norm(conv2d(inputs, b1_r, w1_r, stride)))

    b2_r = bias(num_features1, 0.2)
    w2_r = weights([3, 3, num_features1, num_features1], 0.04)
    conv2_r = tf.nn.relu(batch_norm(conv2d(conv1_r, b2_r, w2_r, 1)))

    b3_r = bias(num_features2, 0.2)
    w3_r = weights([1, 1, num_features1, num_features2], 0.04)
    conv3_r = conv2d(conv2_r, b3_r, w3_r, 1)

    out = tf.nn.relu(batch_norm(tf.add(o_l, conv3_r)))

    shape = out.get_shape()
    [out_width, out_features] = [int(shape[i]) for i in [1, 3]]
    #[_, out_width, _, out_features] = [int(i) for i in list(out.get_shape())]

    return tf.reshape(out, [-1, seq_len, out_width, out_width, out_features])
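
The block above leans on four helpers (weights, bias, conv2d, batch_norm) that are not shown. A minimal sketch consistent with the call sites, assuming plain TF 1.x variables and a naive moments-based normalization, might look like:

import tensorflow as tf

def weights(shape, stddev):
    # Truncated-normal weight variable.
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev))

def bias(num_features, value):
    # Constant-initialized bias variable.
    return tf.Variable(tf.constant(value, shape=[num_features]))

def conv2d(x, b, w, stride):
    # Strided SAME-padding convolution plus bias, matching the
    # (input, bias, weights, stride) argument order used above.
    return tf.nn.conv2d(x, w, strides=[1, stride, stride, 1],
                        padding='SAME') + b

def batch_norm(x):
    # Per-channel normalization over the batch and spatial axes.
    mean, var = tf.nn.moments(x, axes=[0, 1, 2])
    return tf.nn.batch_normalization(x, mean, var, None, None, 1e-5)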
Example #2
    def caps_conv(self,
                  ksize,
                  outdim,
                  outcaps,
                  stride=1,
                  activation='l2',
                  usebias=True):
        print('Caps_conv_bias:', usebias)
        # the input is expected to be [BSIZE, height, width, capsnum, vecdim]
        capsnum = self.inpsize[3]
        vecdim = self.inpsize[4]
        stride_ = [1, stride, stride, capsnum, 1]
        with tf.variable_scope('CapsConv_' + str(self.layernum)):
            res = []
            for i in range(outcaps):
                with tf.variable_scope('CapsConv_3dConv_' + str(i)):
                    k = L.weight([ksize, ksize, capsnum, vecdim, outdim])
                    # 3-D conv over the (height, width, capsule) axes; striding
                    # by capsnum collapses the capsule axis to 1, so each output
                    # capsule pools over every input capsule at once.
                    buff = tf.nn.conv3d(self.result, k, stride_, 'SAME')
                    res.append(buff)
            self.result = tf.concat(res, axis=3)
            if usebias:
                b = L.bias([1, 1, 1, outcaps, outdim])
                self.result += b
            if activation == 'l2':
                self.result = tf.nn.l2_normalize(self.result, -1)
        self.layernum += 1
        self.inpsize = self.result.get_shape().as_list()

        return self.result
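
caps_conv assumes a wrapper object that tracks self.result, self.inpsize, and self.layernum, plus an L module supplying variable constructors. A minimal stand-in for L, assuming get_variable-based initializers (the real module may differ), could be:

import tensorflow as tf

class L:
    @staticmethod
    def weight(shape):
        # Truncated-normal kernel variable, named inside the caller's scope.
        return tf.get_variable(
            'weight', shape,
            initializer=tf.truncated_normal_initializer(stddev=0.01))

    @staticmethod
    def bias(shape):
        # Zero-initialized bias variable.
        return tf.get_variable(
            'bias', shape, initializer=tf.zeros_initializer())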
Example #3
def net(name, image, output0, trainable=True, reuse=None):
    with tf.variable_scope(name,reuse=reuse):
        params = []
        #conv bias 42->40
        conv1,k1 = layers.conv(name + "conv1",image,3,3,1,1,"VALID",1,16,trainable)
        bias1,b1 = layers.bias(name + "bias1",conv1,16,trainable)
        relu1 = layers.relu(name + "relu1",bias1)
        params += [k1,b1]
        #pool 40->20
        pool1 = layers.pooling(name + "pool1",relu1,2,2,2,2)
        #conv bias 20->20
        conv2,k2 = layers.conv(name + "conv2",pool1,3,3,1,1, "SAME",16,32,trainable)
        bias2,b2 = layers.bias(name + "bias2",conv2,32,trainable)
        relu2 = layers.relu(name + "relu2",bias2)
        params += [k2,b2]
        #conv bias 20->20
        conv2_,k2_ = layers.conv(name + "conv2_",relu2,3,3,1,1, "SAME",32,32,trainable)
        bias2_,b2_ = layers.bias(name + "bias2_",conv2_,32,trainable)
        relu2_ = layers.relu(name + "relu2_",bias2_)
        params += [k2_,b2_]
        #pool 20->10
        pool2 = layers.pooling(name + "pool2",relu2_,2,2,2,2)
        #conv bias 10->10
        conv3,k3 = layers.conv(name + "conv3",pool2,3,3,1,1,"SAME",32,64,trainable)
        bias3,b3 = layers.bias(name + "bias3",conv3,64,trainable)
        relu3 = layers.relu(name + "relu3",bias3)
        params += [k3,b3]
        #conv bias 10->10
        conv3_,k3_ = layers.conv(name + "conv3_",relu3,3,3,1,1,"SAME",64,64,trainable)
        bias3_,b3_ = layers.bias(name + "bias3_",conv3_,64,trainable)
        relu3_ = layers.relu(name + "relu3_",bias3_)
        params += [k3_,b3_]
        #pool 10->5
        pool3 = layers.pooling(name + "pool3",relu3_,2,2,2,2)
        #conv4 5->3
        conv4,k4 = layers.conv(name + "conv4",pool3,3,3,1,1,"VALID",64,128,trainable)
        bias4,b4 = layers.bias(name + "bias4",conv4,128,trainable)
        relu4 = layers.relu(name + "relu4",bias4)
        params += [k4,b4]
        #conv5 3->1
        conv5,k5 = layers.conv(name + "conv5",relu4,3,3,1,1,"VALID",128,128,trainable)
        bias5,b5 = layers.bias(name + "bias5",conv5,128,trainable)
        relu5 = layers.relu(name + "relu5",bias5)
        params += [k5,b5]
        #fcn
        feature0,dim0 = layers.reshapeToLine(relu5)
        fcn1,k6  = layers.fcn(name + "fcn1",feature0,dim0,output0,trainable)
        fcn1_bias,b6 = layers.bias(name + "fcn1_bias",fcn1,output0,trainable)
        params += [k6,b6]

        return fcn1_bias,params
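
Since net prefixes every layer name with name and wraps the whole body in a variable scope, calling it a second time with reuse=True shares all weights between graphs. A hypothetical train/eval wiring (the 42x42 single-channel input follows the "42->40" comment above, and output0=10 is an assumed class count):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 42, 42, 1])
train_logits, params = net("cnn", x, output0=10, trainable=True)
eval_logits, _ = net("cnn", x, output0=10, trainable=True, reuse=True)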
Example #4
def classification_network(x_h, x_m, y, test=False):
    with tf.variable_scope("network") as sc:
        if test:
            sc.reuse_variables()

        with tf.variable_scope("hand"):
            flat_h = network_arm(x_h)
        with tf.variable_scope("main"):
            flat_m = network_arm(x_m)

        combined = tf.concat([flat_h, flat_m], 2)  # values-first order (TF >= 1.0)
        flat = gru_last(combined, 512 * 2, 2, batch_size, "lstm")

        with tf.variable_scope("out") as sc:
            b_output = bias(20, 0.1)
            w_output = weights([1024, 20], 0.02)

        output = tf.matmul(flat, w_output) + b_output
        return output, tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output))
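
Because test=True only flips reuse_variables() on the shared scope, the training and evaluation graphs come from two calls to the same function. A sketch, assuming the x_h, x_m, and y placeholders already exist:

train_logits, train_loss = classification_network(x_h, x_m, y)
test_logits, test_loss = classification_network(x_h, x_m, y, test=True)
train_step = tf.train.AdamOptimizer(1e-4).minimize(train_loss)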
Example #5
o_res8_m = res_layer(o_res7_m, 64, 1)
_, _, _, o_m = layer(o_res8_m, 1, [1, 1])
#flat_m = flatten(tf.squeeze(o_m))

flat_m = flatten_multi(o_m)
#b_fc1_m, w_fc1_m, h_fc1_m = dense_multi(flat_m, int(flat_m.get_shape()[2]), 256, 0.1, 0.02)

#combined = tf.concat([flat_h, flat_m], 1)
combined = tf.concat([flat_h, flat_m], 2)  # values-first order (TF >= 1.0)

# In[6]:

#output = combined

flat = lstm_variable_last(x_m, combined, 512 * 2, 2, batch_size, "lstm")
b_output = bias(20, 0.1)
w_output = weights([1024, 20], 0.02)
output = tf.nn.softmax(tf.matmul(flat, w_output) + b_output)

# Hand-rolled cross-entropy on the softmax output; this hits log(0) for
# confident wrong predictions (see the stable variant below).
cross_entropy = -tf.reduce_sum(y * tf.log(output))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
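
A numerically safer formulation keeps the raw logits and lets TensorFlow fuse the softmax with the log, leaving the rest of the pipeline unchanged:

logits = tf.matmul(flat, w_output) + b_output
output = tf.nn.softmax(logits)
cross_entropy = tf.reduce_sum(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)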

# In[7]:
print("Initializing variables", flush=True)

CHALAP = "/home/aparulekar-ms/chalap"
MODEL = "3d-resnet-bn-mono"

# Create variables