Example #1
def code_classifier_forward(config,
                            incoming=None,
                            image=None,
                            scope="code_classifier",
                            name=None,
                            reuse=False):
    with tf.variable_scope(scope, name, reuse=reuse):
        output = relu(fully_connected(incoming, 512))
        output1 = dropout(output, 0.8)

        output = relu(
            fully_connected(
                tf.reshape(image, [config.batch_size, 32 * 32 * 3]), 512))
        output2 = dropout(output, 0.8)

        output = tf.concat([output1, output2], axis=-1)

        output = relu(fully_connected(output, 1024))
        output = dropout(output, 0.5)

        output = relu(fully_connected(output, 512))
        output = dropout(output, 0.8)

        output = fully_connected(output, 10)

    return output
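Note: none of these examples show their import preamble. As an assumption (TFLearn 0.3.x on TensorFlow 1.x, which these APIs date from), the following imports would cover Example #1; the later examples pull their conv, pooling, normalization, and merge layers from the corresponding tflearn modules in the same way.

# Assumed import preamble (not shown in the source)
import tensorflow as tf
from tflearn.activations import relu
from tflearn.layers.core import fully_connected, dropout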
Example #2
    def residual_block(self,
                       input_,
                       n_filters,
                       filter_size,
                       n_blocks,
                       stride=1):
        strides = stride

        for block in range(n_blocks):
            n, h, w, c = input_.get_shape().as_list()
            if block > 0:
                strides = 1

            conv1 = conv_2d(input_,
                            n_filters,
                            filter_size,
                            strides=strides,
                            activation='linear')
            act1 = relu(bn(conv1))

            conv2 = conv_2d(act1,
                            n_filters,
                            filter_size,
                            strides=1,
                            activation='linear')
            act2 = bn(conv2)

            if stride != 1 and block == 0:
                input_ = max_pool_2d(input_, [1, 3], [1, stride])
            if n_filters != c:
                input_ = conv_2d(input_, n_filters, 1, activation='linear')

            input_ = relu(input_ + act2)

        return input_
Example #3
def code_classifier_forward(config, incoming=None, image=None,
                            scope="code_classifier", name=None, reuse=False):
    with tf.variable_scope(scope, name, reuse=reuse):
        code_output = leaky_relu(fully_connected(incoming, 512))

        output = leaky_relu(fully_connected(tf.reshape(image, [config.batch_size, 28 * 28]), 512))
        prod = tf.matmul(code_output[:, :, None], output[:, None, :])

        prob = tf.nn.softmax(prod)
        prob2 = tf.nn.softmax(tf.transpose(prod, perm=[0, 2, 1]))

        output = tf.concat([code_output,
                            tf.matmul(prob, output[:, :, None])[:, :, 0],
                            tf.matmul(prob2, code_output[:, :, None])[:, :, 0]], axis=-1)
        output = relu(fully_connected(output, 1024))
        output = dropout(output, 0.6)

        output = relu(fully_connected(output, 512))
        output = dropout(output, 0.6)

        output = relu(fully_connected(output, 256))
        output = dropout(output, 0.8)

        output = fully_connected(output, 10)

    return output
Example #4
def classifier_forward(config,
                       incoming,
                       name=None,
                       reuse=False,
                       scope="classifier"):
    with tf.variable_scope(scope, name, reuse=reuse):
        network = incoming
        network = relu(
            batch_normalization(
                conv_2d(network,
                        32,
                        5,
                        activation='relu',
                        regularizer="L2",
                        strides=2)))
        network = relu(
            batch_normalization(
                conv_2d(network,
                        64,
                        5,
                        activation='relu',
                        regularizer="L2",
                        strides=2)))
        network = flatten(network)

        network = relu(batch_normalization(fully_connected(network, 1024)))
        network = dropout(network, 0.5)

        network = fully_connected(network, 10)

    return network
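For illustration only (not part of the source): this classifier variant never reads its config argument, so it can be probed with a bare placeholder.

# Hypothetical usage sketch; 32x32 RGB inputs are an assumption.
images = tf.placeholder(tf.float32, [None, 32, 32, 3])
logits = classifier_forward(None, images)  # [batch, 10] unnormalized scores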
Example #5
def block17(net, scale=1.0, activation="relu"):
    tower_conv = relu(
        batch_normalization(
            conv_2d(net,
                    192,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_1x1')))
    tower_conv_1_0 = relu(
        batch_normalization(
            conv_2d(net,
                    128,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_0a_1x1')))
    tower_conv_1_1 = relu(
        batch_normalization(
            conv_2d(tower_conv_1_0,
                    160, [1, 7],
                    bias=False,
                    activation=None,
                    name='Conv2d_0b_1x7')))
    tower_conv_1_2 = relu(
        batch_normalization(
            conv_2d(tower_conv_1_1,
                    192, [7, 1],
                    bias=False,
                    activation=None,
                    name='Conv2d_0c_7x1')))
    tower_mixed = merge([tower_conv, tower_conv_1_2],
                        mode='concat',
                        axis=3)
    tower_out = relu(
        batch_normalization(
            conv_2d(tower_mixed,
                    net.get_shape()[3],
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_1x1')))
    net += scale * tower_out
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net
Example #6
def generator_forward(config,
                      noise=None,
                      scope="generator",
                      name=None,
                      reuse=False,
                      num_samples=-1):
    with tf.variable_scope(scope, name, reuse=reuse):
        if noise is None:
            noise = tf.random_normal(
                [config.batch_size if num_samples == -1 else num_samples, 128],
                name="noise")

        output = fully_connected(noise,
                                 4 * 4 * 8 * config.gen_dim,
                                 name="input")
        output = tf.reshape(output, [-1, 4, 4, 8 * config.gen_dim])
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output,
                                   4 * config.gen_dim,
                                   5, [8, 8],
                                   name="conv1",
                                   strides=2)
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output,
                                   2 * config.gen_dim,
                                   5, [16, 16],
                                   name="conv2",
                                   strides=2)
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output,
                                   config.gen_dim,
                                   5, [32, 32],
                                   name="conv3",
                                   strides=2)
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output,
                                   3,
                                   5, [64, 64],
                                   name="conv4",
                                   strides=2)
        output = tf.tanh(output)

    return output
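A minimal calling sketch, assuming a config object with the two fields the function actually reads (batch_size and gen_dim); the values are illustrative, not from the source.

from types import SimpleNamespace

config = SimpleNamespace(batch_size=64, gen_dim=64)  # hypothetical settings
fake = generator_forward(config, num_samples=16)     # [16, 64, 64, 3], tanh range [-1, 1]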
Example #7
def resLayer(x, filters, stride=1):
    network = conv_2d(x, filters, 3, activation=None, strides=stride)
    network = batch_normalization(network)
    network = relu(network)
    network = conv_2d(network, filters, 3, activation=None)
    network = batch_normalization(network)
    if stride != 1:
        x = max_pool_2d(x, 2)
        x = conv_2d(x, filters, 1)

    network = x + network
    network = relu(network)

    return network
Example #8
def code_classifier_forward(config,
                            incoming=None,
                            image=None,
                            scope="code_classifier",
                            name=None,
                            reuse=False):
    with tf.variable_scope(scope, name, reuse=reuse):
        code_output = leaky_relu(fully_connected(incoming, 512))

        output = conv_2d(image, 32, 5, 2, name="conv1")
        output = residual_block(output,
                                2,
                                32,
                                downsample=True,
                                batch_norm=True,
                                name="rb1")
        output = residual_block(output,
                                1,
                                64,
                                downsample=True,
                                batch_norm=True,
                                name="rb2")
        output = leaky_relu(
            fully_connected(
                tf.reshape(output, [config.batch_size, 4 * 4 * 64]), 1024))

        prod = tf.matmul(code_output[:, :, None], output[:, None, :])
        prob = tf.nn.softmax(prod)
        prob2 = tf.nn.softmax(tf.transpose(prod, perm=[0, 2, 1]))

        output = tf.concat([
            code_output,
            tf.matmul(prob, output[:, :, None])[:, :, 0],
            tf.matmul(prob2, code_output[:, :, None])[:, :, 0]
        ],
                           axis=-1)
        output = relu(fully_connected(output, 1024))
        output = dropout(output, 0.6)

        output = relu(fully_connected(output, 512))
        output = dropout(output, 0.6)

        output = relu(fully_connected(output, 256))
        output = dropout(output, 0.8)

        output = fully_connected(output, 5)

    return output
Example #9
def alchNetEnhance(img_prep, img_aug, learning_rate):
    network = input_data(shape=[None, 64, 64, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    network = conv_2d(network, 64, 3, activation=None)
    network = batch_normalization(network)
    network = relu(network)

    network = resLayer(network, 64)
    network = resLayer(network, 64)
    network = resLayer(network, 128, stride=2)
    network = resLayer(network, 128)
    network = resLayer(network, 256, stride=2)
    network = resLayer(network, 256)
    network = resLayer(network, 512, stride=2)
    network = resLayer(network, 512)

    network = fully_connected(network, 1024, activation='relu')
    network = batch_normalization(network,
                                  stddev=0.002,
                                  trainable=True,
                                  restore=True,
                                  reuse=False)
    network = dropout(network, 0.5)

    network = fully_connected(network, 200, activation='softmax')
    network = regression(network,
                         optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=learning_rate)
    return network
Example #10
def residual_block_1D(incoming, out_channels, downsample=False, first=False,
                      filt_len=16, dropout_prob=0.85, downsampleSecond=True):
    resnet = incoming
    in_channels = incoming.shape[-1].value
    strides = (2 if downsample else 1)
    dsLayer = (1 if downsampleSecond else 0)
    identity = resnet

    nConv = 2
    if first:
        resnet = conv_1d(resnet, out_channels, filt_len, strides,
                         weights_init="variance_scaling")
        nConv = 1

    for i in range(nConv):
        resnet = batch_normalization(resnet)
        resnet = relu(resnet)
        resnet = dropout(resnet, dropout_prob)
        if downsample and i == dsLayer:  # dsLayer = 1 selects the second conv layer
            # stride 1 here, so the downsampling falls on the other conv layer only
            resnet = conv_1d(resnet, out_channels, filt_len, strides=1,
                             weights_init="variance_scaling")
        else:
            resnet = conv_1d(resnet, out_channels, filt_len, strides,
                             weights_init="variance_scaling")

    # beginning of the skip connection
    identity = max_pool_1d(identity, strides, strides)

    if in_channels != out_channels:
        ch = (out_channels - in_channels) // 2
        identity = tf.pad(identity, [[0, 0], [0, 0], [ch, ch]])
        in_channels = out_channels

    resnet = resnet + identity

    return resnet
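A rough usage sketch (shapes and channel counts are assumptions): the first block is built with first=True so its initial conv replaces one BN-ReLU-conv step, and a later block halves the sequence length via downsample=True.

sig = input_data(shape=[None, 1024, 1])                      # hypothetical 1-D signal
net = conv_1d(sig, 64, 16, weights_init="variance_scaling")
net = residual_block_1D(net, 64, first=True)
net = residual_block_1D(net, 64, downsample=True)            # length 1024 -> 512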
Example #11
def block8(net, scale=1.0, activation="relu"):
    tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 224, [1, 3], bias=False, activation=None, name='Conv2d_0b_1x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 256, [3, 1], bias=False, name='Conv2d_0c_3x1')))
    tower_mixed = merge([tower_conv, tower_conv1_2], mode='concat', axis=3)
    tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
    net += scale * tower_out
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net
Example #12
def classifier_forward(config,
                       incoming,
                       name=None,
                       reuse=False,
                       scope="classifier"):
    with tf.variable_scope(scope, name, reuse=reuse):
        network = incoming
        network = relu(conv_2d(network, 32, 5, strides=2))
        network = relu(conv_2d(network, 64, 5, strides=2))
        network = flatten(network)

        network = relu(fully_connected(network, 1024))
        network = dropout(network, 0.7)

        network = fully_connected(network, 10)

    return network
Example #13
def res18_forward(incoming, scope=None, name="resnet_18", reuse=False):
    with tf.variable_scope(scope, default_name=name, reuse=reuse):
        network = conv_2d(incoming, 32, 5, 2, name="conv1")
        network = residual_block(network, 2, 32, downsample=True, batch_norm=True, name="rb1")
        network = residual_block(network, 2, 64, downsample=True, batch_norm=True, name="rb2")
        network = residual_block(network, 2, 128, downsample=True, batch_norm=True, name="rb3")
        network = residual_block(network, 2, 256, downsample=True, batch_norm=True, name="rb4")
        network = relu(batch_normalization(fully_connected(network, 256, name="fc1")))
        network = fully_connected(network, 5, name="fc2")

    return network
Example #14
def ProteinNet(str_len, emb, lr, num_classes):
    in_layer = input_data([None, 1, str_len * 2 + 2, 1])
    indices = in_layer[:, 0, :2, 0]

    if emb > 1:
        lstm1 = lstm(embedding(in_layer[:, 0, 2:, 0], 26, emb),
                     300,
                     return_seq=True)
    else:
        lstm1 = lstm(in_layer[:, 0, 2:, :], 300, return_seq=True)

    # lstm branch
    lstm2 = lstm(lstm1, 300, return_seq=True)
    lstm3 = lstm(lstm2, 300, return_seq=True)
    lstm4 = lstm(lstm3, 300)

    # cnn branch
    in_layer = bn(in_layer)
    conv1 = conv_2d(in_layer, 64, [1, 10], 1)
    norm1 = relu(bn(conv1))
    conv2 = conv_2d(norm1, 128, [1, 6], 2)
    norm2 = relu(bn(conv2))
    conv3 = conv_2d(norm2, 256, [1, 3], 2)
    norm3 = relu(bn(conv3))
    gap = tf.reshape(global_avg_pool(norm3), [-1, 256])

    # fully-connected branch
    fc_ind = fc(indices, 50, activation='tanh')
    fc_ind2 = fc(fc_ind, 50, activation='tanh')

    # merge lstm, conv, and fc layers
    merged = tf.concat([lstm4, gap, fc_ind2], 1)

    out = fc(merged, num_classes, activation='softmax')
    net = regression(out,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=lr)
    model = tflearn.DNN(net, tensorboard_verbose=2, tensorboard_dir='.')

    return model
Example #15
def aprelu(incoming):
    in_channels = incoming.get_shape().as_list()[-1]
    scales_n = tf.reduce_mean(tf.reduce_mean(tf.minimum(incoming, 0), axis=2, keep_dims=True),
                              axis=1, keep_dims=True)
    scales_p = tf.reduce_mean(tf.reduce_mean(tf.maximum(incoming, 0), axis=2, keep_dims=True),
                              axis=1, keep_dims=True)
    scales = tf.concat([scales_n, scales_p], axis=3)
    scales = fully_connected(scales, in_channels, activation='linear', regularizer='L2',
                             weight_decay=0.0001, weights_init='variance_scaling')
    scales = relu(bn(scales))
    scales = fully_connected(scales, in_channels, activation='linear', regularizer='L2',
                             weight_decay=0.0001, weights_init='variance_scaling')
    scales = tflearn.activations.sigmoid(bn(scales))
    scales = tf.expand_dims(tf.expand_dims(scales, axis=1), axis=1)
    return tf.maximum(incoming, 0) + tf.multiply(scales, (incoming - tf.abs(incoming))) * 0.5
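Illustration only (not from the source): aprelu expects a 4-D feature map, since it averages over the two spatial axes before the squeeze-and-excitation-style scaling.

# Hypothetical usage; input shape and filter counts are assumptions.
feat = conv_2d(input_data(shape=[None, 32, 32, 3]), 16, 3, activation='linear')
feat = aprelu(feat)  # learned per-channel slopes for the negative part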
Example #16
def fcLayers(inputs, c):
    '''
    Fully connected layers that can be used for feature extraction
    :param inputs: input tensor
    :param c: config dictionary
    :return: Adds fully connected feature layers to the tf graph
    '''
    inputs = tf.layers.flatten(inputs)
    if c.fcfe.normalise:
        # min-max normalise the flattened features to [0, 1]
        inputs = tf.div(tf.subtract(inputs, tf.reduce_min(inputs)),
                        tf.subtract(tf.reduce_max(inputs), tf.reduce_min(inputs)))
    for s in c.fcfe.layers:
        inputs = relu(tflearn.fully_connected(inputs, s, weights_init=c.fcfe.w_init))
    return inputs
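A hypothetical config for fcLayers, inferred purely from the attributes the function touches (c.fcfe.normalise, c.fcfe.layers, c.fcfe.w_init); none of these values come from the source.

from types import SimpleNamespace

c = SimpleNamespace(fcfe=SimpleNamespace(
    normalise=True,             # min-max scale the flattened input
    layers=[256, 128],          # widths of the stacked FC layers
    w_init='truncated_normal'))
features = fcLayers(tf.placeholder(tf.float32, [None, 64]), c)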
Example #17
def generator_forward(config,
                      labels,
                      noise=None,
                      scope="generator",
                      name=None,
                      reuse=False,
                      num_samples=-1):
    with tf.variable_scope(scope, name, reuse=reuse):
        if noise is None:
            noise = tf.random_normal(
                [config.batch_size if num_samples == -1 else num_samples, 128],
                name="noise")
        embed = fully_connected(labels, 8 * config.dim)
        noise = fully_connected(noise, 56 * config.dim)
        cat = relu(batch_normalization(tf.concat([embed, noise], axis=-1)))
        output = fully_connected(cat, 4 * 4 * 4 * config.dim)
        output = batch_normalization(output)
        output = tf.nn.relu(output)
        output = tf.reshape(output, [-1, 4, 4, 4 * config.dim])

        output = conv_2d_transpose(output,
                                   2 * config.dim,
                                   5, [8, 8],
                                   strides=2)
        output = output[:, :7, :7, :]
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output, config.dim, 5, [14, 14], strides=2)
        output = batch_normalization(output)
        output = tf.nn.relu(output)

        output = conv_2d_transpose(output, 1, 5, [28, 28], strides=2)

        output = tf.tanh(output)

    return output
Example #18
    def network(self):
        in_layer = input_data([None, 1, self.str_len * 2 + 2, 1])
        indices = in_layer[:, 0, :2, 0]

        if self.emb > 1:
            lstm1 = lstm(embedding(in_layer[:, 0, 2:, 0], 26, self.emb),
                         300,
                         return_seq=True)
        else:
            lstm1 = lstm(in_layer[:, 0, 2:, :], 300, return_seq=True)

        # lstm branch
        lstm2 = lstm(lstm1, 300, return_seq=True)
        lstm3 = lstm(lstm2, 300, return_seq=True)
        lstm4 = lstm(lstm3, 300)

        # cnn branch
        in_layer = bn(in_layer)
        conv1 = conv_2d(in_layer, 64, [1, 7], 1)
        norm1 = relu(bn(conv1))
        block1 = self.residual_block(norm1, 128, [1, 3], 2, stride=2)
        block2 = self.residual_block(block1, 256, [1, 3], 2, stride=2)
        block3 = self.residual_block(block2, 512, [1, 3], 2)
        block4 = self.residual_block(block3, 1024, [1, 3], 2)
        n_out_filters = block4.get_shape().as_list()[-1]
        gap = tf.reshape(global_avg_pool(block4), [-1, n_out_filters])

        # fully-connected branch
        fc_ind = fc(indices, 100, activation='tanh')
        fc_ind2 = fc(fc_ind, 100, activation='tanh')

        # merge lstm, conv, and fc layers
        merged = tf.concat([lstm4, gap, fc_ind2], 1)

        out = fc(merged, self.num_classes,
                 activation='softmax')  # output layer

        # describe optimization
        net = regression(out,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=self.lr)

        # build model
        model = tflearn.DNN(net, tensorboard_verbose=2, tensorboard_dir='.')

        return model
Example #19
def generator_forward(config, noise=None, scope="generator", name=None, reuse=False, num_samples=-1):
    with tf.variable_scope(scope, name, reuse=reuse):
        if noise is None:
            noise = tf.random_normal([config.batch_size if num_samples == -1 else num_samples, 128], name="noise")

        output = fully_connected(noise, 6 * 6 * 8 * config.gen_dim, name="input")
        output = tf.reshape(output, [-1, 6, 6, 8 * config.gen_dim])

        output = residual_block_upsample(output, 8 * config.gen_dim, 5, name="conv1")
        output = residual_block_upsample(output, 4 * config.gen_dim, 5, name="conv2")
        output = residual_block_upsample(output, 2 * config.gen_dim, 5, name="conv3")

        output = batch_normalization(output)
        output = relu(output)
        output = conv_2d(output, 3, 3, name="conv4")
        output = tf.tanh(output)

    return output
Example #20
def convLayers(inputs, c):
    '''
    Conv layers that can be used for feature extraction
    :param inputs: input tensor
    :param c: config dictionary
    :return: Adds conv feature layers to the tf graph
    '''
    weights_init = tflearn.initializations.xavier()
    bias_init = tf.constant_initializer(0.1)
    inputs = tf.div(inputs, c.cnn.max_in)
    layer = 0
    for (o, k, s) in zip(c.cnn.outdim, c.cnn.kernels, c.cnn.stride):
        inputs = conv2d(inputs, o, [k, k], [s, s], name='conv' + str(layer), format=c.cnn.format)
        layer += 1
    shape = inputs.get_shape().as_list()
    # flatten; note that `reduce` needs `from functools import reduce` on Python 3
    inputs = tf.reshape(inputs, [-1, reduce(lambda x, y: x * y, shape[1:])])
    return relu(tflearn.fully_connected(inputs, c.cnn.fc, weights_init=weights_init, bias_init=bias_init))
Example #21
def resNet(img_prep, img_aug, learning_rate):
    network = input_data(shape=[None, 64, 64, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    network = conv_2d(network, 64, 3, regularizer='L2', weight_decay=0.0001)
    network = residual_block(network, 2, 64)
    network = residual_block(network, 1, 128, downsample=True)
    network = residual_block(network, 1, 128)
    network = residual_block(network, 1, 256, downsample=True)
    network = residual_block(network, 1, 256)
    network = residual_block(network, 1, 512, downsample=True)
    network = residual_block(network, 1, 512)
    network = batch_normalization(network)
    network = relu(network)
    network = global_avg_pool(network)
    network = fully_connected(network, 200, activation='softmax')
    network = regression(network,
                         optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=learning_rate)
    return network
Example #22
def ImageNetInceptionV2(outnode,
                        model_name,
                        target,
                        opt,
                        learn_r,
                        epch,
                        dropout_keep_rate,
                        save_model=False):
    def block35(net, scale=1.0, activation="relu"):
        tower_conv = relu(
            batch_normalization(
                conv_2d(net,
                        32,
                        1,
                        bias=False,
                        activation=None,
                        name='Conv2d_1x1')))
        tower_conv1_0 = relu(
            batch_normalization(
                conv_2d(net,
                        32,
                        1,
                        bias=False,
                        activation=None,
                        name='Conv2d_0a_1x1')))
        tower_conv1_1 = relu(
            batch_normalization(
                conv_2d(tower_conv1_0,
                        32,
                        3,
                        bias=False,
                        activation=None,
                        name='Conv2d_0b_3x3')))
        tower_conv2_0 = relu(
            batch_normalization(
                conv_2d(net,
                        32,
                        1,
                        bias=False,
                        activation=None,
                        name='Conv2d_0a_1x1')))
        tower_conv2_1 = relu(
            batch_normalization(
                conv_2d(tower_conv2_0,
                        48,
                        3,
                        bias=False,
                        activation=None,
                        name='Conv2d_0b_3x3')))
        tower_conv2_2 = relu(
            batch_normalization(
                conv_2d(tower_conv2_1,
                        64,
                        3,
                        bias=False,
                        activation=None,
                        name='Conv2d_0c_3x3')))
        tower_mixed = merge([tower_conv, tower_conv1_1, tower_conv2_2],
                            mode='concat',
                            axis=3)
        tower_out = relu(
            batch_normalization(
                conv_2d(tower_mixed,
                        net.get_shape()[3],
                        1,
                        bias=False,
                        activation=None,
                        name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net

    def block17(net, scale=1.0, activation="relu"):
        tower_conv = relu(
            batch_normalization(
                conv_2d(net,
                        192,
                        1,
                        bias=False,
                        activation=None,
                        name='Conv2d_1x1')))
        tower_conv_1_0 = relu(
            batch_normalization(
                conv_2d(net,
                        128,
                        1,
                        bias=False,
                        activation=None,
                        name='Conv2d_0a_1x1')))
        tower_conv_1_1 = relu(
            batch_normalization(
                conv_2d(tower_conv_1_0,
                        160, [1, 7],
                        bias=False,
                        activation=None,
                        name='Conv2d_0b_1x7')))
        tower_conv_1_2 = relu(
            batch_normalization(
                conv_2d(tower_conv_1_1,
                        192, [7, 1],
                        bias=False,
                        activation=None,
                        name='Conv2d_0c_7x1')))
        tower_mixed = merge([tower_conv, tower_conv_1_2],
                            mode='concat',
                            axis=3)
        tower_out = relu(
            batch_normalization(
                conv_2d(tower_mixed,
                        net.get_shape()[3],
                        1,
                        bias=False,
                        activation=None,
                        name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net

    def block8(net, scale=1.0, activation="relu"):
        tower_conv = relu(
            batch_normalization(
                conv_2d(net,
                        192,
                        1,
                        bias=False,
                        activation=None,
                        name='Conv2d_1x1')))
        tower_conv1_0 = relu(
            batch_normalization(
                conv_2d(net,
                        192,
                        1,
                        bias=False,
                        activation=None,
                        name='Conv2d_0a_1x1')))
        tower_conv1_1 = relu(
            batch_normalization(
                conv_2d(tower_conv1_0,
                        224, [1, 3],
                        bias=False,
                        activation=None,
                        name='Conv2d_0b_1x3')))
        tower_conv1_2 = relu(
            batch_normalization(
                conv_2d(tower_conv1_1,
                        256, [3, 1],
                        bias=False,
                        name='Conv2d_0c_3x1')))
        tower_mixed = merge([tower_conv, tower_conv1_2], mode='concat', axis=3)
        tower_out = relu(
            batch_normalization(
                conv_2d(tower_mixed,
                        net.get_shape()[3],
                        1,
                        bias=False,
                        activation=None,
                        name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net

    # default = 0.8
    dropout_keep_prob = dropout_keep_rate

    network = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')
    conv1a_3_3 = relu(
        batch_normalization(
            conv_2d(network,
                    32,
                    3,
                    strides=2,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_1a_3x3')))
    conv2a_3_3 = relu(
        batch_normalization(
            conv_2d(conv1a_3_3,
                    32,
                    3,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_2a_3x3')))
    conv2b_3_3 = relu(
        batch_normalization(
            conv_2d(conv2a_3_3,
                    64,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_2b_3x3')))
    maxpool3a_3_3 = max_pool_2d(conv2b_3_3,
                                3,
                                strides=2,
                                padding='VALID',
                                name='MaxPool_3a_3x3')
    conv3b_1_1 = relu(
        batch_normalization(
            conv_2d(maxpool3a_3_3,
                    80,
                    1,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_3b_1x1')))
    conv4a_3_3 = relu(
        batch_normalization(
            conv_2d(conv3b_1_1,
                    192,
                    3,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_4a_3x3')))
    maxpool5a_3_3 = max_pool_2d(conv4a_3_3,
                                3,
                                strides=2,
                                padding='VALID',
                                name='MaxPool_5a_3x3')

    tower_conv = relu(
        batch_normalization(
            conv_2d(maxpool5a_3_3,
                    96,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b0_1x1')))

    tower_conv1_0 = relu(
        batch_normalization(
            conv_2d(maxpool5a_3_3,
                    48,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b1_0a_1x1')))
    tower_conv1_1 = relu(
        batch_normalization(
            conv_2d(tower_conv1_0,
                    64,
                    5,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b1_0b_5x5')))

    tower_conv2_0 = relu(
        batch_normalization(
            conv_2d(maxpool5a_3_3,
                    64,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b2_0a_1x1')))
    tower_conv2_1 = relu(
        batch_normalization(
            conv_2d(tower_conv2_0,
                    96,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b2_0b_3x3')))
    tower_conv2_2 = relu(
        batch_normalization(
            conv_2d(tower_conv2_1,
                    96,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b2_0c_3x3')))

    tower_pool3_0 = avg_pool_2d(maxpool5a_3_3,
                                3,
                                strides=1,
                                padding='same',
                                name='AvgPool_5b_b3_0a_3x3')
    tower_conv3_1 = relu(
        batch_normalization(
            conv_2d(tower_pool3_0,
                    64,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b3_0b_1x1')))

    tower_5b_out = merge(
        [tower_conv, tower_conv1_1, tower_conv2_2, tower_conv3_1],
        mode='concat',
        axis=3)

    net = repeat(tower_5b_out, 10, block35, scale=0.17)

    tower_conv = relu(
        batch_normalization(
            conv_2d(net,
                    384,
                    3,
                    bias=False,
                    strides=2,
                    activation=None,
                    padding='VALID',
                    name='Conv2d_6a_b0_0a_3x3')))
    tower_conv1_0 = relu(
        batch_normalization(
            conv_2d(net,
                    256,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_6a_b1_0a_1x1')))
    tower_conv1_1 = relu(
        batch_normalization(
            conv_2d(tower_conv1_0,
                    256,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_6a_b1_0b_3x3')))
    tower_conv1_2 = relu(
        batch_normalization(
            conv_2d(tower_conv1_1,
                    384,
                    3,
                    bias=False,
                    strides=2,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_6a_b1_0c_3x3')))
    tower_pool = max_pool_2d(net,
                             3,
                             strides=2,
                             padding='VALID',
                             name='MaxPool_1a_3x3')
    net = merge([tower_conv, tower_conv1_2, tower_pool], mode='concat', axis=3)
    net = repeat(net, 20, block17, scale=0.1)

    tower_conv = relu(
        batch_normalization(
            conv_2d(net,
                    256,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_0a_1x1')))
    tower_conv0_1 = relu(
        batch_normalization(
            conv_2d(tower_conv,
                    384,
                    3,
                    bias=False,
                    strides=2,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_0a_1x1')))

    tower_conv1 = relu(
        batch_normalization(
            conv_2d(net,
                    256,
                    1,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(
        batch_normalization(
            conv_2d(tower_conv1,
                    288,
                    3,
                    bias=False,
                    strides=2,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_1a_3x3')))

    tower_conv2 = relu(
        batch_normalization(
            conv_2d(net,
                    256,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(
        batch_normalization(
            conv_2d(tower_conv2,
                    288,
                    3,
                    bias=False,
                    name='Conv2d_0b_3x3',
                    activation=None)))
    tower_conv2_2 = relu(
        batch_normalization(
            conv_2d(tower_conv2_1,
                    320,
                    3,
                    bias=False,
                    strides=2,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_1a_3x3')))

    tower_pool = max_pool_2d(net,
                             3,
                             strides=2,
                             padding='VALID',
                             name='MaxPool_1a_3x3')
    net = merge([tower_conv0_1, tower_conv1_1, tower_conv2_2, tower_pool],
                mode='concat',
                axis=3)

    net = repeat(net, 9, block8, scale=0.2)
    net = block8(net, activation=None)

    net = relu(
        batch_normalization(
            conv_2d(net,
                    1536,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_7b_1x1')))
    net = avg_pool_2d(net,
                      net.get_shape().as_list()[1:3],
                      strides=2,
                      padding='VALID',
                      name='AvgPool_1a_8x8')
    net = flatten(net)
    net = dropout(net, dropout_keep_prob)
    loss = fully_connected(net, outnode, activation='softmax')

    str_model_name = "{}_{}_{}_{}_{}_{}".format(model_name, target, opt,
                                                learn_r, epch,
                                                dropout_keep_rate)

    network = tflearn.regression(loss,
                                 optimizer=opt,
                                 loss='categorical_crossentropy',
                                 learning_rate=learn_r,
                                 name='targets')
    model = None
    if save_model:
        model = tflearn.DNN(
            network,
            checkpoint_path='../tflearnModels/{}'.format(str_model_name),
            best_checkpoint_path='../tflearnModels/bestModels/best_{}'.format(
                str_model_name),
            max_checkpoints=1,
            tensorboard_verbose=0,
            tensorboard_dir="../tflearnLogs/{}/".format(str_model_name))
    else:
        model = tflearn.DNN(network)

    return model
Example #23
    def _build_model(self):

        # features
        # randomly initialized embeddings
        self.drug_embedding = weight_variable([num_drug, dim_drug])
        self.protein_embedding = weight_variable([num_protein, dim_protein])
        self.disease_embedding = weight_variable([num_disease, dim_disease])
        self.sideeffect_embedding = weight_variable(
            [num_sideeffect, dim_sideeffect])

        tf.add_to_collection(
            'l2_reg',
            tf.contrib.layers.l2_regularizer(1.0)(self.drug_embedding))
        tf.add_to_collection(
            'l2_reg',
            tf.contrib.layers.l2_regularizer(1.0)(self.protein_embedding))
        tf.add_to_collection(
            'l2_reg',
            tf.contrib.layers.l2_regularizer(1.0)(self.disease_embedding))
        tf.add_to_collection(
            'l2_reg',
            tf.contrib.layers.l2_regularizer(1.0)(self.sideeffect_embedding))

        # feature passing weights (maybe different types of nodes can use different weights)
        W0 = weight_variable([dim_pass + dim_drug, dim_drug])
        b0 = bias_variable([dim_drug])
        tf.add_to_collection('l2_reg',
                             tf.contrib.layers.l2_regularizer(1.0)(W0))

        # passing 1 times (can be easily extended to multiple passes)
        drug_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.drug_drug_normalize, a_layer(self.drug_embedding, dim_pass)) + \
                       tf.matmul(self.drug_chemical_normalize, a_layer(self.drug_embedding, dim_pass)) + \
                       tf.matmul(self.drug_disease_normalize, a_layer(self.disease_embedding, dim_pass)) + \
                       tf.matmul(self.drug_sideeffect_normalize, a_layer(self.sideeffect_embedding, dim_pass)) + \
                       tf.matmul(self.drug_protein_normalize, a_layer(self.protein_embedding, dim_pass)), \
                       self.drug_embedding], axis=1), W0) + b0), dim=1)
        drug_vector2 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.drug_drug_normalize, a_layer(self.drug_embedding, dim_pass)) * 4, \
                       self.drug_embedding], axis=1), W0) + b0), dim=1)

        protein_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.protein_protein_normalize, a_layer(self.protein_embedding, dim_pass)) + \
                       tf.matmul(self.protein_sequence_normalize, a_layer(self.protein_embedding, dim_pass)) + \
                       tf.matmul(self.protein_disease_normalize, a_layer(self.disease_embedding, dim_pass)) + \
                       tf.matmul(self.protein_drug_normalize, a_layer(self.drug_embedding, dim_pass)), \
                       self.protein_embedding], axis=1), W0) + b0), dim=1)
        # modified
        protein_vector2 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.protein_protein_normalize, a_layer(self.protein_embedding, dim_pass)) * 4, \
                       self.protein_embedding], axis=1), W0) + b0), dim=1)

        disease_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.disease_drug_normalize, a_layer(self.drug_embedding, dim_pass)) + \
                       tf.matmul(self.disease_protein_normalize, a_layer(self.protein_embedding, dim_pass)), \
                       self.disease_embedding], axis=1), W0) + b0), dim=1)
        disease_vector2 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.disease_drug_normalize, a_layer(self.drug_embedding, dim_pass))*2, \
                       self.disease_embedding], axis=1), W0) + b0), dim=1)

        sideeffect_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.sideeffect_drug_normalize, a_layer(self.drug_embedding, dim_pass)), \
                       self.sideeffect_embedding], axis=1), W0) + b0), dim=1)

        self.drug_representation = drug_vector1 + drug_vector2
        self.protein_representation = protein_vector1 + protein_vector2
        self.disease_representation = disease_vector1 + disease_vector2
        self.sideeffect_representation = sideeffect_vector1

        # reconstructing networks
        self.drug_drug_reconstruct = bi_layer(self.drug_representation,
                                              self.drug_representation,
                                              sym=True,
                                              dim_pred=dim_pred)
        self.drug_drug_reconstruct_loss = tf.reduce_sum(
            tf.multiply((self.drug_drug_reconstruct - self.drug_drug),
                        (self.drug_drug_reconstruct - self.drug_drug)))

        # drug similarity
        self.drug_chemical_reconstruct = bi_layer(self.drug_representation,
                                                  self.drug_representation,
                                                  sym=True,
                                                  dim_pred=dim_pred)
        self.drug_chemical_reconstruct_loss = tf.reduce_sum(
            tf.multiply((self.drug_chemical_reconstruct - self.drug_chemical),
                        (self.drug_chemical_reconstruct - self.drug_chemical)))

        self.drug_disease_reconstruct = bi_layer(self.drug_representation,
                                                 self.disease_representation,
                                                 sym=False,
                                                 dim_pred=dim_pred)
        self.drug_disease_reconstruct_loss = tf.reduce_sum(
            tf.multiply((self.drug_disease_reconstruct - self.drug_disease),
                        (self.drug_disease_reconstruct - self.drug_disease)))

        self.drug_sideeffect_reconstruct = bi_layer(
            self.drug_representation,
            self.sideeffect_representation,
            sym=False,
            dim_pred=dim_pred)
        self.drug_sideeffect_reconstruct_loss = tf.reduce_sum(
            tf.multiply(
                (self.drug_sideeffect_reconstruct - self.drug_sideeffect),
                (self.drug_sideeffect_reconstruct - self.drug_sideeffect)))

        self.protein_protein_reconstruct = bi_layer(
            self.protein_representation,
            self.protein_representation,
            sym=True,
            dim_pred=dim_pred)
        self.protein_protein_reconstruct_loss = tf.reduce_sum(
            tf.multiply(
                (self.protein_protein_reconstruct - self.protein_protein),
                (self.protein_protein_reconstruct - self.protein_protein)))

        self.protein_sequence_reconstruct = bi_layer(
            self.protein_representation,
            self.protein_representation,
            sym=True,
            dim_pred=dim_pred)
        self.protein_sequence_reconstruct_loss = tf.reduce_sum(
            tf.multiply(
                (self.protein_sequence_reconstruct - self.protein_sequence),
                (self.protein_sequence_reconstruct - self.protein_sequence)))

        self.protein_disease_reconstruct = bi_layer(
            self.protein_representation,
            self.disease_representation,
            sym=False,
            dim_pred=dim_pred)
        self.protein_disease_reconstruct_loss = tf.reduce_sum(
            tf.multiply(
                (self.protein_disease_reconstruct - self.protein_disease),
                (self.protein_disease_reconstruct - self.protein_disease)))

        # drug-protein reconstruction
        self.drug_protein_reconstruct = bi_layer(self.drug_representation,
                                                 self.protein_representation,
                                                 sym=False,
                                                 dim_pred=dim_pred)
        tmp = tf.multiply(self.drug_protein_mask,
                          (self.drug_protein_reconstruct - self.drug_protein))
        self.drug_protein_reconstruct_loss = tf.reduce_sum(
            tf.multiply(tmp, tmp))

        self.l2_loss = tf.add_n(tf.get_collection("l2_reg"))

        self.loss = self.drug_protein_reconstruct_loss + 1.0 * (
            self.drug_drug_reconstruct_loss +
            self.drug_chemical_reconstruct_loss +
            self.drug_disease_reconstruct_loss +
            self.drug_sideeffect_reconstruct_loss +
            self.protein_protein_reconstruct_loss +
            self.protein_sequence_reconstruct_loss +
            self.protein_disease_reconstruct_loss) + self.l2_loss

        return self.drug_protein_reconstruct, self.loss, self.drug_protein_reconstruct_loss
Example #24
def inception_v2(width, height, learning_rate):
    num_classes = 17
    dropout_keep_prob = 0.8

    network = input_data(shape=[None, width, height, 3])
    conv1a_3_3 = relu(
        batch_normalization(
            conv_2d(network,
                    32,
                    3,
                    strides=2,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_1a_3x3')))
    conv2a_3_3 = relu(
        batch_normalization(
            conv_2d(conv1a_3_3,
                    32,
                    3,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_2a_3x3')))
    conv2b_3_3 = relu(
        batch_normalization(
            conv_2d(conv2a_3_3,
                    64,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_2b_3x3')))
    maxpool3a_3_3 = max_pool_2d(conv2b_3_3,
                                3,
                                strides=2,
                                padding='VALID',
                                name='MaxPool_3a_3x3')
    conv3b_1_1 = relu(
        batch_normalization(
            conv_2d(maxpool3a_3_3,
                    80,
                    1,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_3b_1x1')))
    conv4a_3_3 = relu(
        batch_normalization(
            conv_2d(conv3b_1_1,
                    192,
                    3,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_4a_3x3')))
    maxpool5a_3_3 = max_pool_2d(conv4a_3_3,
                                3,
                                strides=2,
                                padding='VALID',
                                name='MaxPool_5a_3x3')

    tower_conv = relu(
        batch_normalization(
            conv_2d(maxpool5a_3_3,
                    96,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b0_1x1')))

    tower_conv1_0 = relu(
        batch_normalization(
            conv_2d(maxpool5a_3_3,
                    48,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b1_0a_1x1')))
    tower_conv1_1 = relu(
        batch_normalization(
            conv_2d(tower_conv1_0,
                    64,
                    5,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b1_0b_5x5')))

    tower_conv2_0 = relu(
        batch_normalization(
            conv_2d(maxpool5a_3_3,
                    64,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b2_0a_1x1')))
    tower_conv2_1 = relu(
        batch_normalization(
            conv_2d(tower_conv2_0,
                    96,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b2_0b_3x3')))
    tower_conv2_2 = relu(
        batch_normalization(
            conv_2d(tower_conv2_1,
                    96,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b2_0c_3x3')))

    tower_pool3_0 = avg_pool_2d(maxpool5a_3_3,
                                3,
                                strides=1,
                                padding='same',
                                name='AvgPool_5b_b3_0a_3x3')
    tower_conv3_1 = relu(
        batch_normalization(
            conv_2d(tower_pool3_0,
                    64,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_5b_b3_0b_1x1')))

    tower_5b_out = merge(
        [tower_conv, tower_conv1_1, tower_conv2_2, tower_conv3_1],
        mode='concat',
        axis=3)

    net = repeat(tower_5b_out, 10, block35, scale=0.17)

    tower_conv = relu(
        batch_normalization(
            conv_2d(net,
                    384,
                    3,
                    bias=False,
                    strides=2,
                    activation=None,
                    padding='VALID',
                    name='Conv2d_6a_b0_0a_3x3')))
    tower_conv1_0 = relu(
        batch_normalization(
            conv_2d(net,
                    256,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_6a_b1_0a_1x1')))
    tower_conv1_1 = relu(
        batch_normalization(
            conv_2d(tower_conv1_0,
                    256,
                    3,
                    bias=False,
                    activation=None,
                    name='Conv2d_6a_b1_0b_3x3')))
    tower_conv1_2 = relu(
        batch_normalization(
            conv_2d(tower_conv1_1,
                    384,
                    3,
                    bias=False,
                    strides=2,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_6a_b1_0c_3x3')))
    tower_pool = max_pool_2d(net,
                             3,
                             strides=2,
                             padding='VALID',
                             name='MaxPool_1a_3x3')
    net = merge([tower_conv, tower_conv1_2, tower_pool], mode='concat', axis=3)
    net = repeat(net, 20, block17, scale=0.1)

    tower_conv = relu(
        batch_normalization(
            conv_2d(net,
                    256,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_0a_1x1')))
    tower_conv0_1 = relu(
        batch_normalization(
            conv_2d(tower_conv,
                    384,
                    3,
                    bias=False,
                    strides=2,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_0a_1x1')))

    tower_conv1 = relu(
        batch_normalization(
            conv_2d(net,
                    256,
                    1,
                    bias=False,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(
        batch_normalization(
            conv_2d(tower_conv1,
                    288,
                    3,
                    bias=False,
                    strides=2,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_1a_3x3')))

    tower_conv2 = relu(
        batch_normalization(
            conv_2d(net,
                    256,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(
        batch_normalization(
            conv_2d(tower_conv2,
                    288,
                    3,
                    bias=False,
                    name='Conv2d_0b_3x3',
                    activation=None)))
    tower_conv2_2 = relu(
        batch_normalization(
            conv_2d(tower_conv2_1,
                    320,
                    3,
                    bias=False,
                    strides=2,
                    padding='VALID',
                    activation=None,
                    name='Conv2d_1a_3x3')))

    tower_pool = max_pool_2d(net,
                             3,
                             strides=2,
                             padding='VALID',
                             name='MaxPool_1a_3x3')
    net = merge([tower_conv0_1, tower_conv1_1, tower_conv2_2, tower_pool],
                mode='concat',
                axis=3)

    net = repeat(net, 9, block8, scale=0.2)
    net = block8(net, activation=None)

    net = relu(
        batch_normalization(
            conv_2d(net,
                    1536,
                    1,
                    bias=False,
                    activation=None,
                    name='Conv2d_7b_1x1')))
    net = avg_pool_2d(net,
                      net.get_shape().as_list()[1:3],
                      strides=2,
                      padding='VALID',
                      name='AvgPool_1a_8x8')
    net = flatten(net)
    net = dropout(net, dropout_keep_prob)
    loss = fully_connected(net, num_classes, activation='softmax')

    network = tflearn.regression(loss,
                                 optimizer='RMSprop',
                                 loss='categorical_crossentropy',
                                 learning_rate=learning_rate)
    # learning_rate=0.0001
    model = tflearn.DNN(network,
                        checkpoint_path='inception_resnet_v2',
                        max_checkpoints=1,
                        tensorboard_verbose=2,
                        tensorboard_dir="./tflearn_logs/")
    return model
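
A minimal usage sketch for the model returned above (hypothetical: `build_model` stands in for the enclosing constructor, and X / Y are assumed to be preloaded arrays shaped to match the input layer):

model = build_model()
model.fit(X, Y,
          n_epoch=10,
          validation_set=0.1,
          shuffle=True,
          show_metric=True,
          batch_size=32,
          run_id='inception_resnet_v2')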
Exemple #25
0
def _model5():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    def block35(net, scale=1.0, activation="relu"):
        tower_conv = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None, name='Conv2d_1x1')))
        tower_conv1_0 = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None,name='Conv2d_0a_1x1')))
        tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 32, 3, bias=False, activation=None,name='Conv2d_0b_3x3')))
        tower_conv2_0 = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
        tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2_0, 48,3, bias=False, activation=None, name='Conv2d_0b_3x3')))
        tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 64,3, bias=False, activation=None, name='Conv2d_0c_3x3')))
        tower_mixed = merge([tower_conv, tower_conv1_1, tower_conv2_2], mode='concat', axis=3)
        tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net

    def block17(net, scale=1.0, activation="relu"):
        tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
        tower_conv_1_0 = relu(batch_normalization(conv_2d(net, 128, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
        tower_conv_1_1 = relu(batch_normalization(conv_2d(tower_conv_1_0, 160,[1,7], bias=False, activation=None,name='Conv2d_0b_1x7')))
        tower_conv_1_2 = relu(batch_normalization(conv_2d(tower_conv_1_1, 192, [7,1], bias=False, activation=None,name='Conv2d_0c_7x1')))
        tower_mixed = merge([tower_conv,tower_conv_1_2], mode='concat', axis=3)
        tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net


    def block8(net, scale=1.0, activation="relu"):
        tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
        tower_conv1_0 = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
        tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 224, [1,3], bias=False, activation=None, name='Conv2d_0b_1x3')))
        tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 256, [3,1], bias=False, name='Conv2d_0c_3x1')))
        tower_mixed = merge([tower_conv,tower_conv1_2], mode='concat', axis=3)
        tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net


    num_classes = len(yTest[0])
    dropout_keep_prob = 0.8

    network = input_data(shape=[None, inputSize, inputSize, dim],
             name='input',
             data_preprocessing=img_prep,
             data_augmentation=img_aug)
    conv1a_3_3 = relu(batch_normalization(conv_2d(network, 32, 3, strides=2, bias=False, padding='VALID',activation=None,name='Conv2d_1a_3x3')))
    conv2a_3_3 = relu(batch_normalization(conv_2d(conv1a_3_3, 32, 3, bias=False, padding='VALID',activation=None, name='Conv2d_2a_3x3')))
    conv2b_3_3 = relu(batch_normalization(conv_2d(conv2a_3_3, 64, 3, bias=False, activation=None, name='Conv2d_2b_3x3')))
    maxpool3a_3_3 = max_pool_2d(conv2b_3_3, 3, strides=2, padding='VALID', name='MaxPool_3a_3x3')
    conv3b_1_1 = relu(batch_normalization(conv_2d(maxpool3a_3_3, 80, 1, bias=False, padding='VALID',activation=None, name='Conv2d_3b_1x1')))
    conv4a_3_3 = relu(batch_normalization(conv_2d(conv3b_1_1, 192, 3, bias=False, padding='VALID',activation=None, name='Conv2d_4a_3x3')))
    maxpool5a_3_3 = max_pool_2d(conv4a_3_3, 3, strides=2, padding='VALID', name='MaxPool_5a_3x3')

    tower_conv = relu(batch_normalization(conv_2d(maxpool5a_3_3, 96, 1, bias=False, activation=None, name='Conv2d_5b_b0_1x1')))

    tower_conv1_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 48, 1, bias=False, activation=None, name='Conv2d_5b_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 64, 5, bias=False, activation=None, name='Conv2d_5b_b1_0b_5x5')))

    tower_conv2_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 64, 1, bias=False, activation=None, name='Conv2d_5b_b2_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2_0, 96, 3, bias=False, activation=None, name='Conv2d_5b_b2_0b_3x3')))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 96, 3, bias=False, activation=None,name='Conv2d_5b_b2_0c_3x3')))

    tower_pool3_0 = avg_pool_2d(maxpool5a_3_3, 3, strides=1, padding='same', name='AvgPool_5b_b3_0a_3x3')
    tower_conv3_1 = relu(batch_normalization(conv_2d(tower_pool3_0, 64, 1, bias=False, activation=None,name='Conv2d_5b_b3_0b_1x1')))

    tower_5b_out = merge([tower_conv, tower_conv1_1, tower_conv2_2, tower_conv3_1], mode='concat', axis=3)

    net = repeat(tower_5b_out, 10, block35, scale=0.17)

    '''
    tower_conv = relu(batch_normalization(conv_2d(net, 384, 3, bias=False, strides=2,activation=None, padding='VALID', name='Conv2d_6a_b0_0a_3x3')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 256, 3, bias=False, activation=None, name='Conv2d_6a_b1_0b_3x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 384, 3, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_6a_b1_0c_3x3')))
    tower_pool = max_pool_2d(net, 3, strides=2, padding='VALID',name='MaxPool_1a_3x3')
    net = merge([tower_conv, tower_conv1_2, tower_pool], mode='concat', axis=3)
    net = repeat(net, 20, block17, scale=0.1)

    tower_conv = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv0_1 = relu(batch_normalization(conv_2d(tower_conv, 384, 3, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_0a_1x1')))

    tower_conv1 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, padding='VALID', activation=None,name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1,288,3, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3')))

    tower_conv2 = relu(batch_normalization(conv_2d(net, 256,1, bias=False, activation=None,name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2, 288,3, bias=False, name='Conv2d_0b_3x3',activation=None)))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 320, 3, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3')))
    
    tower_pool = max_pool_2d(net, 3, strides=2, padding='VALID', name='MaxPool_1a_3x3')
    '''
    tower_conv = relu(batch_normalization(conv_2d(net, 384, 1, bias=False, strides=2,activation=None, padding='VALID', name='Conv2d_6a_b0_0a_3x3')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0b_3x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 384, 1, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_6a_b1_0c_3x3')))
    tower_pool = max_pool_2d(net, 1, strides=2, padding='VALID',name='MaxPool_1a_3x3')
    net = merge([tower_conv, tower_conv1_2, tower_pool], mode='concat', axis=3)
    net = repeat(net, 20, block17, scale=0.1)

    tower_conv = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv0_1 = relu(batch_normalization(conv_2d(tower_conv, 384, 1, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_0a_1x1')))

    tower_conv1 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, padding='VALID', activation=None,name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1,288,1, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3')))

    tower_conv2 = relu(batch_normalization(conv_2d(net, 256,1, bias=False, activation=None,name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2, 288,1, bias=False, name='Conv2d_0b_3x3',activation=None)))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 320, 1, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3')))
    
    
    tower_pool = max_pool_2d(net, 1, strides=2, padding='VALID', name='MaxPool_1a_3x3')
    
    net = merge([tower_conv0_1, tower_conv1_1,tower_conv2_2, tower_pool], mode='concat', axis=3)

    net = repeat(net, 9, block8, scale=0.2)
    net = block8(net, activation=None)

    net = relu(batch_normalization(conv_2d(net, 1536, 1, bias=False, activation=None, name='Conv2d_7b_1x1')))
    net = avg_pool_2d(net, net.get_shape().as_list()[1:3],strides=2, padding='VALID', name='AvgPool_1a_8x8')
    net = flatten(net)
    net = dropout(net, dropout_keep_prob)
    loss = fully_connected(net, num_classes,activation='softmax')


    network = tflearn.regression(loss, optimizer='RMSprop',
                         loss='categorical_crossentropy',
                         learning_rate=0.0001)
    model = tflearn.DNN(network, checkpoint_path='inception_resnet_v2',
                        max_checkpoints=1, tensorboard_verbose=2, tensorboard_dir="./tflearn_logs/")

    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
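
convert2, convert3, makeCSV and writeTest are project-specific helpers not included in the snippet. Judging only from how they are used (probabilities -> one-hot -> integer labels), a plausible reconstruction of the two converters is:

import numpy as np

def convert2(pred):
    # Hypothetical: binarize each row of class probabilities to a one-hot vector.
    pred = np.asarray(pred)
    out = np.zeros_like(pred)
    out[np.arange(len(pred)), pred.argmax(axis=1)] = 1
    return out

def convert3(pred):
    # Hypothetical: collapse one-hot rows to integer class labels, the form
    # expected by sklearn's confusion_matrix / classification_report.
    return np.asarray(pred).argmax(axis=1)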
Exemple #26
0
    # (Snippet truncated at the source: this is the tail of a block35/block17/
    # block8 helper, applying the optional activation before returning.)
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net


X, Y = oxflower17.load_data(one_hot=True, resize_pics=(299, 299))

num_classes = 17
dropout_keep_prob = 0.8

network = input_data(shape=[None, 299, 299, 3])
conv1a_3_3 = relu(batch_normalization(
    conv_2d(network, 32, 3, strides=2, bias=False, padding='VALID', activation=None, name='Conv2d_1a_3x3')))
conv2a_3_3 = relu(
    batch_normalization(conv_2d(conv1a_3_3, 32, 3, bias=False, padding='VALID', activation=None, name='Conv2d_2a_3x3')))
conv2b_3_3 = relu(batch_normalization(conv_2d(conv2a_3_3, 64, 3, bias=False, activation=None, name='Conv2d_2b_3x3')))
maxpool3a_3_3 = max_pool_2d(conv2b_3_3, 3, strides=2, padding='VALID', name='MaxPool_3a_3x3')
conv3b_1_1 = relu(batch_normalization(
    conv_2d(maxpool3a_3_3, 80, 1, bias=False, padding='VALID', activation=None, name='Conv2d_3b_1x1')))
conv4a_3_3 = relu(batch_normalization(
    conv_2d(conv3b_1_1, 192, 3, bias=False, padding='VALID', activation=None, name='Conv2d_4a_3x3')))
maxpool5a_3_3 = max_pool_2d(conv4a_3_3, 3, strides=2, padding='VALID', name='MaxPool_5a_3x3')

tower_conv = relu(
    batch_normalization(conv_2d(maxpool5a_3_3, 96, 1, bias=False, activation=None, name='Conv2d_5b_b0_1x1')))

tower_conv1_0 = relu(
    batch_normalization(conv_2d(maxpool5a_3_3, 48, 1, bias=False, activation=None, name='Conv2d_5b_b1_0a_1x1')))
Exemple #27
0
    def _build_model(self):
        # inputs
        self.drug_drug = tf.constant(drug_drug, tf.float32)
        self.drug_drug_normalize = tf.constant(drug_drug_normalize, tf.float32,
                                               [num_drug, num_drug])

        self.drug_chemical = tf.constant(drug_chemical, tf.float32,
                                         [num_drug, num_drug])
        self.drug_chemical_normalize = tf.constant(drug_chemical_normalize,
                                                   tf.float32,
                                                   [num_drug, num_drug])

        self.drug_disease = tf.constant(drug_disease, tf.float32,
                                        [num_drug, num_disease])
        self.drug_disease_normalize = tf.constant(drug_disease_normalize,
                                                  tf.float32,
                                                  [num_drug, num_disease])

        self.drug_sideeffect = tf.constant(drug_sideeffect, tf.float32,
                                           [num_drug, num_sideeffect])
        self.drug_sideeffect_normalize = tf.constant(
            drug_sideeffect_normalize, tf.float32, [num_drug, num_sideeffect])

        self.protein_protein = tf.constant(protein_protein, tf.float32,
                                           [num_protein, num_protein])
        self.protein_protein_normalize = tf.constant(
            protein_protein_normalize, tf.float32, [num_protein, num_protein])

        self.protein_sequence = tf.constant(protein_sequence, tf.float32,
                                            [num_protein, num_protein])
        self.protein_sequence_normalize = tf.constant(
            protein_sequence_normalize, tf.float32, [num_protein, num_protein])

        self.protein_disease = tf.constant(protein_disease, tf.float32,
                                           [num_protein, num_disease])
        self.protein_disease_normalize = tf.constant(
            protein_disease_normalize, tf.float32, [num_protein, num_disease])

        self.disease_drug = tf.constant(disease_drug, tf.float32,
                                        [num_disease, num_drug])
        self.disease_drug_normalize = tf.constant(disease_drug_normalize,
                                                  tf.float32,
                                                  [num_disease, num_drug])

        self.disease_protein = tf.constant(disease_protein, tf.float32,
                                           [num_disease, num_protein])
        self.disease_protein_normalize = tf.constant(
            disease_protein_normalize, tf.float32, [num_disease, num_protein])

        self.sideeffect_drug = tf.constant(sideeffect_drug, tf.float32,
                                           [num_sideeffect, num_drug])
        self.sideeffect_drug_normalize = tf.constant(
            sideeffect_drug_normalize, tf.float32, [num_sideeffect, num_drug])

        self.drug_protein = tf.placeholder(tf.float32, [num_drug, num_protein],
                                           name='drug_protein')
        self.drug_protein_normalize = tf.placeholder(
            tf.float32, [num_drug, num_protein], name='drug_protein_normalize')

        self.protein_drug = tf.placeholder(tf.float32, [num_protein, num_drug],
                                           name='protein_drug')
        self.protein_drug_normalize = tf.placeholder(
            tf.float32, [num_protein, num_drug], name='protein_drug_normalize')

        self.drug_protein_mask = tf.placeholder(tf.float32,
                                                [num_drug, num_protein],
                                                name='drug_protein_mask')

        # features
        # randomly initialized embeddings
        self.drug_embedding = weight_variable([num_drug, dim_drug])
        self.protein_embedding = weight_variable([num_protein, dim_protein])
        self.disease_embedding = weight_variable([num_disease, dim_disease])
        self.sideeffect_embedding = weight_variable(
            [num_sideeffect, dim_sideeffect])

        tf.add_to_collection(
            'l2_reg',
            tf.contrib.layers.l2_regularizer(1.0)(self.drug_embedding))
        tf.add_to_collection(
            'l2_reg',
            tf.contrib.layers.l2_regularizer(1.0)(self.protein_embedding))
        tf.add_to_collection(
            'l2_reg',
            tf.contrib.layers.l2_regularizer(1.0)(self.disease_embedding))
        tf.add_to_collection(
            'l2_reg',
            tf.contrib.layers.l2_regularizer(1.0)(self.sideeffect_embedding))

        # feature passing weights (maybe different types of nodes can use different weights)
        W0 = weight_variable([dim_pass + dim_drug, dim_drug], name='W0')
        b0 = bias_variable([dim_drug], name='b0')
        tf.add_to_collection('l2_reg',
                             tf.contrib.layers.l2_regularizer(1.0)(W0))
        # passing 1 times (can be easily extended to multiple passes)
        drug_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.drug_drug_normalize, a_layer(self.drug_embedding, dim_pass)) + \
                       tf.matmul(self.drug_chemical_normalize, a_layer(self.drug_embedding, dim_pass)) + \
                       tf.matmul(self.drug_disease_normalize, a_layer(self.disease_embedding, dim_pass)) + \
                       tf.matmul(self.drug_sideeffect_normalize, a_layer(self.sideeffect_embedding, dim_pass)) + \
                       tf.matmul(self.drug_protein_normalize, a_layer(self.protein_embedding, dim_pass)), \
                       self.drug_embedding], axis=1), W0) + b0), dim=1, name="drug_vector1")
        drug_vector2 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.drug_drug_normalize, a_layer(self.drug_embedding, dim_pass)) * 4, \
                       self.drug_embedding], axis=1), W0) + b0), dim=1, name="drug_vector2")

        protein_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.protein_protein_normalize, a_layer(self.protein_embedding, dim_pass)) + \
                       tf.matmul(self.protein_sequence_normalize, a_layer(self.protein_embedding, dim_pass)) + \
                       tf.matmul(self.protein_disease_normalize, a_layer(self.disease_embedding, dim_pass)) + \
                       tf.matmul(self.protein_drug_normalize, a_layer(self.drug_embedding, dim_pass)), \
                       self.protein_embedding], axis=1), W0) + b0), dim=1, name="protein_vector1")
        # modified
        protein_vector2 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.protein_protein_normalize, a_layer(self.protein_embedding, dim_pass)) * 4, \
                       self.protein_embedding], axis=1), W0) + b0), dim=1, name="protein_vector2")

        disease_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.disease_drug_normalize, a_layer(self.drug_embedding, dim_pass)) + \
                       tf.matmul(self.disease_protein_normalize, a_layer(self.protein_embedding, dim_pass)), \
                       self.disease_embedding], axis=1), W0) + b0), dim=1, name="disease_vector1")
        disease_vector2 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.disease_drug_normalize, a_layer(self.drug_embedding, dim_pass)) * 2, \
                       self.disease_embedding], axis=1), W0) + b0), dim=1, name="disease_vector2")

        sideeffect_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.sideeffect_drug_normalize, a_layer(self.drug_embedding, dim_pass)), \
                       self.sideeffect_embedding], axis=1), W0) + b0), dim=1, name="sideeffect_vector1")

        self.drug_representation = drug_vector1 + drug_vector2
        self.protein_representation = protein_vector1 + protein_vector2
        self.disease_representation = disease_vector1 + disease_vector2
        self.sideeffect_representation = sideeffect_vector1

        # reconstructing networks
        self.drug_drug_reconstruct = bi_layer(self.drug_representation,
                                              self.drug_representation,
                                              sym=True,
                                              dim_pred=dim_pred,
                                              name='drug_drug_reconstruct')
        self.drug_drug_reconstruct_loss = tf.reduce_sum(
            tf.multiply((self.drug_drug_reconstruct - self.drug_drug),
                        (self.drug_drug_reconstruct - self.drug_drug)))

        # drug similarity
        self.drug_chemical_reconstruct = bi_layer(
            self.drug_representation,
            self.drug_representation,
            sym=True,
            dim_pred=dim_pred,
            name='drug_chemical_reconstruct')
        self.drug_chemical_reconstruct_loss = tf.reduce_sum(
            tf.multiply((self.drug_chemical_reconstruct - self.drug_chemical),
                        (self.drug_chemical_reconstruct - self.drug_chemical)))

        self.drug_disease_reconstruct = bi_layer(
            self.drug_representation,
            self.disease_representation,
            sym=False,
            dim_pred=dim_pred,
            name='drug_disease_reconstruct')
        self.drug_disease_reconstruct_loss = tf.reduce_sum(
            tf.multiply((self.drug_disease_reconstruct - self.drug_disease),
                        (self.drug_disease_reconstruct - self.drug_disease)))

        self.drug_sideeffect_reconstruct = bi_layer(
            self.drug_representation,
            self.sideeffect_representation,
            sym=False,
            dim_pred=dim_pred,
            name='drug_sideeffect_reconstruct')
        self.drug_sideeffect_reconstruct_loss = tf.reduce_sum(
            tf.multiply(
                (self.drug_sideeffect_reconstruct - self.drug_sideeffect),
                (self.drug_sideeffect_reconstruct - self.drug_sideeffect)))

        self.protein_protein_reconstruct = bi_layer(
            self.protein_representation,
            self.protein_representation,
            sym=True,
            dim_pred=dim_pred,
            name='protein_protein_reconstruct')
        self.protein_protein_reconstruct_loss = tf.reduce_sum(
            tf.multiply(
                (self.protein_protein_reconstruct - self.protein_protein),
                (self.protein_protein_reconstruct - self.protein_protein)))

        self.protein_sequence_reconstruct = bi_layer(
            self.protein_representation,
            self.protein_representation,
            sym=True,
            dim_pred=dim_pred,
            name='protein_sequence_reconstruct')
        self.protein_sequence_reconstruct_loss = tf.reduce_sum(
            tf.multiply(
                (self.protein_sequence_reconstruct - self.protein_sequence),
                (self.protein_sequence_reconstruct - self.protein_sequence)))

        self.protein_disease_reconstruct = bi_layer(
            self.protein_representation,
            self.disease_representation,
            sym=False,
            dim_pred=dim_pred,
            name='protein_disease_reconstruct')
        self.protein_disease_reconstruct_loss = tf.reduce_sum(
            tf.multiply(
                (self.protein_disease_reconstruct - self.protein_disease),
                (self.protein_disease_reconstruct - self.protein_disease)))

        self.drug_protein_reconstruct = bi_layer(
            self.drug_representation,
            self.protein_representation,
            sym=False,
            dim_pred=dim_pred,
            name='drug_protein_reconstruct')
        tmp = tf.multiply(self.drug_protein_mask,
                          (self.drug_protein_reconstruct - self.drug_protein))
        self.drug_protein_reconstruct_loss = tf.reduce_sum(
            tf.multiply(tmp, tmp))

        self.l2_loss = tf.add_n(tf.get_collection("l2_reg"))

        self.loss = self.drug_protein_reconstruct_loss + 1.0 * (
            self.drug_drug_reconstruct_loss +
            self.drug_chemical_reconstruct_loss +
            self.drug_disease_reconstruct_loss +
            self.drug_sideeffect_reconstruct_loss +
            self.protein_protein_reconstruct_loss +
            self.protein_sequence_reconstruct_loss +
            self.protein_disease_reconstruct_loss) + self.l2_loss
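
bi_layer is called throughout this snippet but defined elsewhere. A sketch consistent with the call sites (sym=True shares one projection so the reconstruction is symmetric; name scopes the variables), reusing the weight_variable helper the snippet already assumes:

def bi_layer(x0, x1, sym, dim_pred, name=None):
    # Hypothetical reconstruction: project both sides into a dim_pred space
    # and score every pair by inner product.
    with tf.variable_scope(name, default_name='bi_layer'):
        W0p = weight_variable([x0.get_shape().as_list()[1], dim_pred], name='W0p')
        if sym:
            W1p = W0p  # shared projection => symmetric scores
        else:
            W1p = weight_variable([x1.get_shape().as_list()[1], dim_pred], name='W1p')
        return tf.matmul(tf.matmul(x0, W0p), tf.matmul(x1, W1p), transpose_b=True)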
Exemple #28
0
def a_layer(x, units):
    W = weight_variable([x.get_shape().as_list()[1], units])
    b = bias_variable([units])
    tf.add_to_collection('l2_reg', tf.contrib.layers.l2_regularizer(1.0)(W))
    return relu(tf.matmul(x, W) + b)
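
a_layer relies on weight_variable and bias_variable, which this excerpt never defines. A common TF1-style pair, with the initializer constants as guesses, is:

def weight_variable(shape, name=None):
    # Plausible stand-in for the helper assumed above; stddev=0.1 is a guess.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)

def bias_variable(shape, name=None):
    # Plausible stand-in for the helper assumed above; the 0.1 constant is a guess.
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)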

X, Y = oxflower17.load_data(one_hot=True, resize_pics=(299, 299))

num_classes = 17
dropout_keep_prob = 0.8

network = input_data(shape=[None, 299, 299, 3])
conv1a_3_3 = relu(batch_normalization(conv_2d(network, 32, 3, strides=2, bias=False, padding='VALID',activation=None,name='Conv2d_1a_3x3')))
conv2a_3_3 = relu(batch_normalization(conv_2d(conv1a_3_3, 32, 3, bias=False, padding='VALID',activation=None, name='Conv2d_2a_3x3')))
conv2b_3_3 = relu(batch_normalization(conv_2d(conv2a_3_3, 64, 3, bias=False, activation=None, name='Conv2d_2b_3x3')))
maxpool3a_3_3 = max_pool_2d(conv2b_3_3, 3, strides=2, padding='VALID', name='MaxPool_3a_3x3')
conv3b_1_1 = relu(batch_normalization(conv_2d(maxpool3a_3_3, 80, 1, bias=False, padding='VALID',activation=None, name='Conv2d_3b_1x1')))
conv4a_3_3 = relu(batch_normalization(conv_2d(conv3b_1_1, 192, 3, bias=False, padding='VALID',activation=None, name='Conv2d_4a_3x3')))
maxpool5a_3_3 = max_pool_2d(conv4a_3_3, 3, strides=2, padding='VALID', name='MaxPool_5a_3x3')

tower_conv = relu(batch_normalization(conv_2d(maxpool5a_3_3, 96, 1, bias=False, activation=None, name='Conv2d_5b_b0_1x1')))

tower_conv1_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 48, 1, bias=False, activation=None, name='Conv2d_5b_b1_0a_1x1')))
tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 64, 5, bias=False, activation=None, name='Conv2d_5b_b1_0b_5x5')))

tower_conv2_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 64, 1, bias=False, activation=None, name='Conv2d_5b_b2_0a_1x1')))
tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2_0, 96, 3, bias=False, activation=None, name='Conv2d_5b_b2_0b_3x3')))
tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 96, 3, bias=False, activation=None,name='Conv2d_5b_b2_0c_3x3')))
Exemple #30
0
def network(img_shape, name, LR):

    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_blur(sigma_max=3.0)
    img_aug.add_random_flip_leftright()
    img_aug.add_random_flip_updown()
    img_aug.add_random_90degrees_rotation(rotations=[0, 2])

    network = input_data(shape=img_shape, name=name, data_preprocessing=img_prep, data_augmentation=img_aug)
    conv1a_3_3 = relu(batch_normalization(conv_2d(network, 32, 3, strides=2, bias=False, padding='VALID',activation=None,name='Conv2d_1a_3x3')))
    conv2a_3_3 = relu(batch_normalization(conv_2d(conv1a_3_3, 32, 3, bias=False, padding='VALID',activation=None, name='Conv2d_2a_3x3')))
    conv2b_3_3 = relu(batch_normalization(conv_2d(conv2a_3_3, 64, 3, bias=False, activation=None, name='Conv2d_2b_3x3')))
    maxpool3a_3_3 = max_pool_2d(conv2b_3_3, 3, strides=2, padding='VALID', name='MaxPool_3a_3x3')
    conv3b_1_1 = relu(batch_normalization(conv_2d(maxpool3a_3_3, 80, 1, bias=False, padding='VALID',activation=None, name='Conv2d_3b_1x1')))
    conv4a_3_3 = relu(batch_normalization(conv_2d(conv3b_1_1, 192, 3, bias=False, padding='VALID',activation=None, name='Conv2d_4a_3x3')))
    maxpool5a_3_3 = max_pool_2d(conv4a_3_3, 3, strides=2, padding='VALID', name='MaxPool_5a_3x3')

    tower_conv = relu(batch_normalization(conv_2d(maxpool5a_3_3, 96, 1, bias=False, activation=None, name='Conv2d_5b_b0_1x1')))

    tower_conv1_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 48, 1, bias=False, activation=None, name='Conv2d_5b_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 64, 5, bias=False, activation=None, name='Conv2d_5b_b1_0b_5x5')))

    tower_conv2_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 64, 1, bias=False, activation=None, name='Conv2d_5b_b2_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2_0, 96, 3, bias=False, activation=None, name='Conv2d_5b_b2_0b_3x3')))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 96, 3, bias=False, activation=None,name='Conv2d_5b_b2_0c_3x3')))

    tower_pool3_0 = avg_pool_2d(maxpool5a_3_3, 3, strides=1, padding='same', name='AvgPool_5b_b3_0a_3x3')
    tower_conv3_1 = relu(batch_normalization(conv_2d(tower_pool3_0, 64, 1, bias=False, activation=None,name='Conv2d_5b_b3_0b_1x1')))

    tower_5b_out = merge([tower_conv, tower_conv1_1, tower_conv2_2, tower_conv3_1], mode='concat', axis=3)

    net = repeat(tower_5b_out, 10, block35, scale=0.17)

    tower_conv = relu(batch_normalization(conv_2d(net, 384, 3, bias=False, strides=2,activation=None, padding='VALID', name='Conv2d_6a_b0_0a_3x3')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 256, 3, bias=False, activation=None, name='Conv2d_6a_b1_0b_3x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 384, 3, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_6a_b1_0c_3x3')))
    tower_pool = max_pool_2d(net, 3, strides=2, padding='VALID',name='MaxPool_1a_3x3')
    net = merge([tower_conv, tower_conv1_2, tower_pool], mode='concat', axis=3)
    net = repeat(net, 20, block17, scale=0.1)

    tower_conv = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    # tower_conv0_1 = relu(batch_normalization(conv_2d(tower_conv, 384, 3, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_0a_1x1')))
    tower_conv0_1 = relu(batch_normalization(conv_2d(tower_conv, 384, 1, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_0a_1x1')))

    tower_conv1 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, padding='VALID', activation=None,name='Conv2d_0a_1x1')))
    # tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1, 288, 3, bias=False, strides=2, padding='VALID', activation=None, name='Conv2d_1a_3x3')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1, 288, 1, bias=False, strides=2, padding='VALID', activation=None, name='Conv2d_1a_3x3')))

    tower_conv2 = relu(batch_normalization(conv_2d(net, 256,1, bias=False, activation=None,name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2, 288,3, bias=False, name='Conv2d_0b_3x3',activation=None)))
    # tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 320, 3, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3')))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 320, 1, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3')))

    # tower_pool = max_pool_2d(net, 3, strides=2, padding='VALID', name='MaxPool_1a_3x3')
    tower_pool = max_pool_2d(net, 1, strides=2, padding='VALID', name='MaxPool_1a_3x3')
    net = merge([tower_conv0_1, tower_conv1_1,tower_conv2_2, tower_pool], mode='concat', axis=3)

    net = repeat(net, 9, block8, scale=0.2)
    net = block8(net, activation=None)

    net = relu(batch_normalization(conv_2d(net, 1536, 1, bias=False, activation=None, name='Conv2d_7b_1x1')))
    net = avg_pool_2d(net, net.get_shape().as_list()[1:3],strides=2, padding='VALID', name='AvgPool_1a_8x8')
    net = flatten(net)
    net = dropout(net, dropout_keep_prob)
    loss = fully_connected(net, num_classes,activation='softmax')


    network = tflearn.regression(loss, optimizer='RMSprop',
                                 loss='categorical_crossentropy',
                                 learning_rate=LR, name='targets')
    return network
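
A short usage sketch for network() (hypothetical: X / Y are preloaded 299x299 RGB batches; block35/block17/block8, num_classes and dropout_keep_prob are assumed to exist at module level as in the neighbouring snippets; the feed keys follow the name='input' / name='targets' arguments above):

net = network([None, 299, 299, 3], 'input', 0.0001)
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit({'input': X}, {'targets': Y},
          n_epoch=10, validation_set=0.1,
          show_metric=True, batch_size=32,
          run_id='inception_resnet_v2')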
Exemple #31
0
    def _build_model(self):


        # inputs
        self.RD = tf.placeholder(tf.float32, [num_R, num_D])
        self.RD_normalize = tf.placeholder(tf.float32, [num_R, num_D])
        self.DR = tf.placeholder(tf.float32, [num_D, num_R])
        self.DR_normalize = tf.placeholder(tf.float32, [num_D, num_R])


        self.DS1 = tf.placeholder(tf.float32, [num_D, num_D])
        self.DS2 = tf.placeholder(tf.float32, [num_D, num_D])
        self.DSfc = tf.placeholder(tf.float32, [num_D, num_D])
        self.DSgs = tf.placeholder(tf.float32, [num_D, num_D])
        self.DS_normalize1 = tf.placeholder(tf.float32, [num_D, num_D])
        self.DS_normalize2 = tf.placeholder(tf.float32, [num_D, num_D])
        self.DSfc_normalize = tf.placeholder(tf.float32, [num_D, num_D])
        self.DSgs_normalize = tf.placeholder(tf.float32, [num_D, num_D])

        self.RS = tf.placeholder(tf.float32, [num_R, num_R])
        self.RSgs = tf.placeholder(tf.float32, [num_R, num_R])
        self.RSsq = tf.placeholder(tf.float32, [num_R, num_R])

        self.RS_normalize = tf.placeholder(tf.float32, [num_R, num_R])
        self.RSgs_normalize = tf.placeholder(tf.float32, [num_R, num_R])
        self.RSsq_normalize = tf.placeholder(tf.float32, [num_R, num_R])


        self.RSmask = tf.placeholder(tf.float32, [num_R, num_R])
        self.DSmask1 = tf.placeholder(tf.float32, [num_D, num_D])
        self.DSmask2 = tf.placeholder(tf.float32, [num_D, num_D])
        self.RD_mask = tf.placeholder(tf.float32, [num_R, num_D])
        # features
        self.Dembedding = weight_variable([num_D, dim_D])
        self.Rembedding = weight_variable([num_R, dim_R])


        W0 = weight_variable([dim_pass + dim_R, dim_R])
        b0 = bias_variable([dim_R])

        # passing 1 times (can be easily extended to multiple passes)
        R_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.RD_normalize, a_layer(self.Dembedding, dim_pass)) + \
                       tf.matmul(self.RS_normalize, a_layer(self.Rembedding, dim_pass))+ \
                       tf.matmul(self.RSgs_normalize, a_layer(self.Rembedding, dim_pass))+ \
                       tf.matmul(self.RSsq_normalize, a_layer(self.Rembedding, dim_pass)), \
                       self.Rembedding], axis=1), W0) + b0), dim=1)

        D_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.DR_normalize, a_layer(self.Rembedding, dim_pass)) + \
                       tf.matmul(self.DS_normalize1, a_layer(self.Dembedding, dim_pass))+ \
                       tf.matmul(self.DS_normalize2, a_layer(self.Dembedding, dim_pass))+ \
                       tf.matmul(self.DSgs_normalize, a_layer(self.Dembedding, dim_pass))+ \
                       tf.matmul(self.DSfc_normalize, a_layer(self.Dembedding, dim_pass)), \
                       self.Dembedding], axis=1), W0) + b0), dim=1)



        self.R_representation = R_vector1
        self.D_representation = D_vector1



        # reconstructing networks
        self.R_reconstruct = bi_layer(self.R_representation, self.R_representation, sym=True, dim_pred=dim_pred)
        RStmp = (self.R_reconstruct - self.RS)
        #RStmp = tf.multiply(self.RSmask, (self.R_reconstruct - self.RS))
        # RStempu = tf.multiply((tf.ones((num_R, num_R)) - self.RSmask), (self.R_reconstruct - self.RS))
        self.R_reconstruct_loss = tf.reduce_sum(tf.multiply(RStmp, RStmp))# +0.21*tf.reduce_sum(tf.multiply(RStempu, RStempu))

        self.Rgs_reconstruct = bi_layer(self.R_representation, self.R_representation, sym=True, dim_pred=dim_pred)
        RSgstmp = (self.Rgs_reconstruct - self.RSgs)
        #RSgstmp = tf.multiply(self.RSmask, (self.R_reconstruct - self.RSgs))
        # RStempu = tf.multiply((tf.ones((num_R, num_R)) - self.RSmask), (self.R_reconstruct - self.RS))
        self.Rgs_reconstruct_loss = tf.reduce_sum(tf.multiply(RSgstmp, RSgstmp))

        self.Rsq_reconstruct = bi_layer(self.R_representation, self.R_representation, sym=True, dim_pred=dim_pred)
        RSsqtmp = (self.Rsq_reconstruct - self.RSsq)
        #RSfctmp = tf.multiply(self.RSmask, (self.R_reconstruct - self.RSgs))
        # RStempu = tf.multiply((tf.ones((num_R, num_R)) - self.RSmask), (self.R_reconstruct - self.RS))
        self.Rsq_reconstruct_loss = tf.reduce_sum(tf.multiply(RSsqtmp, RSsqtmp))


        self.D_reconstruct1 = bi_layer(self.D_representation, self.D_representation, sym=True, dim_pred=dim_pred)
        #DStmp1 = tf.multiply(self.DSmask1, (self.D_reconstruct1 - self.DS1))
        DStmp1 = (self.D_reconstruct1 - self.DS1)
        DStempu1 = tf.multiply((tf.ones((num_D, num_D)) - self.DS1), (self.D_reconstruct1 - self.DS1))
        self.D_reconstruct_loss1 = tf.reduce_sum(tf.multiply(DStmp1, DStmp1)) #+0.1*tf.reduce_sum(tf.multiply(DStempu1, DStempu1))

        self.D_reconstruct2 = bi_layer(self.D_representation, self.D_representation, sym=True, dim_pred=dim_pred)
        #DStmp2 = tf.multiply(self.DSmask2, (self.D_reconstruct2 - self.DS2))
        DStmp2 = (self.D_reconstruct2 - self.DS2)
        DStempu2 = tf.multiply((tf.ones((num_D, num_D)) - self.DSmask2), (self.D_reconstruct2 - self.DS2))
        self.D_reconstruct_loss2 = tf.reduce_sum(tf.multiply(DStmp2, DStmp2)) #+0.1*tf.reduce_sum(tf.multiply(DStempu2, DStempu2))

        self.Dfc_reconstruct = bi_layer(self.D_representation, self.D_representation, sym=True, dim_pred=dim_pred)
        #DStmp1 = tf.multiply(self.DSmask1, (self.D_reconstruct1 - self.DS_normalize1))
        DSfctmp = (self.Dfc_reconstruct - self.DSfc)
        #DStempu1 = tf.multiply((tf.ones((num_D, num_D)) - self.DS1), (self.D_reconstruct1 - self.DS1))
        self.Dfc_reconstruct_loss = tf.reduce_sum(tf.multiply(DSfctmp, DSfctmp))

        self.Dgs_reconstruct = bi_layer(self.D_representation, self.D_representation, sym=True, dim_pred=dim_pred)
        #DSgstmp = tf.multiply(self.DSmask1, (self.D_reconstruct1 - self.DS_normalize1))
        DSgstmp = (self.Dgs_reconstruct - self.DSgs)
        #DStempu1 = tf.multiply((tf.ones((num_D, num_D)) - self.DS1), (self.D_reconstruct1 - self.DS1))
        self.Dgs_reconstruct_loss = tf.reduce_sum(tf.multiply(DSgstmp, DSgstmp))


        self.RD_reconstruct = bi_layer(self.R_representation, self.D_representation, sym=False, dim_pred=dim_pred)

        #tmp = tf.multiply(self.RD_mask, (self.RD_reconstruct - self.RD))
        tmp = self.RD_reconstruct - self.RD
        tmpu = tf.multiply((tf.ones((num_R, num_D)) - self.RD_mask), (self.RD_reconstruct - self.RD))

        self.DR_reconstruct_loss = tf.reduce_sum(tf.multiply(tmp, tmp))#+0.03*tf.reduce_sum(tf.multiply(tmpu, tmpu))

        self.loss = self.DR_reconstruct_loss + 2 * (
            (self.R_reconstruct_loss + self.Rsq_reconstruct_loss + self.Rgs_reconstruct_loss) +
            (self.D_reconstruct_loss1 + self.D_reconstruct_loss2 + self.Dfc_reconstruct_loss + self.Dgs_reconstruct_loss))
        # + tf.reduce_sum(tf.multiply(W0, W0)) + tf.reduce_sum(tf.multiply(b0, b0))
Exemple #32
0
    def _build_model(self):
        #inputs
        self.rna_rna = tf.placeholder(tf.float32, [num_rna, num_rna])
        self.rna_rna_normalize = tf.placeholder(tf.float32, [num_rna, num_rna])

        self.dis_dis = tf.placeholder(tf.float32, [num_dis, num_dis])
        self.dis_dis_normalize = tf.placeholder(tf.float32, [num_dis, num_dis])

        self.rna_dis = tf.placeholder(tf.float32, [num_rna, num_dis])
        self.rna_dis_normalize = tf.placeholder(tf.float32, [num_rna, num_dis])

        self.dis_rna = tf.placeholder(tf.float32, [num_dis, num_rna])
        self.dis_rna_normalize = tf.placeholder(tf.float32, [num_dis, num_rna])

        self.rna_dis_mask = tf.placeholder(tf.float32, [num_rna, num_dis])

        #features
        self.rna_embedding = weight_variable([num_rna, dim_rna])
        self.dis_embedding = weight_variable([num_dis, dim_dis])

        #feature passing weights (maybe different types of nodes can use different weights)
        W0 = weight_variable([dim_pass + dim_rna, dim_rna])
        b0 = bias_variable([dim_rna])

        rna_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([\
            tf.matmul(self.rna_rna_normalize, a_layer(self.rna_embedding, dim_pass)) + \
            tf.matmul(self.rna_dis_normalize, a_layer(self.dis_embedding, dim_pass)), \
            self.rna_embedding], axis=1), W0)+b0),dim=1)

        dis_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([\
            tf.matmul(self.dis_dis_normalize, a_layer(self.dis_embedding, dim_pass)) + \
            tf.matmul(self.dis_rna_normalize, a_layer(self.rna_embedding, dim_pass)) , \
            self.dis_embedding], axis=1), W0)+b0),dim=1)

        self.rna_representation = rna_vector1
        self.dis_representation = dis_vector1

        #reconstructing networks
        self.rna_rna_reconstruct = bi_layer(self.rna_representation,
                                            self.rna_representation,
                                            sym=True,
                                            dim_pred=dim_pred)
        self.rna_rna_reconstruct_loss = tf.reduce_sum(
            tf.multiply((self.rna_rna_reconstruct - self.rna_rna),
                        (self.rna_rna_reconstruct - self.rna_rna)))

        self.dis_dis_reconstruct = bi_layer(self.dis_representation,
                                            self.dis_representation,
                                            sym=True,
                                            dim_pred=dim_pred)
        self.dis_dis_reconstruct_loss = tf.reduce_sum(
            tf.multiply((self.dis_dis_reconstruct - self.dis_dis),
                        (self.dis_dis_reconstruct - self.dis_dis)))

        self.rna_dis_reconstruct = bi_layer(self.rna_representation,
                                            self.dis_representation,
                                            sym=False,
                                            dim_pred=dim_pred)
        tmp = tf.multiply(self.rna_dis_mask,
                          (self.rna_dis_reconstruct - self.rna_dis))
        self.rna_dis_reconstruct_loss = tf.reduce_sum(tf.multiply(tmp, tmp))

        self.loss = self.rna_dis_reconstruct_loss + 1.0*(self.rna_rna_reconstruct_loss + \
                                                            self.dis_dis_reconstruct_loss)
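
A hypothetical driver for this graph, assuming `model` is a built instance exposing the tensors above and the nine numpy matrices carry the declared shapes (the optimizer choice and learning rate are guesses):

train_op = tf.train.AdamOptimizer(1e-3).minimize(model.loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, cur_loss = sess.run([train_op, model.loss], feed_dict={
        model.rna_rna: rna_rna,
        model.rna_rna_normalize: rna_rna_normalize,
        model.dis_dis: dis_dis,
        model.dis_dis_normalize: dis_dis_normalize,
        model.rna_dis: rna_dis,
        model.rna_dis_normalize: rna_dis_normalize,
        model.dis_rna: dis_rna,
        model.dis_rna_normalize: dis_rna_normalize,
        model.rna_dis_mask: rna_dis_mask,
    })
    print('step loss:', cur_loss)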
Exemple #33
0
    def _build_model(self):
        #inputs
        self.drug_drug = tf.placeholder(tf.float32, [num_drug, num_drug])
        self.drug_drug_normalize = tf.placeholder(tf.float32, [num_drug, num_drug])

        self.drug_chemical = tf.placeholder(tf.float32, [num_drug, num_drug])
        self.drug_chemical_normalize = tf.placeholder(tf.float32, [num_drug, num_drug])

        self.drug_disease = tf.placeholder(tf.float32, [num_drug, num_disease])
        self.drug_disease_normalize = tf.placeholder(tf.float32, [num_drug, num_disease])

        self.drug_sideeffect = tf.placeholder(tf.float32, [num_drug, num_sideeffect])
        self.drug_sideeffect_normalize = tf.placeholder(tf.float32, [num_drug, num_sideeffect])

        
        self.protein_protein = tf.placeholder(tf.float32, [num_protein, num_protein])
        self.protein_protein_normalize = tf.placeholder(tf.float32, [num_protein, num_protein])

        self.protein_sequence = tf.placeholder(tf.float32, [num_protein, num_protein])
        self.protein_sequence_normalize = tf.placeholder(tf.float32, [num_protein, num_protein])

        self.protein_disease = tf.placeholder(tf.float32, [num_protein, num_disease])
        self.protein_disease_normalize = tf.placeholder(tf.float32, [num_protein, num_disease])
        
        self.disease_drug = tf.placeholder(tf.float32, [num_disease, num_drug])
        self.disease_drug_normalize = tf.placeholder(tf.float32, [num_disease, num_drug])

        self.disease_protein = tf.placeholder(tf.float32, [num_disease, num_protein])
        self.disease_protein_normalize = tf.placeholder(tf.float32, [num_disease, num_protein])

        self.sideeffect_drug = tf.placeholder(tf.float32, [num_sideeffect, num_drug])
        self.sideeffect_drug_normalize = tf.placeholder(tf.float32, [num_sideeffect, num_drug])

        self.drug_protein = tf.placeholder(tf.float32, [num_drug, num_protein])
        self.drug_protein_normalize = tf.placeholder(tf.float32, [num_drug, num_protein])

        self.protein_drug = tf.placeholder(tf.float32, [num_protein, num_drug])
        self.protein_drug_normalize = tf.placeholder(tf.float32, [num_protein, num_drug])

        self.drug_protein_mask = tf.placeholder(tf.float32, [num_drug, num_protein])

        #features
        self.drug_embedding = weight_variable([num_drug,dim_drug])
        self.protein_embedding = weight_variable([num_protein,dim_protein])
        self.disease_embedding = weight_variable([num_disease,dim_disease])
        self.sideeffect_embedding = weight_variable([num_sideeffect,dim_sideeffect])
        
        #feature passing weights (maybe different types of nodes can use different weights)
        W0 = weight_variable([dim_pass+dim_drug, dim_drug])
        b0 = bias_variable([dim_drug])

        #passing 1 times (can be easily extended to multiple passes)
        drug_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.drug_drug_normalize, a_layer(self.drug_embedding, dim_pass)) + \
            tf.matmul(self.drug_chemical_normalize, a_layer(self.drug_embedding, dim_pass)) + \
            tf.matmul(self.drug_disease_normalize, a_layer(self.disease_embedding, dim_pass)) + \
            tf.matmul(self.drug_sideeffect_normalize, a_layer(self.sideeffect_embedding, dim_pass)) + \
            tf.matmul(self.drug_protein_normalize, a_layer(self.protein_embedding, dim_pass)), \
            self.drug_embedding], axis=1), W0)+b0),dim=1)

        protein_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.protein_protein_normalize, a_layer(self.protein_embedding, dim_pass)) + \
            tf.matmul(self.protein_sequence_normalize, a_layer(self.protein_embedding, dim_pass)) + \
            tf.matmul(self.protein_disease_normalize, a_layer(self.disease_embedding, dim_pass)) + \
            tf.matmul(self.protein_drug_normalize, a_layer(self.drug_embedding, dim_pass)), \
            self.protein_embedding], axis=1), W0)+b0),dim=1)

        disease_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.disease_drug_normalize, a_layer(self.drug_embedding, dim_pass)) + \
            tf.matmul(self.disease_protein_normalize, a_layer(self.protein_embedding, dim_pass)), \
            self.disease_embedding], axis=1), W0)+b0),dim=1)

        sideeffect_vector1 = tf.nn.l2_normalize(relu(tf.matmul(
            tf.concat([tf.matmul(self.sideeffect_drug_normalize, a_layer(self.drug_embedding, dim_pass)), \
            self.sideeffect_embedding], axis=1), W0)+b0),dim=1)


        self.drug_representation = drug_vector1
        self.protein_representation = protein_vector1
        self.disease_representation = disease_vector1
        self.sideeffect_representation = sideeffect_vector1

        #reconstructing networks
        self.drug_drug_reconstruct = bi_layer(self.drug_representation,self.drug_representation, sym=True, dim_pred=dim_pred)
        self.drug_drug_reconstruct_loss = tf.reduce_sum(tf.multiply((self.drug_drug_reconstruct-self.drug_drug), (self.drug_drug_reconstruct-self.drug_drug)))

        self.drug_chemical_reconstruct = bi_layer(self.drug_representation,self.drug_representation, sym=True, dim_pred=dim_pred)
        self.drug_chemical_reconstruct_loss = tf.reduce_sum(tf.multiply((self.drug_chemical_reconstruct-self.drug_chemical), (self.drug_chemical_reconstruct-self.drug_chemical)))


        self.drug_disease_reconstruct = bi_layer(self.drug_representation,self.disease_representation, sym=False, dim_pred=dim_pred)
        self.drug_disease_reconstruct_loss = tf.reduce_sum(tf.multiply((self.drug_disease_reconstruct-self.drug_disease), (self.drug_disease_reconstruct-self.drug_disease)))


        self.drug_sideeffect_reconstruct = bi_layer(self.drug_representation,self.sideeffect_representation, sym=False, dim_pred=dim_pred)
        self.drug_sideeffect_reconstruct_loss = tf.reduce_sum(tf.multiply((self.drug_sideeffect_reconstruct-self.drug_sideeffect), (self.drug_sideeffect_reconstruct-self.drug_sideeffect)))


        self.protein_protein_reconstruct = bi_layer(self.protein_representation,self.protein_representation, sym=True, dim_pred=dim_pred)
        self.protein_protein_reconstruct_loss = tf.reduce_sum(tf.multiply((self.protein_protein_reconstruct-self.protein_protein), (self.protein_protein_reconstruct-self.protein_protein)))

        self.protein_sequence_reconstruct = bi_layer(self.protein_representation,self.protein_representation, sym=True, dim_pred=dim_pred)
        self.protein_sequence_reconstruct_loss = tf.reduce_sum(tf.multiply((self.protein_sequence_reconstruct-self.protein_sequence), (self.protein_sequence_reconstruct-self.protein_sequence)))


        self.protein_disease_reconstruct = bi_layer(self.protein_representation,self.disease_representation, sym=False, dim_pred=dim_pred)
        self.protein_disease_reconstruct_loss = tf.reduce_sum(tf.multiply((self.protein_disease_reconstruct-self.protein_disease), (self.protein_disease_reconstruct-self.protein_disease)))


        self.drug_protein_reconstruct = bi_layer(self.drug_representation,self.protein_representation, sym=False, dim_pred=dim_pred)
        tmp = tf.multiply(self.drug_protein_mask, (self.drug_protein_reconstruct-self.drug_protein))
        self.drug_protein_reconstruct_loss = tf.reduce_sum(tf.multiply(tmp, tmp))

        self.loss = self.drug_protein_reconstruct_loss + 1.0*(self.drug_drug_reconstruct_loss+self.drug_chemical_reconstruct_loss+
                                                            self.drug_disease_reconstruct_loss+self.drug_sideeffect_reconstruct_loss+
                                                            self.protein_protein_reconstruct_loss+self.protein_sequence_reconstruct_loss+
                                                            self.protein_disease_reconstruct_loss)
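
Note that only drug_protein_reconstruct_loss is masked: entries zeroed out in drug_protein_mask (the held-out pairs) contribute nothing to the objective. A toy numpy check of that masked squared error:

import numpy as np

rec    = np.array([[0.9, 0.2], [0.1, 0.8]])   # reconstruction
target = np.array([[1.0, 0.0], [0.0, 1.0]])   # observed interaction matrix
mask   = np.array([[1.0, 0.0], [1.0, 1.0]])   # 0 marks a held-out pair
tmp = mask * (rec - target)
print((tmp * tmp).sum())   # 0.06 -- the masked (0, 1) entry is excluded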
Exemple #34
0
def _model5():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    def block35(net, scale=1.0, activation="relu"):
        tower_conv = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None, name='Conv2d_1x1')))
        tower_conv1_0 = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None,name='Conv2d_0a_1x1')))
        tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 32, 3, bias=False, activation=None,name='Conv2d_0b_3x3')))
        tower_conv2_0 = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
        tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2_0, 48,3, bias=False, activation=None, name='Conv2d_0b_3x3')))
        tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 64,3, bias=False, activation=None, name='Conv2d_0c_3x3')))
        tower_mixed = merge([tower_conv, tower_conv1_1, tower_conv2_2], mode='concat', axis=3)
        tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net

    def block17(net, scale=1.0, activation="relu"):
        tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
        tower_conv_1_0 = relu(batch_normalization(conv_2d(net, 128, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
        tower_conv_1_1 = relu(batch_normalization(conv_2d(tower_conv_1_0, 160,[1,7], bias=False, activation=None,name='Conv2d_0b_1x7')))
        tower_conv_1_2 = relu(batch_normalization(conv_2d(tower_conv_1_1, 192, [7,1], bias=False, activation=None,name='Conv2d_0c_7x1')))
        tower_mixed = merge([tower_conv,tower_conv_1_2], mode='concat', axis=3)
        tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net


    def block8(net, scale=1.0, activation="relu"):
        tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
        tower_conv1_0 = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
        tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 224, [1,3], bias=False, activation=None, name='Conv2d_0b_1x3')))
        tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 256, [3,1], bias=False, name='Conv2d_0c_3x1')))
        tower_mixed = merge([tower_conv,tower_conv1_2], mode='concat', axis=3)
        tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
        net += scale * tower_out
        if activation:
            if isinstance(activation, str):
                net = activations.get(activation)(net)
            elif hasattr(activation, '__call__'):
                net = activation(net)
            else:
                raise ValueError("Invalid Activation.")
        return net


    num_classes = len(yTest[0])
    dropout_keep_prob = 0.8

    network = input_data(shape=[None, inputSize, inputSize, dim],
             name='input',
             data_preprocessing=img_prep,
             data_augmentation=img_aug)
    conv1a_3_3 = relu(batch_normalization(conv_2d(network, 32, 3, strides=2, bias=False, padding='VALID',activation=None,name='Conv2d_1a_3x3')))
    conv2a_3_3 = relu(batch_normalization(conv_2d(conv1a_3_3, 32, 3, bias=False, padding='VALID',activation=None, name='Conv2d_2a_3x3')))
    conv2b_3_3 = relu(batch_normalization(conv_2d(conv2a_3_3, 64, 3, bias=False, activation=None, name='Conv2d_2b_3x3')))
    maxpool3a_3_3 = max_pool_2d(conv2b_3_3, 3, strides=2, padding='VALID', name='MaxPool_3a_3x3')
    conv3b_1_1 = relu(batch_normalization(conv_2d(maxpool3a_3_3, 80, 1, bias=False, padding='VALID',activation=None, name='Conv2d_3b_1x1')))
    conv4a_3_3 = relu(batch_normalization(conv_2d(conv3b_1_1, 192, 3, bias=False, padding='VALID',activation=None, name='Conv2d_4a_3x3')))
    maxpool5a_3_3 = max_pool_2d(conv4a_3_3, 3, strides=2, padding='VALID', name='MaxPool_5a_3x3')

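    # Mixed_5b: four parallel towers (1x1; 1x1 -> 5x5; 1x1 -> 3x3 -> 3x3;
    # avg-pool -> 1x1) concatenated along the channel axis.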
    tower_conv = relu(batch_normalization(conv_2d(maxpool5a_3_3, 96, 1, bias=False, activation=None, name='Conv2d_5b_b0_1x1')))

    tower_conv1_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 48, 1, bias=False, activation=None, name='Conv2d_5b_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 64, 5, bias=False, activation=None, name='Conv2d_5b_b1_0b_5x5')))

    tower_conv2_0 = relu(batch_normalization(conv_2d(maxpool5a_3_3, 64, 1, bias=False, activation=None, name='Conv2d_5b_b2_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2_0, 96, 3, bias=False, activation=None, name='Conv2d_5b_b2_0b_3x3')))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 96, 3, bias=False, activation=None, name='Conv2d_5b_b2_0c_3x3')))

    tower_pool3_0 = avg_pool_2d(maxpool5a_3_3, 3, strides=1, padding='same', name='AvgPool_5b_b3_0a_3x3')
    tower_conv3_1 = relu(batch_normalization(conv_2d(tower_pool3_0, 64, 1, bias=False, activation=None, name='Conv2d_5b_b3_0b_1x1')))

    tower_5b_out = merge([tower_conv, tower_conv1_1, tower_conv2_2, tower_conv3_1], mode='concat', axis=3)

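    # 10 repetitions of block35 ("Inception-ResNet-A") with residual scaling 0.17.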
    net = repeat(tower_5b_out, 10, block35, scale=0.17)
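    # The reference reduction blocks and block17 stage (3x3 kernels, stride 2)
    # are kept below for comparison but commented out: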
    '''
    tower_conv = relu(batch_normalization(conv_2d(net, 384, 3, bias=False, strides=2,activation=None, padding='VALID', name='Conv2d_6a_b0_0a_3x3')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 256, 3, bias=False, activation=None, name='Conv2d_6a_b1_0b_3x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 384, 3, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_6a_b1_0c_3x3')))
    tower_pool = max_pool_2d(net, 3, strides=2, padding='VALID',name='MaxPool_1a_3x3')
    net = merge([tower_conv, tower_conv1_2, tower_pool], mode='concat', axis=3)
    net = repeat(net, 20, block17, scale=0.1)

    tower_conv = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv0_1 = relu(batch_normalization(conv_2d(tower_conv, 384, 3, bias=False, strides=2, padding='VALID', activation=None,name='Conv2d_0a_1x1')))

    tower_conv1 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, padding='VALID', activation=None,name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1, 288, 3, bias=False, strides=2, padding='VALID', activation=None, name='Conv2d_1a_3x3')))

    tower_conv2 = relu(batch_normalization(conv_2d(net, 256,1, bias=False, activation=None,name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2, 288,3, bias=False, name='Conv2d_0b_3x3',activation=None)))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 320, 3, bias=False, strides=2, padding='VALID',activation=None, name='Conv2d_1a_3x3')))
    
    tower_pool = max_pool_2d(net, 3, strides=2, padding='VALID', name='MaxPool_1a_3x3')
    '''
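    # Active variant of the same topology with 1x1 kernels and a 1x1 pooling
    # window (the layer names still carry the original 3x3 labels), presumably
    # so the graph also fits smaller input resolutions.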
    tower_conv = relu(batch_normalization(conv_2d(net, 384, 1, bias=False, strides=2, activation=None, padding='VALID', name='Conv2d_6a_b0_0a_3x3')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 256, 1, bias=False, activation=None, name='Conv2d_6a_b1_0b_3x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 384, 1, bias=False, strides=2, padding='VALID', activation=None, name='Conv2d_6a_b1_0c_3x3')))
    tower_pool = max_pool_2d(net, 1, strides=2, padding='VALID', name='MaxPool_1a_3x3')
    net = merge([tower_conv, tower_conv1_2, tower_pool], mode='concat', axis=3)
    net = repeat(net, 20, block17, scale=0.1)

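    # Reduction to the final grid (Mixed_7a pattern): three convolution towers
    # and a strided max-pool, concatenated below.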
    tower_conv = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv0_1 = relu(batch_normalization(conv_2d(tower_conv, 384, 1, bias=False, strides=2, padding='VALID', activation=None, name='Conv2d_0a_1x1')))

    tower_conv1 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, padding='VALID', activation=None, name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1, 288, 1, bias=False, strides=2, padding='VALID', activation=None, name='Conv2d_1a_3x3')))

    tower_conv2 = relu(batch_normalization(conv_2d(net, 256, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2, 288, 1, bias=False, activation=None, name='Conv2d_0b_3x3')))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 320, 1, bias=False, strides=2, padding='VALID', activation=None, name='Conv2d_1a_3x3')))
    
    
    tower_pool = max_pool_2d(net, 1, strides=2, padding='VALID', name='MaxPool_1a_3x3')

    net = merge([tower_conv0_1, tower_conv1_1, tower_conv2_2, tower_pool], mode='concat', axis=3)

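    # 9 repetitions of block8 plus a final block8 with no activation, matching
    # the reference Inception-ResNet-v2 layout.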
    net = repeat(net, 9, block8, scale=0.2)
    net = block8(net, activation=None)

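    # Head: 1x1 convolution to 1536 channels, global average pooling over the
    # remaining grid, dropout, and a softmax classifier.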
    net = relu(batch_normalization(conv_2d(net, 1536, 1, bias=False, activation=None, name='Conv2d_7b_1x1')))
    net = avg_pool_2d(net, net.get_shape().as_list()[1:3], strides=2, padding='VALID', name='AvgPool_1a_8x8')
    net = flatten(net)
    net = dropout(net, dropout_keep_prob)
    net = fully_connected(net, num_classes, activation='softmax')


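    # Training: RMSProp with categorical cross-entropy, periodic snapshots, and
    # TensorBoard logging under ./tflearn_logs/.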
    network = tflearn.regression(net, optimizer='RMSprop',
                                 loss='categorical_crossentropy',
                                 learning_rate=0.0001)
    model = tflearn.DNN(network, checkpoint_path='inception_resnet_v2',
                        max_checkpoints=1, tensorboard_verbose=2,
                        tensorboard_dir="./tflearn_logs/")

    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest), shuffle=True,
              show_metric=True, batch_size=batchNum, snapshot_step=2000,
              snapshot_epoch=False, run_id='inception_resnet_v2_oxflowers17')

    if modelStore:
        model.save(_id + '-model.tflearn')