Example #1
def reaction_encoder(dilations, gf_dim, kernel, reactions, pooling, reuse,
                     scope_name, input_width):
    compressed = []
    strides_schedule = [(2, 2), (2, 2), (2, 2)]
    down_layers = len(strides_schedule)
    with tf.variable_scope("down", reuse=tf.AUTO_REUSE):
        for i in range(4):
            hidden_dim = gf_dim // 8  # integer channel count for the first component conv
            reaction_part = tf.transpose(
                ops.sn_embedding(tf.squeeze(reactions[:, i, :]),
                                 len(SMILES_CHARACTER_TO_ID),
                                 8,
                                 name='component_embedding'),
                perm=[0, 2, 1])
            tf.summary.histogram("embedded_reactions",
                                 reaction_part,
                                 family=scope_name)
            h = tf.expand_dims(reaction_part, axis=3)
            if input_width < 256:
                h = tf.nn.leaky_relu(
                    ops.snconv2d(h,
                                 hidden_dim, (h.get_shape().as_list()[1], 3),
                                 name='first_component_conv',
                                 padding="CYCLE",
                                 strides=(1, 2)))
            tf.summary.histogram("first_component_conv", h, family=scope_name)
            for layer_id in range(down_layers):
                hidden_dim = hidden_dim * strides_schedule[layer_id][0]
                block_name = 'block{}'.format(layer_id)
                dilation_rate = (dilations[0]**(layer_id + 1),
                                 dilations[1]**(layer_id + 1))
                h = ops.sn_norm_block(h,
                                      hidden_dim,
                                      block_name,
                                      get_kernel(h, kernel),
                                      strides_schedule[layer_id],
                                      dilation_rate,
                                      act=tf.nn.leaky_relu,
                                      pooling=pooling,
                                      padding="VALID")
                tf.summary.histogram(block_name, h, family=scope_name)
                if layer_id == 0:
                    h = attention(h, hidden_dim, sn=True, reuse=reuse)
                    tf.summary.histogram("attention", h, family=scope_name)
            compressed.append(h)
        print("Component Shape", compressed[0].shape)
        compressed_reaction = tf.concat(
            [compressed[0], compressed[1], compressed[2], compressed[3]],
            axis=3,
            name="compressed_reaction")
        tf.summary.histogram("compressed_reaction",
                             compressed_reaction,
                             family=scope_name)
        print("REACTION COMPRESSED TO: ", compressed_reaction.shape)
    return compressed_reaction
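A minimal call sketch for the encoder above. The values are illustrative only: it assumes TensorFlow 1.x (`import tensorflow as tf`), the project-local helpers `ops`, `attention`, `get_kernel` and the `SMILES_CHARACTER_TO_ID` vocabulary in scope, and reactions tokenised as 4 components of 128 integer ids each.

import tensorflow as tf

# Hypothetical usage: encode a batch of 4-component SMILES reactions.
reactions = tf.placeholder(tf.int32, [None, 4, 128], name="reactions")
encoded = reaction_encoder(dilations=(1, 1),
                           gf_dim=64,
                           kernel=(3, 3),
                           reactions=reactions,
                           pooling='avg',
                           reuse=False,
                           scope_name='ReactionEncoder',
                           input_width=128)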
Example #2
def generator_resnet(zs,
                     labels,
                     gf_dim,
                     num_classes,
                     kernel=(3, 3),
                     strides=[(2, 2)],
                     dilations=(1, 1),
                     pooling='avg',
                     scope_name='Generator',
                     reuse=False,
                     output_shape=[8, 128, 1],
                     act=tf.nn.leaky_relu):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:

        height_d, width_d = get_dimentions_factors(strides)
        number_of_layers = len(strides)
        hidden_dim = gf_dim * (2**(number_of_layers - 1))

        c_h = int(output_shape[0] / height_d)
        c_w = int((output_shape[1] / width_d))
        h = ops.snlinear(zs, c_h * c_w * hidden_dim, name='noise_linear')
        h = tf.reshape(h, [-1, c_h, c_w, hidden_dim])
        print("COMPRESSED TO: ", h.shape)

        with tf.variable_scope("up", reuse=tf.AUTO_REUSE):
            for layer_id in range(number_of_layers):
                print(h.shape)
                block_name = 'up_block{}'.format(number_of_layers -
                                                 (layer_id + 1))
                dilation_rate = (dilations[0]**(number_of_layers - layer_id),
                                 dilations[1]**(number_of_layers - layer_id))
                h = sn_block(h, hidden_dim, block_name, get_kernel(h, kernel),
                             strides[layer_id], dilation_rate, act, pooling,
                             'VALID')
                tf.summary.histogram(block_name, h, family=scope_name)
                if layer_id == number_of_layers - 2:
                    h = attention(h, hidden_dim, sn=True, reuse=reuse)
                    tf.summary.histogram("up_attention", h, family=scope_name)
                hidden_dim = hidden_dim // strides[layer_id][1]  # keep the channel count an integer

        bn = ops.BatchNorm(name='g_bn')
        h_act = tf.nn.leaky_relu(bn(h), name="h_act")
        if output_shape[2] == 1:
            out = tf.nn.tanh(ops.snconv2d(h_act,
                                          1, (output_shape[0], 1),
                                          name='last_conv'),
                             name="generated")
        else:
            out = tf.nn.tanh(ops.snconv2d(h_act, 3, (1, 1), name='last_conv'),
                             name="generated")
        tf.summary.histogram("Generated_results", out, family=scope_name)
        print("GENERATED SHAPE", out.shape)
        return out
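A usage sketch for the ResNet generator (illustrative hyperparameters; the cumulative `strides` must divide `output_shape` evenly, and `ops`, `sn_block`, `attention` and `get_dimentions_factors` come from the surrounding project). Note that `labels` and `num_classes` are not used in the body shown above.

# Hypothetical usage: map 16 latent vectors to 8x128x1 samples.
zs = tf.random_normal([16, 128])
labels = tf.zeros([16], dtype=tf.int32)
fake = generator_resnet(zs, labels,
                        gf_dim=64,
                        num_classes=10,
                        strides=[(1, 2), (1, 2), (2, 2)],
                        output_shape=[8, 128, 1])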
Example #3
def discriminator_resnet(x,
                         labels,
                         df_dim,
                         number_classes,
                         kernel=(3, 3),
                         strides=[(2, 2)],
                         dilations=(1, 1),
                         pooling='avg',
                         update_collection=None,
                         act=tf.nn.relu,
                         scope_name='Discriminator',
                         reuse=False):
    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()
        tf.summary.histogram("Input", x, family=scope_name)
        h = x
        hidden_dim = df_dim
        for layer in range(len(strides)):
            print(h.shape)
            if layer == 1:
                h = attention(h, hidden_dim, sn=True, reuse=reuse)
                tf.summary.histogram("attention", h, family=scope_name)
            block_name = 'd_block{}'.format(layer)
            hidden_dim = hidden_dim * strides[layer][0]
            dilation_rate = (dilations[0]**(layer + 1),
                             dilations[1]**(layer + 1))
            h = sn_block(h,
                         hidden_dim,
                         block_name,
                         get_kernel(h, kernel),
                         strides[layer],
                         dilation_rate,
                         update_collection,
                         act,
                         pooling,
                         padding='VALID')
            tf.summary.histogram(block_name, h, family=scope_name)

        end_block = act(h, name="after_resnet_block")
        tf.summary.histogram("after_resnet_block",
                             end_block,
                             family=scope_name)

        # h_std = ops.minibatch_stddev_layer(end_block)
        # tf.summary.histogram(h_std.name, h_std, family=scope_name)
        # h_std_conv_std = act(ops.snconv2d(h_std, hidden_dim, (1, 3), update_collection=update_collection,
        #                                  name='minibatch_stddev_stride', padding=None, strides=(1, 3)),
        #                     name="minibatch_stddev_stride_act")
        # tf.summary.histogram("after_mini_batch_std", h_std_conv_std, family=scope_name)
        # h_final_flattened = tf.layers.flatten(h_std_conv_std)
        h_final_flattened = tf.reduce_sum(end_block, [1, 2])
        tf.summary.histogram("h_final_flattened",
                             h_final_flattened,
                             family=scope_name)
        output = ops.snlinear(h_final_flattened,
                              1,
                              update_collection=update_collection,
                              name='d_sn_linear')
        tf.summary.histogram("final_output", output, family=scope_name)
        # output = tf.Print(output, [tf.py_func(
        #     lambda val, score: print_protein_values(val,score),
        #     [tf.squeeze(x)[0], output[0]], tf.string)], "seq:")
        return output, h_final_flattened
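A matching sketch for the discriminator (illustrative values; a reasonable setup mirrors the generator's stride schedule). The function returns the per-sample logit together with the pooled features it was computed from.

# Hypothetical usage: score real or generated 8x128x1 samples.
x = tf.random_normal([16, 8, 128, 1])
labels = tf.zeros([16], dtype=tf.int32)  # unused in the body shown above
logits, features = discriminator_resnet(x,
                                         labels,
                                         df_dim=64,
                                         number_classes=10,
                                         strides=[(1, 2), (1, 2), (2, 2)])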
Example #4
def generator_1d(zs,
                 labels,
                 gf_dim,
                 num_classes,
                 kernel=(3, 3),
                 strides=(2, 2),
                 dilations=(1, 1),
                 pooling='avg',
                 scope_name='Generator',
                 reuse=False,
                 output_shape=[8, 128, 1],
                 act=tf.nn.leaky_relu):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:

        # strides_schedule = get_strides_schedule(input_width)
        strides_schedule = [(1, 2), (1, 2), (1, 2), (1, 2)]
        height_d, width_d = get_dimentions_factors(strides_schedule)

        number_of_layers = len(strides_schedule)
        hidden_dim = gf_dim * (2**(number_of_layers - 1))
        kernel = (1, 5)
        c_h = 1  # int(embedding_height / height_d)
        c_w = int((output_shape[1] / width_d))
        h = ops.snlinear(zs, c_h * int(c_w) * hidden_dim, name='noise_linear')
        tf.summary.histogram("noise_snlinear", h, family=scope_name)
        h = tf.reshape(h, shape=[-1, c_h, int(c_w), hidden_dim])
        # new_shape = [h.get_shape().as_list()[0], h.get_shape().as_list()[1],
        #              h.get_shape().as_list()[2] * 2, hidden_dim]
        # h = ops.sndeconv2d(h, new_shape, (c_h, 3), name='noise_expand', strides=(1, 2))
        # h = act(h)
        tf.summary.histogram("before_blocks", h, family=scope_name)
        print("COMPRESSED TO: ", h.shape)

        with tf.variable_scope("up", reuse=tf.AUTO_REUSE):
            for layer_id in range(number_of_layers):
                block_name = 'up_block{}'.format(number_of_layers -
                                                 (layer_id + 1))
                # dilation_rate = (dilations[0] ** (number_of_layers - layer_id),
                #                  dilations[1] ** (number_of_layers - layer_id))
                dilation_rate = (1, 1)
                h = sn_block(h, hidden_dim, block_name, kernel,
                             strides_schedule[layer_id], dilation_rate, act,
                             pooling, 'VALID')
                tf.summary.histogram(block_name, h, family=scope_name)
                if layer_id == number_of_layers - 2:
                    h = attention(h, hidden_dim, sn=True, reuse=reuse)
                    tf.summary.histogram("up_attention", h, family=scope_name)
                hidden_dim = hidden_dim // 2  # keep the channel count an integer

        bn = ops.BatchNorm(name='g_bn')
        h_act = tf.nn.leaky_relu(bn(h), name="h_act")
        h_act = tf.reshape(h_act, [
            -1, output_shape[0], output_shape[1],
            int(hidden_dim / output_shape[0]) * 2
        ])
        h_act = ops.snconv2d(h_act,
                             int(hidden_dim / output_shape[0]) * 2,
                             (output_shape[0], 1),
                             name='embedding_height')
        out = tf.nn.tanh(ops.snconv2d(h_act,
                                      1, (output_shape[0], 1),
                                      name='last_conv'),
                         name="generated")
        tf.summary.histogram("Generated_results", out, family=scope_name)
        print("GENERATED SHAPE", out.shape)
        return out
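A call sketch for the 1-D generator variant (hypothetical values). This function hard-codes its own strides schedule, a (1, 5) kernel and a (1, 1) dilation rate, so the `kernel`, `strides` and `dilations` arguments in the signature are effectively ignored, and `labels`/`num_classes` are unused in the body shown above.

# Hypothetical usage: generate 8x128x1 outputs from 128-d noise vectors.
zs = tf.random_normal([16, 128])
labels = tf.zeros([16], dtype=tf.int32)
samples = generator_1d(zs, labels,
                       gf_dim=64,
                       num_classes=10,
                       output_shape=[8, 128, 1])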