Example 1
    def network(self, z, labels, reuse):
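        # Generator: project the noise z to an initial feature map, refine it
        # with spectrally normalized ResNet blocks (self-attention after the
        # second-to-last block), and sample the amino-acid output with the
        # Gumbel-Softmax (relaxed one-hot) trick.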

        # Fully connected
        i_shape = self.initial_shape
        h = ops.snlinear(z,
                         i_shape[1] * i_shape[2] * i_shape[3],
                         name='noise_linear')
        h = tf.reshape(h, i_shape)

        # Resnet architecture
        hidden_dim = self.starting_dim
        for layer_id in range(self.number_of_layers):
            self.log(h.shape)
            block_name, dilation_rate, hidden_dim, stride = self.get_block_params(
                hidden_dim, layer_id)
            h = self.add_sn_block(h, hidden_dim, block_name, dilation_rate,
                                  stride)
            if layer_id == self.number_of_layers - 2:
                h = self.add_attention(h, hidden_dim, reuse)
                hidden_dim = hidden_dim * 2

        # Final conv
        h_act = self.act(self.final_bn(h), name="h_act")
        last = ops.snconv2d(h_act, NUM_AMINO_ACIDS, (1, 1), name='last_conv')

        # Gumbel max trick
        out = RelaxedOneHotCategorical(temperature=self.get_temperature(True),
                                       logits=last).sample()
        return out
Example 2
def generator_resnet(zs,
                     labels,
                     gf_dim,
                     num_classes,
                     kernel=(3, 3),
                     strides=[(2, 2)],
                     dilations=(1, 1),
                     pooling='avg',
                     scope_name='Generator',
                     reuse=False,
                     output_shape=[8, 128, 1],
                     act=tf.nn.leaky_relu):
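    # ResNet generator: project the noise `zs` to a compressed feature map,
    # upsample it through spectrally normalized residual blocks with
    # self-attention after the second-to-last block, and emit the final
    # output through a tanh-activated, spectrally normalized convolution.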
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:

        height_d, width_d = get_dimentions_factors(strides)
        number_of_layers = len(strides)
        hidden_dim = gf_dim * (2**(number_of_layers - 1))

        c_h = int(output_shape[0] / height_d)
        c_w = int((output_shape[1] / width_d))
        h = ops.snlinear(zs, c_h * c_w * hidden_dim, name='noise_linear')
        h = tf.reshape(h, [-1, c_h, c_w, hidden_dim])
        print("COMPRESSED TO: ", h.shape)

        with tf.variable_scope("up", reuse=tf.AUTO_REUSE):
            for layer_id in range(number_of_layers):
                print(h.shape)
                block_name = 'up_block{}'.format(number_of_layers -
                                                 (layer_id + 1))
                dilation_rate = (dilations[0]**(number_of_layers - layer_id),
                                 dilations[1]**(number_of_layers - layer_id))
                h = sn_block(h, hidden_dim, block_name, get_kernel(h, kernel),
                             strides[layer_id], dilation_rate, act, pooling,
                             'VALID')
                tf.summary.histogram(block_name, h, family=scope_name)
                if layer_id == number_of_layers - 2:
                    h = attention(h, hidden_dim, sn=True, reuse=reuse)
                    tf.summary.histogram("up_attention", h, family=scope_name)
                hidden_dim = hidden_dim // strides[layer_id][1]  # integer division keeps the channel count an int

        bn = ops.BatchNorm(name='g_bn')
        h_act = tf.nn.leaky_relu(bn(h), name="h_act")
        if output_shape[2] == 1:
            out = tf.nn.tanh(ops.snconv2d(h_act,
                                          1, (output_shape[0], 1),
                                          name='last_conv'),
                             name="generated")
        else:
            out = tf.nn.tanh(ops.snconv2d(h_act, 3, (1, 1), name='last_conv'),
                             name="generated")
        tf.summary.histogram("Generated_results", out, family=scope_name)
        print("GENERATED SHAPE", out.shape)
        return out
Example 3
def discriminator_rnn(x,
                      labels,
                      df_dim,
                      number_classes,
                      kernel=(3, 3),
                      strides=(2, 2),
                      dilations=(1, 1),
                      pooling='avg',
                      update_collection=None,
                      act=tf.nn.relu,
                      scope_name='Discriminator',
                      reuse=False):
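    # RNN discriminator: feed the (squeezed and transposed) input through a
    # stacked bidirectional LSTM, score the last time step with a spectrally
    # normalized linear layer, and also return the top layer's concatenated
    # cell states as features.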
    num_layers = 3
    num_nodes = [int(8 / 2), df_dim, df_dim]
    x = tf.transpose(tf.squeeze(x), perm=[0, 2, 1])

    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()
        # Define LSTM cells
        enc_fw_cells = [
            LSTMCell(num_nodes[layer], name="fw_" + str(layer))
            for layer in range(num_layers)
        ]
        enc_bw_cells = [
            LSTMCell(num_nodes[layer], name="bw_" + str(layer))
            for layer in range(num_layers)
        ]

        # Connect LSTM cells bidirectionally and stack
        (all_states, fw_state,
         bw_state) = stack_bidirectional_dynamic_rnn(cells_fw=enc_fw_cells,
                                                     cells_bw=enc_bw_cells,
                                                     inputs=x,
                                                     dtype=tf.float32)

        # Concatenate results
        for k in range(num_layers):
            if k == 0:
                con_c = tf.concat((fw_state[k].c, bw_state[k].c), 1)
                con_h = tf.concat((fw_state[k].h, bw_state[k].h), 1)
            else:
                con_c = tf.concat((con_c, fw_state[k].c, bw_state[k].c), 1)
                con_h = tf.concat((con_h, fw_state[k].h, bw_state[k].h), 1)

        output = all_states[:, -1]  # score the top-layer output at the last time step
        output = ops.snlinear(output,
                              1,
                              update_collection=update_collection,
                              name='d_sn_linear')
    return output, tf.concat((fw_state[2].c, bw_state[2].c), 1)
Example 4
    def network(self, data, labels, reuse):
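        # ResNet discriminator: spectrally normalized residual blocks with
        # self-attention after the first block, a minibatch-stddev layer,
        # global sum pooling over the spatial dimensions, and a spectrally
        # normalized linear scoring layer.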
        tf.summary.histogram("Input", data, family=self.scope_name)
        h = data
        hidden_dim = self.dim
        for layer in range(len(self.strides)):
            print(h.shape)
            if layer == 1:
                h = attention(h, hidden_dim, sn=True, reuse=reuse)
                tf.summary.histogram("attention", h, family=self.scope_name)
            block_name = 'd_block{}'.format(layer)
            hidden_dim = hidden_dim * self.strides[layer][0]
            # dilation_rate = dilations[0] ** max(1, layer-2), dilations[1] ** max(1, layer-2)
            dilation_rate = (1, 1)
            h = sn_block(h,
                         hidden_dim,
                         block_name,
                         get_kernel(h, self.kernel),
                         self.strides[layer],
                         dilation_rate,
                         None,
                         self.act,
                         self.pooling,
                         padding='VALID')
            tf.summary.histogram(block_name, h, family=self.scope_name)

        end_block = self.act(h, name="after_resnet_block")
        tf.summary.histogram("after_resnet_block",
                             end_block,
                             family=self.scope_name)

        h_std = ops.minibatch_stddev_layer(end_block)
        tf.summary.histogram("minibatch_stddev_layer",
                             h_std,
                             family=self.scope_name)
        # h_std_conv_std = act(ops.snconv2d(h_std, hidden_dim, (1, 3), update_collection=update_collection,
        #                                  name='minibatch_stddev_stride', padding=None, strides=(1, 3)),
        #                     name="minibatch_stddev_stride_act")
        # tf.summary.histogram("after_mini_batch_std", h_std_conv_std, family=scope_name)
        # h_final_flattened = tf.layers.flatten(h_std_conv_std)
        h_final_flattened = tf.reduce_sum(h_std, [1, 2])
        tf.summary.histogram("h_final_flattened",
                             h_final_flattened,
                             family=self.scope_name)
        output = ops.snlinear(h_final_flattened, 1, name='d_sn_linear')
        tf.summary.histogram("final_output", output, family=self.scope_name)
        return output, h_final_flattened
Example 5
    def network(self, data, labels, reuse):
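        # Embedding-based discriminator: embed the amino-acid input, pass it
        # through spectrally normalized ResNet blocks with self-attention
        # after the first block, add a minibatch-stddev layer, and reduce to
        # a single score with a final convolution and linear layer.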

        # Embedding
        embedding_map_bar = self.get_embeddings(
            shape=[NUM_AMINO_ACIDS, self.dim])
        h = self.embedding_lookup(data, embedding_map_bar)

        # Resnet
        hidden_dim = self.dim
        for layer in range(len(self.strides)):
            self.log(h.shape)
            block_name, dilation_rate, hidden_dim, strides = self.get_block_params(
                hidden_dim, layer)
            h = self.add_sn_block(h, hidden_dim, block_name, dilation_rate,
                                  strides)
            if layer == 0:
                h = self.add_attention(h, hidden_dim, reuse)

        end_block = self.act(h, name="after_resnet_block")
        tf.summary.histogram("after_resnet_block",
                             end_block,
                             family=self.scope_name)
        h_std = ops.minibatch_stddev_layer_v2(end_block)
        tf.summary.histogram("minibatch_stddev_layer",
                             h_std,
                             family=self.scope_name)

        final_conv = ops.snconv2d(h_std,
                                  int(hidden_dim / 16), (1, 1),
                                  name='final_conv',
                                  padding=None)
        self.log(final_conv.shape)
        output = ops.snlinear(tf.squeeze(tf.layers.flatten(final_conv)),
                              1,
                              name='d_sn_linear')
        tf.summary.scalar(
            "1",
            tf.cast(
                tf.py_func(
                    lambda x, y: self.print_data(x, y),
                    [tf.squeeze(data), tf.squeeze(output)], tf.double),
                tf.float32))
        return output, end_block
Example 6
    def network(self, z, labels, reuse):
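        # Generator: project the noise z to a compressed feature map, upsample
        # it through spectrally normalized residual blocks with self-attention
        # after the second-to-last block, and finish with a tanh-activated
        # convolution producing either one channel or 21 output channels.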
        height_d, width_d = get_dimentions_factors(self.strides)
        number_of_layers = len(self.strides)
        hidden_dim = self.dim * (2**(number_of_layers - 1))
        c_h = int(self.height / height_d)
        c_w = int((self.length / width_d))
        h = ops.snlinear(z, c_h * c_w * hidden_dim, name='noise_linear')
        h = tf.reshape(h, [-1, c_h, c_w, hidden_dim])
        print("COMPRESSED TO: ", h.shape)

        with tf.variable_scope("up", reuse=reuse):
            for layer_id in range(number_of_layers):
                print(h.shape)
                block_name = 'up_block{}'.format(number_of_layers -
                                                 (layer_id + 1))
                dilation_rate = (1, 1)
                h = sn_block(h, hidden_dim, block_name,
                             get_kernel(h,
                                        self.kernel), self.strides[layer_id],
                             dilation_rate, self.act, self.pooling, 'VALID')
                tf.summary.histogram(block_name, h, family=self.scope_name)
                if layer_id == number_of_layers - 2:
                    h = attention(h, hidden_dim, sn=True, reuse=reuse)
                    tf.summary.histogram("up_attention",
                                         h,
                                         family=self.scope_name)
                hidden_dim = hidden_dim // self.strides[layer_id][1]  # keep the channel count an integer

        bn = ops.BatchNorm(name='g_bn')
        h_act = leaky_relu(bn(h), name="h_act")
        if self.output_shape[2] == 1:
            out = tf.nn.tanh(ops.snconv2d(h_act,
                                          1, (self.output_shape[0], 1),
                                          name='last_conv'),
                             name="generated")
        else:
            out = tf.nn.tanh(ops.snconv2d(h_act, 21, (1, 1), name='last_conv'),
                             name="generated")
        tf.summary.histogram("Generated_results", out, family=self.scope_name)
        print("GENERATED SHAPE", out.shape)
        return out
Example 7
def discriminator_resnet(x,
                         labels,
                         df_dim,
                         number_classes,
                         kernel=(3, 3),
                         strides=[(2, 2)],
                         dilations=(1, 1),
                         pooling='avg',
                         update_collection=None,
                         act=tf.nn.relu,
                         scope_name='Discriminator',
                         reuse=False):
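    # ResNet discriminator: spectrally normalized residual blocks with
    # self-attention after the first block and layer-dependent dilation
    # rates, followed by global sum pooling and a spectrally normalized
    # linear output layer.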
    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()
        tf.summary.histogram("Input", x, family=scope_name)
        h = x
        hidden_dim = df_dim
        for layer in range(len(strides)):
            print(h.shape)
            if layer == 1:
                h = attention(h, hidden_dim, sn=True, reuse=reuse)
                tf.summary.histogram("attention", h, family=scope_name)
            block_name = 'd_block{}'.format(layer)
            hidden_dim = hidden_dim * strides[layer][0]
            dilation_rate = (dilations[0]**(layer + 1),
                             dilations[1]**(layer + 1))
            h = sn_block(h,
                         hidden_dim,
                         block_name,
                         get_kernel(h, kernel),
                         strides[layer],
                         dilation_rate,
                         update_collection,
                         act,
                         pooling,
                         padding='VALID')
            tf.summary.histogram(block_name, h, family=scope_name)

        end_block = act(h, name="after_resnet_block")
        tf.summary.histogram("after_resnet_block",
                             end_block,
                             family=scope_name)

        # h_std = ops.minibatch_stddev_layer(end_block)
        # tf.summary.histogram(h_std.name, h_std, family=scope_name)
        # h_std_conv_std = act(ops.snconv2d(h_std, hidden_dim, (1, 3), update_collection=update_collection,
        #                                  name='minibatch_stddev_stride', padding=None, strides=(1, 3)),
        #                     name="minibatch_stddev_stride_act")
        # tf.summary.histogram("after_mini_batch_std", h_std_conv_std, family=scope_name)
        # h_final_flattened = tf.layers.flatten(h_std_conv_std)
        h_final_flattened = tf.reduce_sum(end_block, [1, 2])
        tf.summary.histogram("h_final_flattened",
                             h_final_flattened,
                             family=scope_name)
        output = ops.snlinear(h_final_flattened,
                              1,
                              update_collection=update_collection,
                              name='d_sn_linear')
        tf.summary.histogram("final_output", output, family=scope_name)
        # output = tf.Print(output, [tf.py_func(
        #     lambda val, score: print_protein_values(val,score),
        #     [tf.squeeze(x)[0], output[0]], tf.string)], "seq:")
        return output, h_final_flattened
Example 8
def generator_1d(zs,
                 labels,
                 gf_dim,
                 num_classes,
                 kernel=(3, 3),
                 strides=(2, 2),
                 dilations=(1, 1),
                 pooling='avg',
                 scope_name='Generator',
                 reuse=False,
                 output_shape=[8, 128, 1],
                 act=tf.nn.leaky_relu):
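    # 1-D generator: project the noise to a height-1 feature map, widen it
    # through spectrally normalized residual blocks with (1, 2) strides and
    # self-attention, reshape to the target height, and finish with two
    # spectrally normalized convolutions and a tanh.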
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:

        # strides_schedule = get_strides_schedule(input_width)
        strides_schedule = [(1, 2), (1, 2), (1, 2), (1, 2)]
        height_d, width_d = get_dimentions_factors(strides_schedule)

        number_of_layers = len(strides_schedule)
        hidden_dim = gf_dim * (2**(number_of_layers - 1))
        kernel = (1, 5)
        c_h = 1  # int(embedding_height / height_d)
        c_w = int((output_shape[1] / width_d))
        h = ops.snlinear(zs, c_h * int(c_w) * hidden_dim, name='noise_linear')
        tf.summary.histogram("noise_snlinear", h, family=scope_name)
        h = tf.reshape(h, shape=[-1, c_h, int(c_w), hidden_dim])
        # new_shape = [h.get_shape().as_list()[0], h.get_shape().as_list()[1],
        #              h.get_shape().as_list()[2] * 2, hidden_dim]
        # h = ops.sndeconv2d(h, new_shape, (c_h, 3), name='noise_expand', strides=(1, 2))
        # h = act(h)
        tf.summary.histogram("before_blocks", h, family=scope_name)
        print("COMPRESSED TO: ", h.shape)

        with tf.variable_scope("up", reuse=tf.AUTO_REUSE):
            for layer_id in range(number_of_layers):
                block_name = 'up_block{}'.format(number_of_layers -
                                                 (layer_id + 1))
                # dilation_rate = (dilations[0] ** (number_of_layers - layer_id),
                #                  dilations[1] ** (number_of_layers - layer_id))
                dilation_rate = (1, 1)
                h = sn_block(h, hidden_dim, block_name, kernel,
                             strides_schedule[layer_id], dilation_rate, act,
                             pooling, 'VALID')
                tf.summary.histogram(block_name, h, family=scope_name)
                if layer_id == number_of_layers - 2:
                    h = attention(h, hidden_dim, sn=True, reuse=reuse)
                    tf.summary.histogram("up_attention", h, family=scope_name)
                hidden_dim = hidden_dim // 2  # keep the channel count an integer

        bn = ops.BatchNorm(name='g_bn')
        h_act = tf.nn.leaky_relu(bn(h), name="h_act")
        h_act = tf.reshape(h_act, [
            -1, output_shape[0], output_shape[1],
            int(hidden_dim / output_shape[0]) * 2
        ])
        h_act = ops.snconv2d(h_act,
                             int(hidden_dim / output_shape[0]) * 2,
                             (output_shape[0], 1),
                             name='embedding_height')
        out = tf.nn.tanh(ops.snconv2d(h_act,
                                      1, (output_shape[0], 1),
                                      name='last_conv'),
                         name="generated")
        tf.summary.histogram("Generated_results", out, family=scope_name)
        print("GENERATED SHAPE", out.shape)
        return out