Example #1
def get_model(point_cloud, is_training, Transform=False, bn_decay=None):
    forloss = []
    idx = None
    if point_cloud.get_shape()[-1].value > 2:
        l0_xyz = point_cloud[:, :, 0:2]
        l0_points = point_cloud[:, :, 2:]
    else:
        l0_xyz = point_cloud
        l0_points = None


    if Transform:
        l0_xyz, tnet, l0_points = transform_moudule(l0_xyz, l0_points, is_training, idx,
                                                    [64, 128, 1024], [[1, 1], [1, 1], [1, 1]],
                                                    [512, 256], num_channle=32, nsample=32,
                                                    scope='Tnet1', bn_decay=bn_decay, bn=True, K=2)
        # forloss = tnet
    l0_points, plane_feature, _ = plane_module(l0_xyz, l0_points, idx, centralize=True, num_channle=32, npoint=512,
                                               have_sample=False,
                                               nsample=32, is_training=is_training, scope='plane', pool='avg',
                                               use_xyz=True, bn=True, bn_decay=bn_decay, weight_decay=0.0)

    new_points, _ = conv_module(l0_points, plane_feature, mlp=[64, 128, 128, 1024],
                                is_training=is_training, scope='conv', bn=True, bn_decay=bn_decay)
    # Fully connected layers
    net = util.fully_connected(new_points, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    net = util.fully_connected(net, 10, activation_fn=None, scope='fc3')


    return net, forloss
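A minimal wiring sketch (not part of the original example) of how get_model might be plugged into a 10-class training graph, assuming the helper modules used above (plane_module, conv_module, util) are importable; the placeholder names and shapes here are assumptions.

import tensorflow as tf

point_cloud = tf.placeholder(tf.float32, shape=(32, 1024, 3), name='point_cloud')
labels = tf.placeholder(tf.int32, shape=(32,), name='labels')
is_training = tf.placeholder(tf.bool, shape=(), name='is_training')

# get_model returns (logits, forloss); only the logits are needed for the loss
logits, _ = get_model(point_cloud, is_training, Transform=True, bn_decay=None)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)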
Example #2
    def __init__(self, optimizer, activation):
        super().__init__(optimizer, activation)

        ############################################################################
        #                             Define the graph                             #
        ############################################################################
        # It turns out that this network from ex03 is already capable of memorizing
        # the entire training or validation set, so we need to tweak generalization,
        # not capacity
        # To speed up convergence, we added batch normalization.
        # Our best configuration was the Adam optimizer with bs=32, lr=0.001
        # (only possible because of the normalization).
        x = tf.placeholder(tf.float32, shape=(None, 32, 32, 1), name='input')
        y_ = tf.placeholder(dtype=tf.int32, shape=(None,), name='labels')

        self.x = x
        self.y_ = y_

        kernel_shape1 = (5, 5, 1, 8)
        activation1 = conv_layer(x, kernel_shape1, activation=activation)

        normalize1 = batch_norm_layer(activation1)

        pool1 = weighted_pool_layer(
            normalize1, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1)
        )

        kernel_shape2 = (3, 3, 8, 10)
        activation2 = conv_layer(pool1, kernel_shape2, activation=activation)

        normalize2 = batch_norm_layer(activation2)

        pool2 = weighted_pool_layer(
            normalize2, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1)
        )

        pool2_reshaped = tf.reshape(pool2, (-1, 8 * 8 * 10), name='reshaped1')
        fc1 = fully_connected(pool2_reshaped, 512, with_activation=True,
                              activation=activation)

        fc2_logit = fully_connected(fc1, 10, activation=activation)

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=fc2_logit, labels=y_)
        mean_cross_entropy = tf.reduce_mean(cross_entropy)
        self.mean_cross_entropy = mean_cross_entropy
        train_step = optimizer.minimize(mean_cross_entropy)
        self.train_step = train_step
        self.prediction = tf.cast(tf.argmax(fc2_logit, 1), tf.int32)

        # check if neuron firing strongest coincides with max value position in real
        # labels
        correct_prediction = tf.equal(self.prediction, y_)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        self.accuracy = accuracy
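A hedged training-loop sketch for the graph above, matching the bs=32 / lr=0.001 Adam setting mentioned in the comments; the class name ConvBnNet and the batches helper are assumed names, not part of the original code.

optimizer = tf.train.AdamOptimizer(learning_rate=0.001)   # bs=32, lr=0.001 as noted above
net = ConvBnNet(optimizer, tf.nn.relu)                    # hypothetical name for this class

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for images, targets in batches(train_images, train_labels, batch_size=32):  # assumed helper
        _, acc = sess.run([net.train_step, net.accuracy],
                          feed_dict={net.x: images, net.y_: targets})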
Example #3
    def __init__(self, optimizer, activation):
        super().__init__(optimizer, activation)

        ############################################################################
        #                             Define the graph                             #
        ############################################################################
        # It turns out that this network from ex03 is already capable of memorizing
        # the entire training or validation set, so we need to tweak generalization,
        # not capacity
        x = tf.placeholder(tf.float32, shape=(None, 32, 32, 1), name='input')
        y_ = tf.placeholder(dtype=tf.int32, shape=(None, ), name='labels')

        self.x = x
        self.y_ = y_

        kernel_shape1 = (5, 5, 1, 16)
        activation1 = conv_layer(x, kernel_shape1, activation=activation)

        pool1 = max_pool_layer(activation1,
                               ksize=(1, 2, 2, 1),
                               strides=(1, 2, 2, 1))

        kernel_shape2 = (3, 3, 16, 32)
        activation2 = conv_layer(pool1, kernel_shape2, activation=activation)

        pool2 = max_pool_layer(activation2,
                               ksize=(1, 2, 2, 1),
                               strides=(1, 2, 2, 1))

        pool2_reshaped = tf.reshape(pool2, (-1, 2048), name='reshaped1')
        fc1 = fully_connected(pool2_reshaped,
                              512,
                              with_activation=True,
                              activation=activation)

        fc2_logit = fully_connected(fc1, 10, activation=activation)

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=fc2_logit, labels=y_)
        mean_cross_entropy = tf.reduce_mean(cross_entropy)
        self.mean_cross_entropy = mean_cross_entropy
        train_step = optimizer.minimize(mean_cross_entropy)
        self.train_step = train_step

        # check if neuron firing strongest coincides with max value position in real
        # labels
        correct_prediction = tf.equal(
            tf.argmax(fc2_logit, 1, output_type=tf.int32), y_)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        self.accuracy = accuracy
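A quick check of where the 2048 in the reshape comes from, assuming the conv_layer helper defaults to SAME padding (which is what the 2048 implies): two stride-2 poolings take the 32x32 input down to 8x8, and the second conv has 32 channels.

side = 32
for _ in range(2):      # pool1, pool2 with stride 2
    side //= 2
assert side * side * 32 == 2048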
Example #4
def transform_moudule(xyz, points, is_training, idx, num_out_conv, size_conv, num_out_fc,
                      num_channle, nsample, scope, bn_decay, bn=True, K=3):
    with tf.variable_scope(scope) as sc:
        new_points, net, idx = plane_module(xyz, points, idx, centralize=True,
                                            num_channle=num_channle, npoint=1024, nsample=nsample,
                                            is_training=is_training, scope='Tnet-plane', pool='avg',
                                            use_xyz=True, bn=bn, bn_decay=bn_decay, weight_decay=0.0)


        batch_size = tf.shape(xyz)[0]
        for i in range(len(num_out_conv)):
            net = util.conv2d(net, num_out_conv[i], size_conv[i], scope='tconv' + str(i + 1),
                              stride=[1, 1], bn=True, bn_decay=bn_decay,
                              is_training=is_training, padding='VALID')
        net = tf.squeeze(net, 2)
        net = tf.reduce_max(net, axis=1)

        for i in range(len(num_out_fc)):
            net = util.fully_connected(net, num_out_fc[i], scope='tfc' + str(i + 1),
                                       bn_decay=bn_decay, bn=True, is_training=is_training)
        with tf.variable_scope(scope) as sc:
            weights = tf.get_variable('weights', [256, K * K],
                                      initializer=tf.constant_initializer(0.0),
                                      dtype=tf.float32)
            biases = tf.get_variable('biases', [K * K],
                                     initializer=tf.constant_initializer(0.0),
                                     dtype=tf.float32)
            biases += tf.constant(np.eye(K).flatten(), dtype=tf.float32)
            transform = tf.matmul(net, weights)
            transform = tf.nn.bias_add(transform, biases)

        transform = tf.reshape(transform, [batch_size, K, K])

        xyz = tf.matmul(xyz, transform)
        if new_points is not None:
            new_points = tf.matmul(new_points, transform)
    return xyz, transform, new_points
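A small NumPy-only sanity sketch (an illustration, not from the original code): because the weights are zero-initialized and the biases are offset by a flattened identity, the predicted transform starts out as the K x K identity, so the point cloud is initially left unchanged.

import numpy as np

K = 3
net_out = np.random.randn(4, 256).astype(np.float32)       # stand-in for the FC output, batch of 4
weights = np.zeros((256, K * K), dtype=np.float32)
biases = np.zeros(K * K, dtype=np.float32) + np.eye(K).flatten()
transform = (net_out @ weights + biases).reshape(4, K, K)
assert np.allclose(transform, np.eye(K))                    # identity at initialization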
Example #5
    def action_value_function(self, input, reuse=True, scope='Action_Value'):
        # should be created with invariance to batch size
        # input should be a placeholder
        # outputs have shape [batch_size, num_actions]
        with tf.variable_scope(scope, reuse=reuse):
            out = tf.cast(input, tf.float32)
            conv_params = self.params['conv_layers']
            for i in range(len(conv_params)):
                clp = conv_params[i]
                out = ut.conv2d(out, clp[0], clp[1], clp[2],
                                'conv2d%d' % (i + 1))
                out = tf.nn.relu(out)
            out = ut.fully_connected(out, self.params['FC_layer'],
                                     'fully_con1')
            out = tf.nn.relu(out)
            out = ut.fully_connected(out, self.params['n_output'],
                                     'fully_con2', False)
            return out
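A hedged usage sketch: epsilon-greedy action selection on top of the Q-values returned above. The names agent, obs_ph and num_actions are assumptions, not part of the original class.

q_values = agent.action_value_function(obs_ph, reuse=False)
greedy_action = tf.argmax(q_values, axis=1)

def select_action(sess, observation, epsilon=0.1):
    if np.random.rand() < epsilon:
        return np.random.randint(num_actions)                       # explore
    return sess.run(greedy_action, {obs_ph: observation[None]})[0]  # exploit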
Example #6
    def farword(self, inputs_image):
        inputs = tf.reshape(inputs_image, [-1, 58, 58, 1])
        with tf.variable_scope('conv1'):
            W1 = tf.Variable(tf.random_normal([3, 3, 1, 16]), name='w1')
            net = conv2d(inputs, W1)
            net = pooling(net)
        with tf.variable_scope('conv2'):
            W2 = tf.Variable(tf.random_normal([3, 3, 16, 16]), name='w2')
            net = conv2d(net, W2)
            net = pooling(net)
        with tf.variable_scope('fc'):
            logits = fully_connected(net)
        return logits
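A hedged usage sketch: the reshape above suggests flattened 58x58 single-channel inputs; the placeholder and instance names here are assumptions.

inputs_image = tf.placeholder(tf.float32, shape=(None, 58 * 58), name='inputs_image')
logits = model.farword(inputs_image)   # `model` is an instance of the class above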
Example #7
    def __init__(self, learning_rate, **kwargs):
        '''Respected kwargs are

        Parameters
        ----------
        steps   :   int
                    Decay learning rate according to `exponential_decay` every `steps` steps
        decay   :   decay factor for `exponential_decay`
        learning_rate   :   base learning rate for `exponential_decay`
        '''

        ######################################
        #  Create the dynamic learning rate  #
        ######################################
        step_counter = tf.Variable(0,
                                   trainable=False,
                                   dtype=tf.int32,
                                   name='step_counter')
        steps = kwargs.get('steps', 100)
        decay = kwargs.get('decay', 0.8)
        learning_rate = tf.train.exponential_decay(learning_rate, step_counter,
                                                   steps, decay)

        ############################################################################################
        #                                    Define the network                                    #
        ############################################################################################
        self.observations = tf.placeholder(tf.float32,
                                           shape=[1, 4],
                                           name='observations')
        hidden_layer = fully_connected(self.observations,
                                       8,
                                       with_activation=True,
                                       activation=tf.nn.relu)
        probability = fully_connected(hidden_layer,
                                      1,
                                      with_activation=True,
                                      activation=tf.nn.sigmoid)
        complementary = tf.subtract(1.0, probability)
        output = tf.concat([probability, complementary],
                           1,
                           name='action_probabilities')
        log_likelihoods = tf.log(output)
        self.action = tf.multinomial(log_likelihoods, num_samples=1)[0][0]
        log_likelihood = log_likelihoods[:, tf.to_int32(self.action)]

        optimizer = tf.train.AdamOptimizer(learning_rate)
        grads_and_vars = optimizer.compute_gradients(log_likelihood)
        self.gradients = [grad * -1 for (grad, _) in grads_and_vars]

        # Gradients must be fed for training
        self.grad_placeholders = []
        for i, gradient in enumerate(self.gradients):
            self.grad_placeholders.append(
                tf.placeholder(tf.float32,
                               gradient.shape,
                               name=f'gradient_{i}'))
        self.grad_dummies = [
            np.zeros(grad.shape) for grad in self.grad_placeholders
        ]

        tvars = tf.trainable_variables()
        self.training_step = optimizer.apply_gradients(
            zip(self.grad_placeholders, tvars), global_step=step_counter)
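A hedged sketch of the intended REINFORCE-style loop: collect the per-step gradients, weight the accumulated sums by the episode return, and feed them back through the gradient placeholders. The names net, sess and the gym-style env are assumptions.

grad_sums = [np.zeros(ph.shape.as_list()) for ph in net.grad_placeholders]
obs, done, episode_return = env.reset(), False, 0.0
while not done:
    action, grads = sess.run([net.action, net.gradients],
                             feed_dict={net.observations: obs[None, :]})
    obs, reward, done, _ = env.step(action)
    episode_return += reward
    for acc, g in zip(grad_sums, grads):
        acc += g
sess.run(net.training_step,
         feed_dict=dict(zip(net.grad_placeholders,
                            [episode_return * g for g in grad_sums])))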
Example #8
    def __init__(self, optimizer, activation):
        super().__init__(optimizer, activation)

        x = tf.placeholder(tf.float32, shape=(None, 32, 32, 1), name='input')
        y_ = tf.placeholder(dtype=tf.int32, shape=(None, ), name='labels')

        self.x = x
        self.y_ = y_
        # logits = (LinearWrap(image)
        #               .Conv2D('conv1', 24, 5, padding='VALID')
        #               .MaxPooling('pool1', 2, padding='SAME')
        #               .Conv2D('conv2', 32, 3, padding='VALID')
        #               .Conv2D('conv3', 32, 3, padding='VALID')
        #               .MaxPooling('pool2', 2, padding='SAME')
        #               .Conv2D('conv4', 64, 3, padding='VALID')
        #               .Dropout('drop', 0.5)
        #               .FullyConnected('fc0', 512,
        #                               b_init=tf.constant_initializer(0.1), nl=tf.nn.relu)
        #               .FullyConnected('linear', out_dim=10, nl=tf.identity)())
        # tf.nn.softmax(logits, name='output')

        l1 = conv_layer(x, (5, 5, 1, 24),
                        activation=None,
                        padding='VALID',
                        use_bias=False)
        l2 = tf.nn.relu(batch_norm_layer(l1))

        l3 = tf.nn.max_pool(l2, (1, 2, 2, 1), (1, 2, 2, 1), padding='SAME')

        l4 = conv_layer(l3, (3, 3, 24, 32),
                        padding='VALID',
                        activation=None,
                        use_bias=False)
        l5 = tf.nn.relu(batch_norm_layer(l4))

        l6 = conv_layer(l5, (3, 3, 32, 32),
                        padding='VALID',
                        activation=None,
                        use_bias=False)
        l7 = tf.nn.relu(batch_norm_layer(l6))

        l8 = tf.nn.max_pool(l7, (1, 2, 2, 1), (1, 2, 2, 1), padding='SAME')

        l8_ = conv_layer(l8, (3, 3, 32, 64),
                         padding='VALID',
                         activation=None,
                         use_bias=False)
        l9 = tf.nn.relu(batch_norm_layer(l8_))

        l10 = tf.nn.dropout(l9, 0.5)

        l11 = tf.reshape(l10, (-1, 3 * 3 * 64), name='reshaped1')

        l12 = fully_connected(l11,
                              512,
                              with_activation=True,
                              activation=tf.nn.relu)

        l13 = fully_connected(l12, 10, with_activation=False, use_bias=False)

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=l13, labels=y_)
        mean_cross_entropy = tf.reduce_mean(cross_entropy)
        self.mean_cross_entropy = mean_cross_entropy
        train_step = optimizer.minimize(mean_cross_entropy)
        self.train_step = train_step
        self.prediction = tf.cast(tf.argmax(l13, 1), tf.int32)

        # check if neuron firing strongest coincides with max value position in real
        # labels
        correct_prediction = tf.equal(self.prediction, y_)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        self.accuracy = accuracy
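A short walk-through of where the 3 * 3 * 64 in the reshape comes from: each VALID convolution shrinks the spatial size by kernel_size - 1, and each stride-2 SAME pooling halves it.

size = 32
size -= 5 - 1    # conv1 5x5 VALID -> 28
size //= 2       # pool1           -> 14
size -= 3 - 1    # conv2 3x3 VALID -> 12
size -= 3 - 1    # conv3 3x3 VALID -> 10
size //= 2       # pool2           -> 5
size -= 3 - 1    # conv4 3x3 VALID -> 3
assert size * size * 64 == 3 * 3 * 64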
Example #9
    def __init__(self, **kwargs):
        '''The following arguments are accepted:

        Parameters
        ----------
        vocab_size  :   int
                        Size of the vocabulary for creating embeddings
        embedding_size  :   int
                            Dimensionality of the embedding space
        memory_size :   int
                        LSTM memory size
        keep_prob   :   float
                        Keep probability (1 - dropout rate) for embedding and LSTM
        subsequence_length  :   int
                                Length of the subsequences (all embeddings are padded to this
                                length)
        optimizer   :   OptimizerSpec
        '''
        ############################################################################################
        #                                 Get all hyperparameters                                  #
        ############################################################################################
        vocab_size = kwargs['vocab_size']
        embedding_size = kwargs['embedding_size']
        memory_size = kwargs['memory_size']
        keep_prob = kwargs['keep_prob']
        subsequence_length = kwargs['subsequence_length']
        optimizer_spec = kwargs['optimizer']
        optimizer = optimizer_spec.create()
        self.learning_rate = optimizer_spec.learning_rate
        self.step_counter = optimizer_spec.step_counter

        ############################################################################################
        #                                        Net inputs                                        #
        ############################################################################################
        self.batch_size = placeholder(tf.int32, shape=[], name='batch_size')
        self.is_training = placeholder(tf.bool, shape=[], name='is_training')
        self.word_ids = placeholder(tf.int32,
                                    shape=(None, subsequence_length),
                                    name='word_ids')
        self.labels = placeholder(tf.int32, shape=(None, ), name='labels')
        self.hidden_state = placeholder(tf.float32,
                                        shape=(None, memory_size),
                                        name='hidden_state')
        self.cell_state = placeholder(tf.float32,
                                      shape=(None, memory_size),
                                      name='cell_state')

        lengths = sequence_lengths(self.word_ids)

        ############################################################################################
        #                                        Embedding                                         #
        ############################################################################################
        self.embedding_matrix, _bias = get_weights_and_bias(
            (vocab_size, embedding_size))
        embeddings = cond(
            self.is_training,
            lambda: nn.dropout(nn.embedding_lookup(self.embedding_matrix, self.word_ids),
                               keep_prob=keep_prob),
            lambda: nn.embedding_lookup(self.embedding_matrix, self.word_ids))

        ############################################################################################
        #                                        LSTM layer                                        #
        ############################################################################################
        cell = BasicLSTMCell(memory_size, activation=tf.nn.tanh)

        # during inference, use entire ensemble
        keep_prob = cond(self.is_training, lambda: constant(keep_prob),
                         lambda: constant(1.0))
        cell = DropoutWrapper(cell, output_keep_prob=keep_prob)

        # what's the difference to just creating a zero-filled tensor tuple?
        self.zero_state = cell.zero_state(self.batch_size, tf.float32)
        state = LSTMStateTuple(h=self.cell_state, c=self.hidden_state)

        # A dynamic rnn creates the graph on the fly, so it can deal with embeddings of different
        # lengths. We do not need to unstack the embedding tensor to get rows, instead we compute
        # the actual sequence lengths and pass that
        # We are not sure how any of this works. Do we need to mask the cost function so the cell
        # outputs for _NOT_A_WORD_ inputs are ignored? Is the final cell state really relevant if it
        # was last updated with _NOT_A_WORD_ input? Does static_rnn absolve us of any of those
        # issues?
        outputs, self.state = nn.dynamic_rnn(cell,
                                             embeddings,
                                             sequence_length=lengths,
                                             initial_state=state)
        # Recreate tensor from list
        outputs = reshape(concat(outputs, 1),
                          [-1, subsequence_length * memory_size])
        self.outputs = reduce_mean(outputs)

        ############################################################################################
        #                        Fully connected layer, loss, and training                         #
        ############################################################################################
        ff1 = fully_connected(outputs, 2, with_activation=False, use_bias=True)
        loss = reduce_mean(
            nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels,
                                                        logits=ff1))
        self.train_step = optimizer.minimize(loss,
                                             global_step=self.step_counter)
        self.predictions = nn.softmax(ff1)
        correct_prediction = equal(cast(argmax(self.predictions, 1), tf.int32),
                                   self.labels)
        self.accuracy = reduce_mean(cast(correct_prediction, tf.float32))

        ############################################################################################
        #                                     Create summaries                                     #
        ############################################################################################
        with tf.variable_scope('summary'):
            self.summary_loss = tf.summary.scalar('loss', loss)
            self.summary_accuracy = tf.summary.scalar('accuracy',
                                                      self.accuracy)
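A hedged usage sketch of carrying the LSTM state across consecutive subsequences; net, sess, bs and subsequence_batches are assumed names. Note that the constructor above maps the hidden_state placeholder to the tuple's c slot and cell_state to h, so the carried values are paired accordingly here.

c_val, h_val = sess.run(net.zero_state, feed_dict={net.batch_size: bs})
for word_ids in subsequence_batches:
    feed = {net.word_ids: word_ids,
            net.is_training: False,
            net.batch_size: bs,
            net.hidden_state: c_val,
            net.cell_state: h_val}
    state, preds = sess.run([net.state, net.predictions], feed_dict=feed)
    c_val, h_val = state.c, state.h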