Example 1
 def build_network(self):
     """
     Building network for mnist
     """
     with tf.name_scope('reshape'):
         try:
             input_dim = int(math.sqrt(self.x_dim))
         except:
             print('input dim cannot be sqrt and reshape. input dim: ' +
                   str(self.x_dim))
             logger.debug(
                 'input dim cannot be sqrt and reshape. input dim: %s',
                 str(self.x_dim))
             raise
         x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])

     with tf.name_scope('conv1'):
         w_conv1 = weight_variable(
             [self.conv_size, self.conv_size, 1, self.channel_1_num])
         b_conv1 = bias_variable([self.channel_1_num])
         h_conv1 = nni.function_choice(
             lambda: tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1),
             lambda: tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1),
             lambda: tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1),
             name='tf.nn.relu')

     with tf.name_scope('pool1'):
         h_pool1 = nni.function_choice(
             lambda: max_pool(h_conv1, self.pool_size),
             lambda: avg_pool(h_conv1, self.pool_size),
             name='max_pool')

     with tf.name_scope('conv2'):
         w_conv2 = weight_variable(
             [self.conv_size, self.conv_size, self.channel_1_num,
              self.channel_2_num])
         b_conv2 = bias_variable([self.channel_2_num])
         h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)

     with tf.name_scope('pool2'):
         h_pool2 = max_pool(h_conv2, self.pool_size)

     last_dim = int(input_dim / (self.pool_size * self.pool_size))
     with tf.name_scope('fc1'):
         w_fc1 = weight_variable(
             [last_dim * last_dim * self.channel_2_num, self.hidden_size])
         b_fc1 = bias_variable([self.hidden_size])

     h_pool2_flat = tf.reshape(
         h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
     h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)

     with tf.name_scope('dropout'):
         h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)

     with tf.name_scope('fc2'):
         w_fc2 = weight_variable([self.hidden_size, self.y_dim])
         b_fc2 = bias_variable([self.y_dim])
         y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2

     with tf.name_scope('loss'):
         cross_entropy = tf.reduce_mean(
             tf.nn.softmax_cross_entropy_with_logits(labels=self.labels,
                                                     logits=y_conv))

     with tf.name_scope('adam_optimizer'):
         self.train_step = tf.train.AdamOptimizer(
             self.learning_rate).minimize(cross_entropy)

     with tf.name_scope('accuracy'):
         correct_prediction = tf.equal(tf.argmax(y_conv, 1),
                                       tf.argmax(self.labels, 1))
         self.accuracy = tf.reduce_mean(
             tf.cast(correct_prediction, tf.float32))
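
The build_network method above relies on helpers (weight_variable, bias_variable, conv2d, max_pool, avg_pool) defined elsewhere in the original file. A minimal sketch of plausible TensorFlow 1.x definitions is shown below for context only; the initializer constants and 'SAME' padding are assumptions, not taken from the example.

import tensorflow as tf

# Hedged sketch of the helpers assumed by build_network() above.
# Initializer constants and 'SAME' padding are assumptions.
def weight_variable(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, w):
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

def max_pool(x, pool_size):
    return tf.nn.max_pool(x, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')

def avg_pool(x, pool_size):
    return tf.nn.avg_pool(x, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')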
Example 2
 def test_default_name_func(self):
     val = nni.function_choice({
         'max(1, 2, 3)': lambda: max(1, 2, 3),
         'min(1, 2)':
         lambda: min(1, 2)  # NOTE: assign this line number to lineno2
     })
     self.assertEqual(val, 3)
Example 3
 def test_lambda_func(self):
     val = nni.function_choice(
         {
             "lambda: 2*3": lambda: 2 * 3,
             "lambda: 3*4": lambda: 3 * 4
         },
         name='lambda_func')
     self.assertEqual(val, 6)
Example 4
 def test_func(self):
     val = nni.function_choice({
         'foo': foo,
         'bar': bar
     },
                               name='func',
                               key='test_smartparam/func/function_choice')
     self.assertEqual(val, 'bar')
Example 5
 def test_lambda_func(self):
     val = nni.function_choice(
         {
             "lambda: 2*3": lambda: 2 * 3,
             "lambda: 3*4": lambda: 3 * 4
         },
         name='lambda_func',
         key='test_smartparam/lambda_func/function_choice')
     self.assertEqual(val, 6)
Example 6
 def test_default_name_func(self):
     val = nni.function_choice(
         lambda: max(1, 2, 3),
         lambda: 2 * 2  # NOTE: assign this line number to lineno2
     )
     self.assertEqual(val, 3)
Example 7
 def test_specified_name_func(self):
     val = nni.function_choice(foo, bar, name='func')
     self.assertEqual(val, 'bar')
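
Examples 4 and 7 choose between two plain functions, foo and bar, which are defined elsewhere in the original test module; the assertion on 'bar' holds because the test harness pre-seeds the parameters the trial receives. A hedged sketch of what those helpers could look like:

# Hedged sketch only; the exact bodies of foo and bar in the
# original test module are assumptions.
def foo():
    return 'foo'

def bar():
    return 'bar'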
Example 8
import nni


def max_pool(k):
    pass


h_conv1 = 1
conv_size = nni.choice(2, 3, 5, 7, name='conv_size')
h_pool1 = nni.function_choice(lambda: max_pool(h_conv1),
                              lambda: avg_pool(h_conv2, h_conv3),
                              name='max_pool')
test_acc = 1
nni.report_intermediate_result(test_acc)
test_acc = 2
nni.report_final_result(test_acc)
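
In this annotated trial, nni.choice and nni.function_choice select a value or branch from the parameters the tuner generates, and only the selected lambda is evaluated; that is why names such as avg_pool, h_conv2 and h_conv3 can stay undefined in the fixture. For orientation, a hand-written sketch of the kind of search space these two annotations roughly correspond to is given below. The key names and the index encoding of function_choice are assumptions; only the _type/_value layout follows NNI's search_space.json format.

# Hedged sketch of a possible search space for the annotations above,
# written as a Python dict mirroring search_space.json. Key names and the
# index-based encoding of function_choice are assumptions.
search_space = {
    'conv_size': {'_type': 'choice', '_value': [2, 3, 5, 7]},
    'max_pool': {'_type': 'choice', '_value': [0, 1]},  # index of the chosen lambda
}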
Example 9
import nni


def max_pool(k):
    pass


h_conv1 = 1
nni.choice({'foo': foo, 'bar': bar})(1)
conv_size = nni.choice({2: 2, 3: 3, 5: 5, 7: 7}, name='conv_size')
abc = nni.choice({'2': '2', 3: 3, '(5 * 6)': 5 * 6, 7: 7}, name='abc')
h_pool1 = nni.function_choice({
    'max_pool': lambda: max_pool(h_conv1),
    'h_conv1': lambda: h_conv1,
    'avg_pool': lambda: avg_pool(h_conv2, h_conv3)
})
h_pool1 = nni.function_choice(
    {
        'max_pool(h_conv1)': lambda: max_pool(h_conv1),
        'avg_pool(h_conv2, h_conv3)': lambda: avg_pool(h_conv2, h_conv3)
    },
    name='max_pool')
h_pool2 = nni.function_choice(
    {
        'max_poo(h_conv1)': lambda: max_poo(h_conv1),
        '(2 * 3 + 4)': lambda: 2 * 3 + 4,
        '(lambda x: 1 + x)': lambda: lambda x: 1 + x
    },
    name='max_poo')
tmp = nni.qlognormal(1.2, 3, 4.5)
test_acc = 1
Example 10
    def build_network(self):
        '''
        Building network for mnist
        '''

        # Reshape to use within a convolutional neural net.
        # Last dimension is for "features" - there is only one here, since images are
        # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
        with tf.name_scope('reshape'):
            try:
                input_dim = int(math.sqrt(self.x_dim))
            except:
                print('input dim cannot be sqrt and reshape. input dim: ' +
                      str(self.x_dim))
                logger.debug(
                    'input dim cannot be sqrt and reshape. input dim: %s',
                    str(self.x_dim))
                raise
            x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])

        # First convolutional layer - maps one grayscale image to 32 feature maps.
        with tf.name_scope('conv1'):
            w_conv1 = weight_variable(
                [self.conv_size, self.conv_size, 1, self.channel_1_num])
            b_conv1 = bias_variable([self.channel_1_num])
            h_conv1 = nni.function_choice(
                lambda: tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1),
                lambda: tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1),
                lambda: tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1))  # example: without name

        # Pooling layer - downsamples by 2X.
        with tf.name_scope('pool1'):
            h_pool1 = max_pool(h_conv1, self.pool_size)  # plain pooling; immediately replaced by the tuned choice below
            h_pool1 = nni.function_choice(
                lambda: max_pool(h_conv1, self.pool_size),
                lambda: avg_pool(h_conv1, self.pool_size),
                name='h_pool1')

        # Second convolutional layer -- maps 32 feature maps to 64.
        with tf.name_scope('conv2'):
            w_conv2 = weight_variable([
                self.conv_size, self.conv_size, self.channel_1_num,
                self.channel_2_num
            ])
            b_conv2 = bias_variable([self.channel_2_num])
            h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)

        # Second pooling layer.
        with tf.name_scope('pool2'):  # example: another style
            h_pool2 = max_pool(h_conv2, self.pool_size)

        # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image
        # is down to 7x7x64 feature maps -- maps this to 1024 features.
        last_dim = int(input_dim / (self.pool_size * self.pool_size))
        with tf.name_scope('fc1'):
            w_fc1 = weight_variable(
                [last_dim * last_dim * self.channel_2_num, self.hidden_size])
            b_fc1 = bias_variable([self.hidden_size])

        h_pool2_flat = tf.reshape(
            h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)

        # Dropout - controls the complexity of the model, prevents co-adaptation of features.
        with tf.name_scope('dropout'):
            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)

        # Map the 1024 features to 10 classes, one for each digit
        with tf.name_scope('fc2'):
            w_fc2 = weight_variable([self.hidden_size, self.y_dim])
            b_fc2 = bias_variable([self.y_dim])
            y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2

        with tf.name_scope('loss'):
            cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=self.labels,
                                                        logits=y_conv))
        with tf.name_scope('adam_optimizer'):
            self.train_step = tf.train.AdamOptimizer(
                self.learning_rate).minimize(cross_entropy)

        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(tf.argmax(y_conv, 1),
                                          tf.argmax(self.labels, 1))
            self.accuracy = tf.reduce_mean(
                tf.cast(correct_prediction, tf.float32))
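
Example 10 only builds the graph. In a full trial, the graph would be run in a session and the accuracy fed back to NNI with the same report calls used in Example 8. A condensed, hedged sketch of such a loop follows; net, mnist, the batch size and the step counts are assumptions introduced for illustration.

# Hedged sketch of a TF 1.x training loop around the network built above.
# `net` (an instance exposing the attributes used in Example 10) and `mnist`
# (e.g. loaded via tensorflow.examples.tutorials.mnist) are assumptions.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        batch_x, batch_y = mnist.train.next_batch(64)
        sess.run(net.train_step, feed_dict={net.images: batch_x,
                                            net.labels: batch_y,
                                            net.keep_prob: 0.5})
        if step % 100 == 0:
            acc = sess.run(net.accuracy, feed_dict={net.images: mnist.test.images,
                                                    net.labels: mnist.test.labels,
                                                    net.keep_prob: 1.0})
            nni.report_intermediate_result(acc)
    acc = sess.run(net.accuracy, feed_dict={net.images: mnist.test.images,
                                            net.labels: mnist.test.labels,
                                            net.keep_prob: 1.0})
    nni.report_final_result(acc)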