def _create_model(self):
    input_im = self.model_input[0]
    keep_prob = self.model_input[1]

    conv_out = self._create_conv(input_im)

    init_b = tf.truncated_normal_initializer(stddev=0.01)
    # 3x3 conv producing the feature maps used for the class activation map
    conv_cam = conv(conv_out, 3, 1024, 'conv_cam',
                    nl=tf.nn.relu, wd=0.01, init_b=init_b)
    gap = global_avg_pool(conv_cam)
    dropout_gap = dropout(gap, keep_prob, self.is_training)

    with tf.variable_scope('cam'):
        init = tf.truncated_normal_initializer(stddev=0.01)
        # fc_w shape: (1024, num_class)
        fc_w = new_weights(
            'weights', 1,
            [gap.get_shape().as_list()[-1], self._num_class],
            initializer=init, wd=0.01)
        fc_cam = tf.matmul(dropout_gap, fc_w, name='output')

    self.output = tf.identity(fc_cam, 'model_output')
    self.prediction = tf.argmax(fc_cam, name='pre_label', axis=-1)
    self.prediction_pro = tf.nn.softmax(fc_cam, name='pre_pro')

    if self._inspect_class is not None:
        with tf.name_scope('classmap'):
            self.get_classmap(self._inspect_class, conv_cam, input_im)
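# get_classmap is defined elsewhere in the class; below is a minimal sketch
# of the standard CAM computation (Zhou et al., CVPR 2016), assuming
# new_weights creates its variable through tf.get_variable so it can be
# re-fetched under reuse=True. Apart from the 'cam'/'weights' names and the
# 1024-channel conv_cam above, everything here is an assumption, not the
# repo's actual implementation.
def get_classmap(self, class_id, conv_out, input_im):
    im_h = tf.shape(input_im)[1]
    im_w = tf.shape(input_im)[2]
    n_chan = conv_out.get_shape().as_list()[-1]  # 1024 for conv_cam above

    # Upsample the feature maps to the input resolution.
    conv_resized = tf.image.resize_bilinear(conv_out, [im_h, im_w])

    # Re-fetch the GAP classifier weights and take the column belonging
    # to the inspected class.
    with tf.variable_scope('cam', reuse=True):
        fc_w = tf.get_variable('weights')        # (1024, num_class)
    class_w = tf.reshape(fc_w[:, class_id], [1, 1, 1, n_chan])

    # CAM_c(x, y) = sum_k w_k^c * f_k(x, y)
    self.classmap = tf.reduce_sum(conv_resized * class_w,
                                  axis=-1, name='classmap')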
def _create_conv(self, inputs):
    self.dropout = tf.placeholder(tf.float32, name='dropout')

    mlpconv_1 = mlpconv(inputs, filter_size=8, hidden_size=[96, 96],
                        name='mlpconv_1', wd=self._wd)
    # Alternative block from the original NIN paper:
    # mlpconv_1 = mlpconv(inputs, filter_size=5,
    #                     hidden_size=[192, 160, 96],
    #                     name='mlpconv_1', wd=self._wd)
    mlpconv_1 = max_pool(mlpconv_1, 'pool1', padding='SAME')
    mlpconv_1 = dropout(mlpconv_1, self.dropout, self._is_traing)
    mlpconv_1 = batch_norm(mlpconv_1, train=self._is_traing, name='bn_1')

    mlpconv_2 = mlpconv(mlpconv_1, filter_size=8, hidden_size=[192, 192],
                        name='mlpconv_2', wd=self._wd)
    # mlpconv_2 = mlpconv(mlpconv_1, filter_size=5,
    #                     hidden_size=[192, 192, 192],
    #                     name='mlpconv_2', wd=self._wd)
    mlpconv_2 = max_pool(mlpconv_2, 'pool2', padding='SAME')
    mlpconv_2 = dropout(mlpconv_2, self.dropout, self._is_traing)
    mlpconv_2 = batch_norm(mlpconv_2, train=self._is_traing, name='bn_2')

    # The last block maps the features directly to class scores.
    mlpconv_3 = mlpconv(mlpconv_2, filter_size=5,
                        hidden_size=[192, self._n_class],
                        name='mlpconv_3', wd=self._wd)
    # mlpconv_3 = mlpconv(mlpconv_2, filter_size=3,
    #                     hidden_size=[192, 192, self._n_class],
    #                     name='mlpconv_3', wd=self._wd)
    # mlpconv_3 = max_pool(mlpconv_3, 'pool3', padding='SAME')
    # mlpconv_3 = dropout(mlpconv_3, 0.5, self._is_traing)
    return mlpconv_3
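# mlpconv is imported from the repo's layer helpers; below is a minimal
# sketch of a Network in Network block consistent with how it is called
# above: the first hidden_size entry is a spatial convolution of size
# filter_size, each later entry a 1x1 "micro network" layer (Lin et al.,
# 2014). The conv helper and its signature are assumptions borrowed from
# _create_model, not the repo's actual implementation.
def mlpconv(inputs, filter_size, hidden_size, name, wd=0):
    with tf.variable_scope(name):
        out = conv(inputs, filter_size, hidden_size[0], 'conv_0',
                   nl=tf.nn.relu, wd=wd)
        for layer_id, n_filter in enumerate(hidden_size[1:], start=1):
            # 1x1 convolutions act as a per-pixel fully connected layer.
            out = conv(out, 1, n_filter, 'conv_{}'.format(layer_id),
                       nl=tf.nn.relu, wd=wd)
        return out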
def _create_model(self):
    input_im = self.model_input[0]
    keep_prob = self.model_input[1]

    # Pre-trained GoogleNet backbone, created frozen (trainable=False)
    # and used purely as a feature extractor.
    net = GoogleNet(num_class=self.nclass,
                    num_channels=self.nchannel,
                    im_height=self.im_height,
                    im_width=self.im_width,
                    is_load=self._is_load,
                    pre_train_path=self._pre_train_path,
                    is_rescale=False,
                    trainable=False)
    net.create_model([input_im, keep_prob])
    conv_out = net.layer['conv_out']

    # gap = global_avg_pool(conv_out)
    # gap_dropout = dropout(gap, keep_prob, self.is_training)

    # Only the new fully connected head is trainable.
    arg_scope = tf.contrib.framework.arg_scope
    with arg_scope([fc], trainable=True, wd=5e-4):
        fc6 = fc(conv_out, 1024, 'fc6')
        fc6_bn = batch_norm(fc6, train=self.is_training, name='fc6_bn')
        fc6_act = tf.nn.relu(fc6_bn)
        dropout_fc6 = dropout(fc6_act, keep_prob, self.is_training)

        # Alternative deeper heads kept for reference:
        # fc7 = fc(dropout_fc6, 2048, 'fc7')
        # fc7_bn = batch_norm(fc7, train=self.is_training, name='fc7_bn')
        # fc7_act = tf.nn.relu(fc7_bn)
        # dropout_fc7 = dropout(fc7_act, keep_prob, self.is_training)

        # fc7 = fc(dropout_fc6, 4096, 'fc7', nl=tf.nn.relu)
        # dropout_fc7 = dropout(fc7, keep_prob, self.is_training)

        fc8 = fc(dropout_fc6, self.nclass, 'fc8')

    # self.layer['fc6'] = fc6
    # self.layer['fc7'] = fc7
    self.layer['fc8'] = self.layer['output'] = fc8
    self.layer['class_prob'] = tf.nn.softmax(fc8, name='class_prob')
    self.layer['pre_prob'] = tf.reduce_max(self.layer['class_prob'],
                                           axis=-1, name='pre_prob')
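# Because the backbone is built with trainable=False while the fc layers
# are created with trainable=True, only the new head receives gradients.
# A sketch of the resulting fine-tuning step; label_placeholder and the
# learning rate are hypothetical and not part of this repo.
label_placeholder = tf.placeholder(tf.int64, [None], name='label')
loss = tf.losses.sparse_softmax_cross_entropy(
    labels=label_placeholder, logits=self.layer['output'])
# tf.trainable_variables() here contains only the fc6/fc8 parameters,
# so a plain optimizer fine-tunes just the head.
train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)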
def _create_model(self):
    with tf.name_scope('input'):
        input_im = self.model_input[0]
        keep_prob = self.model_input[1]

        if self._is_rescale:
            input_im = \
                resize_tensor_image_with_smallest_side(input_im, 224)
        self.layer['input'] = input_im

        # Convert RGB to BGR and subtract the per-channel mean.
        red, green, blue = tf.split(axis=3, num_or_size_splits=3,
                                    value=input_im)
        input_bgr = tf.concat(axis=3, values=[
            blue - MEAN[0],
            green - MEAN[1],
            red - MEAN[2],
        ])

    # Load pre-trained weights as a dict of numpy arrays.
    data_dict = {}
    if self._is_load:
        data_dict = np.load(self._pre_train_path,
                            encoding='latin1').item()

    inception5b = self._create_conv(input_bgr, data_dict)

    gap = global_avg_pool(inception5b)
    gap_dropout = dropout(gap, keep_prob, self.is_training)
    fc1 = fc(gap_dropout, 1000, 'loss3_classifier', data_dict=data_dict)

    self.layer['conv_out'] = inception5b
    self.layer['output'] = fc1
    self.layer['class_prob'] = tf.nn.softmax(fc1, name='class_prob')
    self.layer['pre_prob'] = tf.reduce_max(self.layer['class_prob'],
                                           axis=-1, name='pre_prob')
    self.layer['prediction'] = tf.argmax(self.layer['output'], axis=-1)
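# resize_tensor_image_with_smallest_side is defined elsewhere in the repo;
# a minimal sketch assuming the usual behavior: rescale the NHWC batch so
# the shorter side equals small_size while preserving the aspect ratio
# (TF 1.x ops). This is an assumption, not the repo's implementation.
def resize_tensor_image_with_smallest_side(image, small_size):
    im_shape = tf.shape(image)
    height = tf.to_float(im_shape[1])
    width = tf.to_float(im_shape[2])
    scale = tf.to_float(small_size) / tf.minimum(height, width)
    new_h = tf.to_int32(tf.round(height * scale))
    new_w = tf.to_int32(tf.round(width * scale))
    return tf.image.resize_images(image, [new_h, new_w])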