Example 1
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from qkeras import QConv2DBatchnorm


def _get_nonseq_folded_model(x_shape):
    # kernel_quantizer, folding_mode, ema_freeze_delay and the weight arrays
    # (kernel, gamma, beta, iteration, moving_mean, moving_variance) are
    # expected to be defined in the enclosing test scope.
    x = x_in = layers.Input(x_shape, name="input")
    x1 = layers.Conv2D(filters=1,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       name="conv2d_1")(x)
    x2 = layers.Conv2D(filters=1,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       name="conv2d_2")(x)
    x = layers.Maximum()([x1, x2])
    x = QConv2DBatchnorm(filters=2,
                         kernel_size=(2, 2),
                         strides=(4, 4),
                         kernel_initializer="ones",
                         bias_initializer="zeros",
                         use_bias=False,
                         kernel_quantizer=kernel_quantizer,
                         beta_initializer="zeros",
                         gamma_initializer="ones",
                         moving_mean_initializer="zeros",
                         moving_variance_initializer="ones",
                         folding_mode=folding_mode,
                         ema_freeze_delay=ema_freeze_delay,
                         name="foldconv2d")(x)
    x = layers.Flatten(name="flatten")(x)
    x = layers.Dense(2,
                     use_bias=False,
                     kernel_initializer="ones",
                     name="dense")(x)
    model = Model(inputs=[x_in], outputs=[x])
    # layers[4] is the QConv2DBatchnorm ("foldconv2d") layer.
    model.layers[4].set_weights(
        [kernel, gamma, beta, iteration, moving_mean, moving_variance])
    return model
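A minimal sketch of how this helper might be driven. The quantizer, folding settings, and weight values below are illustrative assumptions, not values from the original test:

import numpy as np
from qkeras import quantized_bits

# Assumed free variables that the helper closes over.
kernel_quantizer = quantized_bits(4, 0, 1)
folding_mode = "batch_stats_folding"
ema_freeze_delay = 10

# Weights for the foldconv2d layer; kernel shape is (kh, kw, in_ch, filters).
kernel = np.ones((2, 2, 1, 2))
gamma = np.ones((2,))
beta = np.zeros((2,))
iteration = np.array(-1)
moving_mean = np.zeros((2,))
moving_variance = np.ones((2,))

model = _get_nonseq_folded_model((4, 4, 1))
model.summary()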
Example 2
from numpy.testing import assert_equal
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from qkeras import QActivation, QConv2D, QConv2DBatchnorm, QDense
from qkeras import bn_folding_utils


def test_populate_bias_quantizer_from_accumulator():
  """Test the populate_bias_quantizer_from_accumulator function.

  Define a qkeras model with a QConv2DBatchnorm layer whose bias quantizer is
  set to None. Call populate_bias_quantizer_from_accumulator to derive the
  bias quantizer type automatically from the MAC accumulator type and set it
  in the model.

  Then call populate_bias_quantizer_from_accumulator on the model a second
  time. Since the bias quantizer is now already set, the function should
  leave it unchanged.
  """

  x_shape = (2, 2, 1)

  # Build a qkeras model with a QConv2DBatchnorm layer whose bias quantizer
  # is None.
  x = x_in = layers.Input(x_shape, name="input")
  x1 = QConv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), use_bias=False,
               kernel_quantizer="quantized_bits(4, 0, 1)", name="conv2d_1")(x)
  x2 = QConv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), use_bias=False,
               kernel_quantizer="quantized_bits(4, 0, 1)", name="conv2d_2")(x)
  x = layers.Maximum()([x1, x2])
  x = QActivation("quantized_relu(4, 1)")(x)
  x = QConv2DBatchnorm(
      filters=2, kernel_size=(2, 2), strides=(4, 4),
      kernel_initializer="ones", bias_initializer="zeros", use_bias=False,
      kernel_quantizer="quantized_bits(4, 0, 1)", bias_quantizer=None,
      beta_initializer="zeros",
      gamma_initializer="ones", moving_mean_initializer="zeros",
      moving_variance_initializer="ones", folding_mode="batch_stats_folding",
      ema_freeze_delay=10,
      name="foldconv2d")(x)
  x1 = x
  x2 = layers.Flatten(name="flatten")(x)
  x2 = QDense(2, use_bias=False, kernel_initializer="ones",
              kernel_quantizer="quantized_bits(6, 2, 1)", name="dense")(x2)
  model = Model(inputs=[x_in], outputs=[x1, x2])
  # layers[5] is the QConv2DBatchnorm ("foldconv2d") layer; get_quantizers()
  # returns [kernel_quantizer, bias_quantizer].
  assert_equal(model.layers[5].get_quantizers()[1], None)

  # Call populate_bias_quantizer_from_accumulator to derive the bias
  # quantizer automatically from the MAC accumulator type.
  _ = bn_folding_utils.populate_bias_quantizer_from_accumulator(
      model, ["quantized_bits(8, 0, 1)"])
  q = model.layers[5].get_quantizers()[1]
  assert_equal(str(q), "quantized_bits(10,3,1)")

  # Call populate_bias_quantizer_from_accumulator again; the bias quantizer
  # is already set, so it should not change.
  _ = bn_folding_utils.populate_bias_quantizer_from_accumulator(
      model, ["quantized_bits(8, 0, 1)"])
  q = model.layers[5].get_quantizers()[1]
  assert_equal(str(q), "quantized_bits(10,3,1)")
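The expected "quantized_bits(10,3,1)" can be sanity-checked with a rough bit-growth argument (a back-of-the-envelope sketch, not necessarily qtools' exact algorithm): the foldconv2d layer multiplies quantized_relu(4, 1) activations by a quantized_bits(4, 0, 1) kernel and accumulates 2 * 2 * 1 = 4 products:

import math

input_bits, input_int = 4, 1    # quantized_relu(4, 1) feeding foldconv2d
kernel_bits, kernel_int = 4, 0  # quantized_bits(4, 0, 1) kernel
n_terms = 2 * 2 * 1             # kernel_size (2, 2), 1 input channel

growth = math.ceil(math.log2(n_terms))        # 2 extra bits for 4 adds
acc_bits = input_bits + kernel_bits + growth  # 4 + 4 + 2 = 10
acc_int = input_int + kernel_int + growth     # 1 + 0 + 2 = 3
print(acc_bits, acc_int)                      # 10 3 -> quantized_bits(10,3,1)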
Example 3
from tensorflow.keras import layers

def squeeze_excite_block(input, ratio=0.25):
    # Concurrent channel + spatial squeeze-and-excitation (scSE); the two
    # helper blocks must be defined elsewhere in the codebase.
    cse = channel_squeeze_excite_block(input, ratio)
    sse = spatial_squeeze_excite_block(input)
    output = layers.Maximum()([cse, sse])
    return output
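The two helper blocks are not shown in this example. Below is a minimal sketch of what they typically look like, following the standard scSE pattern (Roy et al.); the names match the calls above, but the exact layer choices here are assumptions:

from tensorflow.keras import layers

def channel_squeeze_excite_block(input, ratio=0.25):
    # cSE: squeeze spatially, excite per channel.
    filters = input.shape[-1]
    se = layers.GlobalAveragePooling2D()(input)
    se = layers.Dense(max(1, int(filters * ratio)), activation='relu')(se)
    se = layers.Dense(filters, activation='sigmoid')(se)
    se = layers.Reshape((1, 1, filters))(se)
    return layers.Multiply()([input, se])

def spatial_squeeze_excite_block(input):
    # sSE: squeeze channels, excite per spatial location.
    se = layers.Conv2D(1, (1, 1), activation='sigmoid')(input)
    return layers.Multiply()([input, se])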
Example 4
import tensorflow as tf
from tensorflow.keras import layers

# ResNet18 and glimpse are project-specific helpers assumed to be importable
# from the surrounding codebase.

def ecnn(input_shape=(320, 320, 3), base_model_input_shape=(40, 40, 3),
         name='ECNN', num_classes=10, augment=False, auxiliary=False,
         sampling=True, scales='all', pooling=None, dropout=False, gaze=None,
         scale4_freeze=False, return_logits=False):
	# ImageNet cortical sampling model.

	# Check args.
	if input_shape != (320, 320, 3):
		raise ValueError('input_shape must be (320, 320, 3)')

	if base_model_input_shape != (40, 40, 3):
		raise ValueError('base_model_input_shape must be (40, 40, 3)')

	if scales not in ['all', 'scale4']:
		raise ValueError("scales must be 'all' or 'scale4'")

	if scales != 'all' and auxiliary:
		raise ValueError("auxiliary outputs require scales='all'")

	if pooling is not None:
		if pooling not in ['max', 'avg']:
			raise ValueError("pooling must be None, 'max' or 'avg'")

	if pooling is not None and dropout:
		raise NotImplementedError('pooling combined with dropout is not supported')

	if scales == 'scale4':
		if dropout:
			raise NotImplementedError("dropout is not supported for scales='scale4'")
		if pooling is not None:
			raise ValueError("pooling is not supported for scales='scale4'")

	# Base ResNet18 feature extractors, one per spatial scale.
	if scales == 'all':
		scale1_network = ResNet18(include_top=False, input_shape=base_model_input_shape,
			conv1_stride=1, max_pool_stride=1, filters=45,
			subnetwork_name='scale1-{}'.format(name), pooling='avg')
		scale2_network = ResNet18(include_top=False, input_shape=base_model_input_shape,
			conv1_stride=1, max_pool_stride=1, filters=45,
			subnetwork_name='scale2-{}'.format(name), pooling='avg')
		scale3_network = ResNet18(include_top=False, input_shape=base_model_input_shape,
			conv1_stride=1, max_pool_stride=1, filters=45,
			subnetwork_name='scale3-{}'.format(name), pooling='avg')
	scale4_network = ResNet18(include_top=False, input_shape=base_model_input_shape,
		conv1_stride=1, max_pool_stride=1, filters=45,
		subnetwork_name='scale4-{}'.format(name), pooling='avg')
	
	model_input = layers.Input(shape=input_shape)

	# Data augmentation.
	if augment:
		x = layers.Lambda(
			lambda tensor: glimpse.image_augmentation(tensor, dataset='imagenet10'),
			name='image_augmentation')(model_input)
	else:
		x = model_input

	# Preprocess: use the provided gaze coordinates, or a scalar default.
	if gaze is not None:
		gaze_x = tf.constant(gaze[0], tf.int32)
		gaze_y = tf.constant(gaze[1], tf.int32)
		gaze = [gaze_x, gaze_y]
	else:
		# Image shape is (320, 320, 3).
		if not scale4_freeze:
			gaze = 40
		else:
			gaze = 80

	if not scale4_freeze:
		scale_sizes = [40, 80, 160, 240]
		scale_radii = [1, 2, 4, 6]
	else:
		scale_sizes = [40, 80, 160, 320]
		scale_radii = [1, 2, 4, 8]
		
	scale_center = [input_shape[0] // 2, input_shape[0] // 2]

	if not sampling:
		scales_x = layers.Lambda(lambda tensor: glimpse.image_scales(
			tensor, scale_center, scale_radii, scale_sizes, gaze, scale4_freeze),
			name='scale_sampling')(x)
	else:
		scales_x = layers.Lambda(lambda tensor: glimpse.warp_image_and_image_scales(
			tensor, input_shape[0], input_shape[0], scale_center, scale_radii,
			scale_sizes, gaze, scale4_freeze), name='nonuniform_and_scale_sampling')(x)
		
	# Unpack scales.
	scale1_x, scale2_x, scale3_x, scale4_x = scales_x

	if scales == 'all':
		scale1_x = scale1_network(scale1_x)
		scale2_x = scale2_network(scale2_x)
		scale3_x = scale3_network(scale3_x)
	scale4_x = scale4_network(scale4_x)

	if scales == 'all':
		if pooling is None:
			x = layers.concatenate([scale1_x, scale2_x, scale3_x, scale4_x])
		elif pooling == 'avg':
			x = layers.Average()([scale1_x, scale2_x, scale3_x, scale4_x])
		elif pooling == 'max':
			x = layers.Maximum()([scale1_x, scale2_x, scale3_x, scale4_x])
		else:
			raise ValueError("pooling must be None, 'max' or 'avg'")

		if dropout:
			x = layers.Dropout(0.75)(x)
	elif scales == 'scale4':
		x = scale4_x
	else:
		raise ValueError("scales must be 'all' or 'scale4'")

	# Classification head: softmax probabilities, or raw logits if requested.
	if not return_logits:
		model_output = layers.Dense(num_classes, activation='softmax', name='probs')(x)
	else:
		model_output = layers.Dense(num_classes, activation=None, name='probs')(x)

	if auxiliary:
		# Auxiliary per-scale classification heads.
		if return_logits:
			raise NotImplementedError('auxiliary outputs are only implemented for softmax')

		scale1_aux_out = layers.Dense(num_classes, activation='softmax',
			name='scale1_aux_probs')(scale1_x)
		scale2_aux_out = layers.Dense(num_classes, activation='softmax',
			name='scale2_aux_probs')(scale2_x)
		scale3_aux_out = layers.Dense(num_classes, activation='softmax',
			name='scale3_aux_probs')(scale3_x)
		scale4_aux_out = layers.Dense(num_classes, activation='softmax',
			name='scale4_aux_probs')(scale4_x)

		model = tf.keras.models.Model(
			inputs=model_input,
			outputs=[model_output, scale1_aux_out, scale2_aux_out,
				scale3_aux_out, scale4_aux_out])
	else:
		model = tf.keras.models.Model(inputs=model_input, outputs=model_output)

	return model
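A hypothetical usage sketch, assuming the project's ResNet18 and glimpse modules are importable:

model = ecnn(scales='all', pooling='max', augment=True)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()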