def _residual_drop(x, input_shape, output_shape, strides=(1, 1)):
    global add_tables
    nb_filter = output_shape[0]
    conv = Convolution2D(nb_filter, (3, 3), strides=strides, padding="same",
                         kernel_regularizer=L2(weight_decay))(x)
    conv = BN(axis=1)(conv)
    conv = Activation("relu")(conv)
    conv = Convolution2D(nb_filter, (3, 3), padding="same",
                         kernel_regularizer=L2(weight_decay))(conv)
    conv = BN(axis=1)(conv)

    if strides[0] >= 2:
        x = AveragePooling2D(strides)(x)

    if (output_shape[0] - input_shape[0]) > 0:
        # zero-pad the shortcut's extra channels (channels-first layout)
        pad_shape = (1,
                     output_shape[0] - input_shape[0],
                     output_shape[1],
                     output_shape[2])
        padding = K.zeros(pad_shape)
        padding = K.repeat_elements(padding, K.shape(x)[0], axis=0)
        x = Lambda(lambda y: K.concatenate([y, padding], axis=1),
                   output_shape=output_shape)(x)

    _death_rate = K.variable(death_rate)
    scale = K.ones_like(conv) - _death_rate
    conv = Lambda(lambda c: K.in_test_phase(scale * c, c),
                  output_shape=output_shape)(conv)

    out = add([conv, x])
    out = Activation("relu")(out)

    gate = K.variable(1, dtype="uint8")
    add_tables += [{"death_rate": _death_rate, "gate": gate}]
    return Lambda(lambda tensors: K.switch(gate, tensors[0], tensors[1]),
                  output_shape=output_shape)([out, x])
def residual_drop(x, input_shape, output_shape, strides=(1, 1)):
    global add_tables
    nb_filter = output_shape[0]
    conv = Convolution2D(nb_filter, 3, 3, subsample=strides,
                         border_mode="same")(x)
    conv = BatchNormalization(axis=1)(conv)
    conv = Activation("relu")(conv)
    conv = Convolution2D(nb_filter, 3, 3, border_mode="same")(conv)
    conv = BatchNormalization(axis=1)(conv)

    if strides[0] >= 2:
        x = AveragePooling2D(strides)(x)

    if (output_shape[0] - input_shape[0]) > 0:
        pad_shape = (1,
                     output_shape[0] - input_shape[0],
                     output_shape[1],
                     output_shape[2])
        # zero-pad (not ones) so the identity shortcut stays an identity
        padding = K.zeros(pad_shape)
        padding = K.repeat_elements(padding, K.shape(x)[0], axis=0)
        x = Lambda(lambda y: K.concatenate([y, padding], axis=1),
                   output_shape=output_shape)(x)

    _death_rate = K.variable(death_rate)
    scale = K.ones_like(conv) - _death_rate
    conv = Lambda(lambda c: K.in_test_phase(scale * c, c),
                  output_shape=output_shape)(conv)

    out = merge([conv, x], mode="sum")
    out = Activation("relu")(out)

    gate = K.variable(1, dtype="uint8")
    add_tables += [{"death_rate": _death_rate, "gate": gate}]
    return Lambda(lambda tensors: K.switch(gate, tensors[0], tensors[1]),
                  output_shape=output_shape)([out, x])
def _stochastic_survival(y, p_survival=1.0):
    # binomial random variable: 1 with probability p_survival, else 0
    survival = K.random_binomial((1,), p=p_survival)
    # during the testing phase:
    # - scale y (see eq. (6))
    # - p_survival effectively becomes 1 for all layers (no layer dropout)
    return K.in_test_phase(tf.constant(p_survival, dtype='float32') * y,
                           survival * y)
def stochastic_survival(y, p_survival=1.0):
    # binomial random variable built from a uniform draw:
    # 1 with probability p_survival, else 0
    shape = (1,)
    dtype = K.floatx()
    seed = np.random.randint(10e6)
    survival = tf.where(
        tf.random_uniform(shape, dtype=dtype, seed=seed) <= p_survival,
        tf.ones(shape, dtype=dtype),
        tf.zeros(shape, dtype=dtype))
    return K.in_test_phase(tf.constant(p_survival, dtype='float32') * y,
                           survival * y)
def call(self, inputs, training=None):
    if 0. < self.rate < 1.:
        noise_shape = self._get_noise_shape(inputs)

        def dropped_inputs():
            return K.dropout(inputs, self.rate, noise_shape,
                             seed=self.seed)

        if training:
            return K.in_train_phase(dropped_inputs, inputs,
                                    training=training)
        else:
            # drop at test time instead of training time
            return K.in_test_phase(dropped_inputs, inputs, training=None)
    return inputs
def residual_drop(x, input_shape, output_shape, strides=(1, 1)):
    global add_tables
    nb_filter = output_shape[0]
    conv = Convolution2D(nb_filter, 3, 3, subsample=strides,
                         border_mode="same",
                         W_regularizer=l2(weight_decay))(x)
    conv = BatchNormalization(axis=1)(conv)
    conv = Activation("relu")(conv)
    conv = Convolution2D(nb_filter, 3, 3, border_mode="same",
                         W_regularizer=l2(weight_decay))(conv)
    conv = BatchNormalization(axis=1)(conv)

    if strides[0] >= 2:
        x = AveragePooling2D(strides)(x)

    if (output_shape[0] - input_shape[0]) > 0:
        pad_shape = (1,
                     output_shape[0] - input_shape[0],
                     output_shape[1],
                     output_shape[2])
        padding = K.zeros(pad_shape)
        padding = K.repeat_elements(padding, K.shape(x)[0], axis=0)
        x = Lambda(lambda y: K.concatenate([y, padding], axis=1),
                   output_shape=output_shape)(x)

    _death_rate = K.variable(death_rate)
    scale = K.ones_like(conv) - _death_rate
    conv = Lambda(lambda c: K.in_test_phase(scale * c, c),
                  output_shape=output_shape)(conv)

    out = merge([conv, x], mode="sum")
    out = Activation("relu")(out)

    gate = K.variable(1, dtype="uint8")
    add_tables += [{"death_rate": _death_rate, "gate": gate}]
    return Lambda(lambda tensors: K.switch(gate, tensors[0], tensors[1]),
                  output_shape=output_shape)([out, x])
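# The add_tables list above only does something if training-loop code flips
# each block's gate. None of the snippets here include that driver, so the
# callback below is a minimal sketch, assuming gates are resampled once per
# batch from each block's death_rate and reopened for validation; the class
# name GatesUpdate and the numpy-based sampling are assumptions.
import numpy as np
from keras import backend as K
from keras.callbacks import Callback

def open_all_gates():
    # let every residual block pass its transform branch
    for t in add_tables:
        K.set_value(t["gate"], 1)

class GatesUpdate(Callback):  # assumed helper, not part of the snippets above
    def on_batch_begin(self, batch, logs=None):
        open_all_gates()
        rands = np.random.uniform(size=len(add_tables))
        for t, rand in zip(add_tables, rands):
            # close the gate (skip the block) with probability death_rate
            if rand < K.get_value(t["death_rate"]):
                K.set_value(t["gate"], 0)

    def on_batch_end(self, batch, logs=None):
        open_all_gates()  # validation runs with all blocks active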
def residual_drop(x, input_shape, output_shape, strides=(1, 1)):
    # channels-last variant: output_shape is (filters, height, width)
    global add_tables
    nb_filter = output_shape[0]
    conv = Conv2D(nb_filter, (3, 3), strides=strides, padding="same",
                  kernel_regularizer=l2(weight_decay))(x)
    conv = BatchNormalization()(conv)
    conv = Activation("relu")(conv)
    conv = Conv2D(nb_filter, (3, 3), padding="same",
                  kernel_regularizer=l2(weight_decay))(conv)
    conv = BatchNormalization()(conv)

    if strides[0] >= 2:
        x = AveragePooling2D(strides)(x)

    if (output_shape[0] - input_shape[0]) > 0:
        pad_shape = (1,
                     output_shape[1],
                     output_shape[2],
                     output_shape[0] - input_shape[0])
        padding = K.zeros(pad_shape)
        padding = K.repeat_elements(padding, batch_size, axis=0)
        x = Lambda(lambda y: K.concatenate([y, padding], axis=3),
                   output_shape=(output_shape[1], output_shape[2],
                                 output_shape[0]))(x)

    _death_rate = K.variable(death_rate)
    scale = K.ones_like(conv) - _death_rate
    conv = Lambda(lambda c: K.in_test_phase(scale * c, c),
                  output_shape=(output_shape[1], output_shape[2],
                                output_shape[0]))(conv)

    out = Add()([conv, x])
    out = Activation("relu")(out)

    gate = K.variable(1.0, dtype="float32")
    add_tables += [{"death_rate": _death_rate, "gate": gate}]
    return Lambda(lambda tensors: K.switch(gate, tensors[0], tensors[1]),
                  output_shape=(output_shape[1], output_shape[2],
                                output_shape[0]))([out, x])
def func(x, drop_rate=drop_rate):
    # at test time, scale activations by the expected survival rate
    scale = K.ones_like(x) - drop_rate
    return K.in_test_phase(scale * x, x)
def call(self, x, mask=None):
    # test phase: scale by (1 - death_rate); training phase: pass through
    return K.in_test_phase((K.ones_like(x) - self.death_rate) * x, x)
def stochastic_survival(y, p_survival=1.0):
    survival = K.random_binomial((1,), p=p_survival)
    return K.in_test_phase(tf.constant(p_survival, dtype='float32') * y,
                           survival * y)
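# For context, a helper like the one above is typically attached to the
# residual branch through a Lambda layer. This is a minimal usage sketch;
# the tensor name conv and the survival probability 0.8 are illustrative
# assumptions, not taken from the snippets themselves.
from keras.layers import Lambda

# drop/keep the residual branch with survival probability 0.8 (assumed value)
conv = Lambda(stochastic_survival,
              arguments={'p_survival': 0.8})(conv)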
def call(self, x, mask=None):
    # one-hot encode only at test time; pass through during training
    return K.in_test_phase(one_hot(x), x)
def stochastic_survival(y, p_survival=1.0):
    # keep the survival indicator distinct from the survival probability;
    # the test-phase branch scales by the probability, not the random draw
    survival = K.random_binomial((1,), p=p_survival)
    return K.in_test_phase(K.constant(p_survival, dtype='float32') * y,
                           survival * y)
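# All of these snippets hinge on K.in_test_phase selecting its first argument
# at inference and its second during training. A minimal sketch of that
# behavior, assuming the TF1-era Keras backend where the learning phase can
# be pinned with K.set_learning_phase before the graph is built:
import numpy as np
from keras import backend as K

K.set_learning_phase(0)  # 0 = test phase, 1 = training phase
x = K.constant(np.ones((2, 2)))
out = K.in_test_phase(0.5 * x, x)  # first argument wins at test time
print(K.eval(out))  # expected: all entries 0.5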