def run(self, img):
    """Yield ``self._num`` random sub-blocks of *img*.

    Each item is ``(block, (y, x, h, w))`` where ``block`` is the view
    ``img[y:y+h, x:x+w]`` and the tuple is its position/size.
    """
    height, width = get_dim(img)
    for _ in range(self._num):
        y, x, h, w = self.__get_rand_block(height, width)
        yield img[y:y + h, x:x + w], (y, x, h, w)
def run(self, img):
    """Generator over ``self._num`` random crops of *img*.

    Yields ``(crop, (y, x, h, w))`` pairs; crop is the slice
    ``img[y:y+h, x:x+w]``.
    """
    H, W = get_dim(img)
    produced = 0
    while produced < self._num:
        y, x, h, w = self.__get_rand_block(H, W)
        produced += 1
        yield img[y:y + h, x:x + w], (y, x, h, w)
def set_model(self, figs, labels, is_training, reuse=False):
    """Build the label-conditioned encoder graph.

    Args:
        figs: 4-D image batch tensor, NHWC layout (shape inferred via
            ``get_shape``) — assumes static height/width; TODO confirm.
        labels: label tensor, projected through a linear layer to one
            extra H*W image channel and concatenated to ``figs``.
        is_training: flag/tensor controlling batch-norm mode.
        reuse: reuse existing variables in the scopes when True.

    Returns:
        ``(mu, log_sigma)`` tensors of shape ``[-1, self.z_dim]``.
    """
    fig_shape = figs.get_shape().as_list()
    height, width = fig_shape[1:3]
    class_num = get_dim(labels)

    # Project labels to an H*W map and append it as an extra channel.
    with tf.variable_scope(self.name_scope_label, reuse=reuse):
        tmp = linear_layer(labels, class_num, height * width, 'reshape')
        tmp = tf.reshape(tmp, [-1, height, width, 1])
        h = tf.concat((figs, tmp), 3)

    # Convolution stack; the first layer skips batch-norm.
    # (Was `enumerate(zip(chans, chans[1:]))` with the input-channel
    # element unused — iterate the output channels directly.)
    with tf.variable_scope(self.name_scope_conv, reuse=reuse):
        for i, out_chan in enumerate(self.layer_chanels[1:]):
            conved = conv_layer(inputs=h, out_num=out_chan,
                                filter_width=5, filter_hight=5,
                                stride=2, l_id=i)
            if i == 0:
                h = tf.nn.relu(conved)
            else:
                h = tf.nn.relu(batch_norm(conved, i, is_training))

    # Flatten, then map to the latent Gaussian parameters.
    dim = get_dim(h)
    h = tf.reshape(h, [-1, dim])
    with tf.variable_scope(self.name_scope_fc, reuse=reuse):
        h = linear_layer(h, dim, self.fc_dim, 'fc')
        h = batch_norm(h, 'en_fc_bn', is_training)
        h = tf.nn.relu(h)
        mu = linear_layer(h, self.fc_dim, self.z_dim, 'mu')
        log_sigma = linear_layer(h, self.fc_dim, self.z_dim, 'sigma')
    return mu, log_sigma
def run(self, img):
    """Tile *img* into a grid of ``self._h`` x ``self._w`` blocks and
    return the top-left ``(y, x)`` of every block whose foreground
    coverage exceeds ``self._mep`` percent.

    Side effects: stores a copy of the image in ``self._img`` and the
    resulting points in ``self._pts``.
    """
    self._img = img.copy()
    M, N = get_dim(img)  # M: rows (height), N: columns (width)
    tmp = MyThreshold(val=1, ksize=0).run(img)

    def enough_elem(block):
        # Percentage of set pixels in the block vs. the block area.
        return block.sum() / (self._w * self._h) * 100 > self._mep

    # BUG FIX: the original stepped rows by self._w (stopping at
    # M - self._w) and columns by self._h, while the slice takes
    # self._h rows and self._w columns — for h != w the tiles
    # overlapped or skipped pixels. Step sizes now match the slice.
    pts = [(y, x)
           for y in np.arange(0, M - self._h, self._h)
           for x in np.arange(0, N - self._w, self._w)
           if enough_elem(tmp[y:y + self._h, x:x + self._w])]
    self._pts = pts
    return pts
def run(self, img):
    """Scan *img* on an ``self._h`` x ``self._w`` grid and collect the
    top-left corners of blocks with more than ``self._mep`` percent of
    foreground pixels (after thresholding).

    Stores ``self._img`` (copy of input) and ``self._pts`` (result).
    """
    self._img = img.copy()
    M, N = get_dim(img)  # rows, columns
    mask = MyThreshold(val=1, ksize=0).run(img)

    def enough_elem(block):
        # Fraction of set pixels, expressed as a percentage.
        return block.sum() / (self._w * self._h) * 100 > self._mep

    # BUG FIX: row/column step sizes were swapped (rows stepped by
    # self._w, columns by self._h) relative to the slice dimensions,
    # breaking the tiling whenever self._h != self._w.
    pts = [(y, x)
           for y in np.arange(0, M - self._h, self._h)
           for x in np.arange(0, N - self._w, self._w)
           if enough_elem(mask[y:y + self._h, x:x + self._w])]
    self._pts = pts
    return pts
def set_model(self, figs, is_training, reuse=False):
    """Build the discriminator graph.

    Args:
        figs: 4-D input image batch (NHWC).
        is_training: flag/tensor controlling batch-norm mode.
        reuse: reuse existing variables when True.

    Returns:
        ``(logits, feature_image)`` — raw logits of shape ``[-1, 1]``
        (no sigmoid applied) and the final conv feature map.
    """
    h = figs

    # Convolution stack. First layer: stride 1, no batch-norm;
    # later layers: stride 2 with batch-norm.
    # (The duplicated conv_layer call in each branch is collapsed; the
    # unused input-channel element of the zip is dropped.)
    with tf.variable_scope(self.name_scope_conv, reuse=reuse):
        for i, out_chan in enumerate(self.layer_chanels[1:]):
            conved = conv_layer(inputs=h, out_num=out_chan,
                                filter_width=5, filter_hight=5,
                                stride=1 if i == 0 else 2, l_id=i)
            if i == 0:
                h = tf.nn.relu(conved)
            else:
                h = tf.nn.relu(batch_norm(conved, i, is_training))
    feature_image = h

    # Fully-connected head producing a single logit per example.
    dim = get_dim(h)
    h = tf.reshape(h, [-1, dim])
    with tf.variable_scope(self.name_scope_fc, reuse=reuse):
        weights = get_weights('fc', [dim, self.fc_dim], 0.02)
        biases = get_biases('fc', [self.fc_dim], 0.0)
        h = tf.matmul(h, weights) + biases
        h = batch_norm(h, 'fc', is_training)
        h = tf.nn.relu(h)
        weights = get_weights('fc2', [self.fc_dim, 1], 0.02)
        biases = get_biases('fc2', [1], 0.0)
        h = tf.matmul(h, weights) + biases
    return h, feature_image
def set_model(self, figs, is_training, reuse=False):
    """Build the convolutional encoder graph.

    Args:
        figs: 4-D input image batch (NHWC).
        is_training: flag/tensor controlling batch-norm mode.
        reuse: reuse existing variables when True.

    Returns:
        ``(mu, log_sigma)`` of shape ``[-1, self.z_dim]`` — raw linear
        outputs; no sigmoid/activation is applied.
    """
    h = figs

    # Convolution stack; the first layer is not batch-normalized.
    # (Unused input-channel element of the original zip removed.)
    with tf.variable_scope(self.name_scope_conv, reuse=reuse):
        for i, out_chan in enumerate(self.layer_chanels[1:]):
            conved = conv_layer(inputs=h, out_num=out_chan,
                                filter_width=5, filter_hight=5,
                                stride=2, l_id=i)
            if i == 0:
                h = tf.nn.relu(conved)
            else:
                h = tf.nn.relu(batch_norm(conved, i, is_training))

    # Flatten and map to latent Gaussian parameters.
    dim = get_dim(h)
    h = tf.reshape(h, [-1, dim])
    with tf.variable_scope(self.name_scope_fc, reuse=reuse):
        weights = get_weights('fc', [dim, self.fc_dim], 0.02)
        biases = get_biases('fc', [self.fc_dim], 0.0)
        h = tf.matmul(h, weights) + biases
        h = batch_norm(h, 'en_fc_bn', is_training)
        h = tf.nn.relu(h)
        weights = get_weights('mu', [self.fc_dim, self.z_dim], 0.02)
        biases = get_biases('mu', [self.z_dim], 0.0)
        mu = tf.matmul(h, weights) + biases
        weights = get_weights('sigma', [self.fc_dim, self.z_dim], 0.02)
        biases = get_biases('sigma', [self.z_dim], 0.0)
        log_sigma = tf.matmul(h, weights) + biases
    return mu, log_sigma
def set_model(self, figs, is_training, reuse = False):
    """MLP encoder: map *figs* to latent Gaussian parameters.

    Returns ``(mu, log_sigma)`` as raw linear outputs of dimension
    ``self.z_dim`` — no activation is applied to either.
    """
    h = figs
    with tf.variable_scope(self.name_scope, reuse = reuse):
        # Hidden stack: linear -> batch-norm -> ReLU for each pair of
        # adjacent sizes in self.layer_list.
        dims = list(zip(self.layer_list, self.layer_list[1:]))
        for idx, (d_in, d_out) in enumerate(dims):
            pre_act = batch_norm(linear_layer(h, d_in, d_out, idx),
                                 idx, is_training)
            h = tf.nn.relu(pre_act)
        last_dim = get_dim(h)
        mu = linear_layer(h, last_dim, self.z_dim, 'mu')
        log_sigma = linear_layer(h, last_dim, self.z_dim, 'log_sigma')
    return mu, log_sigma