        input2=input_b,
        max_displacement=max_displacement,
        kernel_size=kernel_size,
        stride1=stride1,
        stride2=stride2,
        pad_size=pad)
    if nd.scope.correlation_leaky_relu():
        corr_out = tf.nn.leaky_relu(corr_out)
    else:
        corr_out = tf.nn.relu(corr_out)
    return corr_out

register_op('correlation_2d', _correlation)

def _warp(data, flow, nearest=False):
    # TODO: honor `nearest` (flow_warp presumably samples bilinearly only)
    return flow_warp(data, flow)

register_op('warp', _warp)

def _correlation_1d(input_a, input_b, **kwargs):
    max_displacement = kwargs.pop('max_displacement', None)
    kernel_size = kwargs.pop('kernel_size', None)
    stride1 = kwargs.pop('stride_1', 1)
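# Usage sketch (hypothetical; _correlation_1d is truncated in this excerpt
# and the registry's lookup API is not shown here). Computing matching
# costs between two NCHW feature blobs with the 2D op, kwargs mirroring
# the pops seen above, might read:
#
#   corr = nd.ops.correlation_2d(feat_a, feat_b, max_displacement=4,
#                                kernel_size=1, stride_1=1, stride_2=1)
#
# `nd.ops.correlation_2d` assumes register_op exposes ops on an `nd.ops`
# namespace, which this excerpt does not confirm.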
from netdef_slim.core.register import register_op
import netdef_slim as nd
import tensorflow as tf
from sklearn.metrics import f1_score
import numpy as np

nothing = None

def _py_fmeasure(pred, gt):
    # Mask out invalid ground-truth pixels (marked as NaN) before scoring.
    valid_gt = gt[np.logical_not(np.isnan(gt))]
    valid_pred = pred[np.logical_not(np.isnan(gt))]
    return f1_score(valid_gt.flatten() > 0, valid_pred.flatten() > 0)

def _f_measure(pred, gt, name):
    # TF op names may not contain brackets.
    name = name.replace('[', '-').replace(']', '-')
    pred = tf.to_float(pred)
    gt = tf.to_float(gt)
    fm = tf.py_func(_py_fmeasure, [pred, gt], tf.double, name=name, stateful=False)
    tf.add_to_collection('metric_ops', fm)
    return fm

register_op('f_measure', _f_measure)
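# Minimal sanity check of the numpy path (illustrative values, not part of
# the original module): NaNs in gt mark invalid pixels and are masked out
# before both maps are binarized at zero and scored.
if __name__ == '__main__':
    _gt = np.array([1.0, -1.0, np.nan, 2.0])
    _pred = np.array([0.5, -0.5, 3.0, -0.2])
    # valid positions: gt > 0 -> [True, False, True], pred > 0 -> [True, False, False]
    print(_py_fmeasure(_pred, _gt))  # -> 0.666... (precision 1.0, recall 0.5)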
        kernel_size,
        strides=stride,
        data_format='channels_first',
        trainable=nd.scope.learn(),
        reuse=tf.AUTO_REUSE,
        activation=activation,
        kernel_regularizer=k_regularizer,
        kernel_initializer=k_initializer,
        bias_initializer=b_initializer,
        name=name,
    ))
    return outputs

register_op('conv', _conv)

def _conv_relu(input, **kwargs):
    return _conv(input, activation=leaky_relu, **kwargs)

register_op('conv_relu', _conv_relu)

def _conv_elu(input, **kwargs):
    return _conv(input, activation=tf.nn.elu, **kwargs)

register_op('conv_elu', _conv_elu)
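# Why reuse=tf.AUTO_REUSE above: invoking a layer under the same name twice
# (e.g. a shared encoder applied to both input images) reuses its weights
# instead of raising. A self-contained illustration with plain tf.layers
# (names and sizes here are illustrative, not from this module):
def _shared_conv_demo(x):
    # Both calls with the same `name` resolve to one set of variables.
    return tf.layers.conv2d(x, filters=8, kernel_size=3, padding='same',
                            reuse=tf.AUTO_REUSE, name='demo_shared_conv')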
from netdef_slim.core.register import register_op
import tensorflow as tf
from lmbspecialops import resample

nothing = None

# ----------------------------------------------------------------------
def _crop(image, width, height):
    # NCHW -> NHWC, since the tf.image ops expect channels last.
    data = tf.transpose(image, perm=[0, 2, 3, 1])
    # Note: resize_image_with_crop_or_pad takes target_height and
    # target_width as separate arguments, not as a list.
    data = tf.image.resize_image_with_crop_or_pad(data, tf.to_int32(height), tf.to_int32(width))
    return tf.transpose(data, perm=[0, 3, 1, 2])

register_op('crop', _crop)

# ----------------------------------------------------------------------
def _resample(image, width=None, height=None, reference=None, factor=1.0, type='LINEAR', antialias=True):
    types = {'LINEAR': tf.image.ResizeMethod.BILINEAR}
    data = image
    # data = tf.transpose(image, perm=[0,2,3,1])
    if reference is not None:
        b, c, h, w = reference.get_shape().as_list()
        # data = tf.image.resize_images(data, [h, w], method=types[type])
        data = resample(data, w, h, antialias, type)
    elif height is not None and width is not None:
        # data = tf.image.resize_images(data, [tf.to_int32(height), tf.to_int32(width)], method=types[type])
        data = resample(data, width, height, antialias, type)
    else:
        raise ValueError('_resample needs either a reference blob or explicit width and height')
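# Usage sketch (hedged; _resample is truncated in this excerpt): the
# `reference` branch resizes one NCHW blob to the spatial size of another,
# keeping its own channel count:
#
#   small = tf.zeros([1, 2, 32, 32])
#   big   = tf.zeros([1, 3, 128, 128])
#   up    = _resample(small, reference=big)   # expected shape [1, 2, 128, 128]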
from netdef_slim.core.register import register_op
import tensorflow as tf
from .blob import _slice
import numpy as np
import netdef_slim as nd

nothing = None

# ----------------------------------------------------------------------
def _threshold(tensor, thresh):
    condition = tf.less(tensor, thresh)
    return tf.where(condition, _const_like(tensor, 0.0), _const_like(tensor, 1.0))

register_op('threshold', _threshold)

# ----------------------------------------------------------------------
_scale_conv_n = 0

def _scale(tensor, factor):
    '''
    Factor can be a scalar or a list.
    In case of a list, each channel is scaled by a different factor.
    '''
    global _scale_conv_n
    if type(factor) is list or type(factor) is tuple:
        # The kernel shape (1, 1, C, 1) suggests the factors are fed to a
        # depthwise 1x1 convolution (the function is truncated here); the
        # counter keeps the generated op names unique.
        _scale_conv_n += 1
        kernel = tf.constant(factor)
        kernel = tf.reshape(kernel, (1, 1, len(factor), 1))
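# Illustration of the threshold op (hedged: _const_like is defined elsewhere
# in this module and presumably builds a constant with the tensor's shape).
# Values below `thresh` map to 0.0, everything else to 1.0:
#
#   x = tf.constant([[0.2, 0.7, 0.5]])
#   y = _threshold(x, 0.5)   # -> [[0.0, 1.0, 1.0]]  (0.5 is not less than 0.5)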
from netdef_slim.core.register import register_op
import tensorflow as tf

nothing = None

# ----------------------------------------------------------------------
def _image_to_range_01(image, include_phase=None):
    return tf.multiply(image, 1.0/255.0)

register_op('image_to_range_01', _image_to_range_01)

# ----------------------------------------------------------------------
def _image_to_range_255(image):
    return tf.multiply(image, 255.0)

register_op('image_to_range_255', _image_to_range_255)

# ----------------------------------------------------------------------
def _scale_and_subtract_mean(image, mean=0.4):
    return tf.subtract(tf.multiply(image, 1.0/255.0), mean)

register_op('scale_and_subtract_mean', _scale_and_subtract_mean)

# ----------------------------------------------------------------------
def _add_mean_and_scale(image, mean=0.4):
    return tf.multiply(tf.add(image, mean), 255.0)
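# The two mean ops above are inverses (up to float rounding) for the same
# `mean`:
#
#   _add_mean_and_scale(_scale_and_subtract_mean(x)) == x
#
# e.g. a pixel value of 128.0 maps to 128/255 - 0.4 ≈ 0.102 and back to 128.0.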
        padding=pad,
        data_format='channels_first',
        trainable=nd.scope.learn(),
        activation=activation,
        reuse=tf.AUTO_REUSE,
        kernel_initializer=k_initializer,
        kernel_regularizer=k_regularizer,
        bias_initializer=b_initializer,
        use_bias=True,
        name=name,
    ))
    return outputs

register_op('upconv', _upconv)

def _upconv_relu(input, **kwargs):
    return _upconv(input, leaky_relu, **kwargs)

register_op('upconv_relu', _upconv_relu)

def _upconv_elu(input, **kwargs):
    return _upconv(input, tf.nn.elu, **kwargs)

register_op('upconv_elu', _upconv_elu)
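# Usage sketch (hypothetical kwargs; _upconv's full signature is not part
# of this excerpt): a decoder stage doubling the spatial resolution might
# read:
#
#   up4 = _upconv_relu(feat5, num_output=256, kernel_size=4, stride=2)
#
# Note that the relu/elu variants pass the activation positionally, so
# _upconv presumably takes it as its second argument.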
    # Convert absolute slice points into the chunk sizes tf.split expects.
    size_splits = []
    last_point = 0
    for p in slice_points:
        size_splits.append(p - last_point)
        last_point = p
    size_splits.append(tensor.get_shape().as_list()[axis] - last_point)
    out = tf.split(tensor, size_splits, axis=axis)
    return out

register_op('slice', _slice)

# ----------------------------------------------------------------------
def _blobFromScalar(scalar):
    return tf.Variable(scalar)

register_op('blobFromScalar', _blobFromScalar)

# ----------------------------------------------------------------------
def _concat(*args, **kwargs):
    '''
    The counterpart to slice. All input blobs will be concatenated.
    '''
    axis = 1
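# Worked example of the slice-point conversion above (argument passing is
# hedged; the head of _slice is not part of this excerpt): for 8 channels
# and slice points [2, 5], size_splits becomes [2, 3, 3]:
#
#   x = tf.zeros([1, 8, 4, 4])
#   a, b, c = _slice(x, [2, 5])   # shapes [1,2,4,4], [1,3,4,4], [1,3,4,4]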