def __get_version(version):
    # matching 1.6.1, and 1.6.1rc, 1.6.1.dev
    version_regex = r'^\d+\.\d+\.\d+'
    version = _re.search(version_regex, str(version)).group(0)
    return _StrictVersion(version)

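# A minimal standalone sketch (hypothetical helper mirroring the regex above):
# normalizing to "major.minor.patch" matters because StrictVersion cannot parse
# suffixed releases such as "1.6.1.dev" directly.
import re
from distutils.version import StrictVersion


def _parse_three_part_version(version):
    # keep only the leading digits, dropping rc/dev suffixes
    return StrictVersion(re.search(r"^\d+\.\d+\.\d+", str(version)).group(0))


assert _parse_three_part_version("1.6.1.dev") == StrictVersion("1.6.1")
assert _parse_three_part_version("1.6.1rc") < StrictVersion("1.6.2")
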
# ---------------------------------------------------------------------------------------
HAS_SKLEARN = True
SKLEARN_MIN_VERSION = '0.15'


def __get_sklearn_version(version):
    # matching 0.15b, 0.16bf, etc
    version_regex = r'^\d+\.\d+'
    version = _re.search(version_regex, str(version)).group(0)
    return _StrictVersion(version)


try:
    import sklearn

    if __get_sklearn_version(sklearn.__version__) < _StrictVersion(SKLEARN_MIN_VERSION):
        HAS_SKLEARN = False
        _logging.warn(('scikit-learn version %s is not supported. Minimum required version: %s. '
                       'Disabling scikit-learn conversion API.')
                      % (sklearn.__version__, SKLEARN_MIN_VERSION))
except:
    HAS_SKLEARN = False

# ---------------------------------------------------------------------------------------
HAS_LIBSVM = True
try:
    import svm
except:
    HAS_LIBSVM = False

# ---------------------------------------------------------------------------------------

HAS_SKLEARN = True
SKLEARN_VERSION = None
SKLEARN_MIN_VERSION = '0.17'


def __get_sklearn_version(version):
    # matching 0.15b, 0.16bf, etc
    version_regex = r'^\d+\.\d+'
    version = _re.search(version_regex, str(version)).group(0)
    return _StrictVersion(version)


try:
    import sklearn

    SKLEARN_VERSION = __get_sklearn_version(sklearn.__version__)
    if SKLEARN_VERSION < _StrictVersion(SKLEARN_MIN_VERSION):
        HAS_SKLEARN = False
        _logging.warn(('scikit-learn version %s is not supported. Minimum required version: %s. '
                       'Disabling scikit-learn conversion API.')
                      % (sklearn.__version__, SKLEARN_MIN_VERSION))
except:
    HAS_SKLEARN = False

# ---------------------------------------------------------------------------------------
HAS_LIBSVM = True
try:
    import svm
except:
    HAS_LIBSVM = False

_SKLEARN_MIN_VERSION = "0.17" _SKLEARN_MAX_VERSION = "0.19.2" def __get_sklearn_version(version): # matching 0.15b, 0.16bf, etc version_regex = r"^\d+\.\d+" version = _re.search(version_regex, str(version)).group(0) return _StrictVersion(version) try: import sklearn _SKLEARN_VERSION = __get_sklearn_version(sklearn.__version__) if _SKLEARN_VERSION < _StrictVersion( _SKLEARN_MIN_VERSION) or _SKLEARN_VERSION > _StrictVersion( _SKLEARN_MAX_VERSION): _HAS_SKLEARN = False _logging.warning(( "scikit-learn version %s is not supported. Minimum required version: %s. " "Maximum required version: %s. " "Disabling scikit-learn conversion API.") % (sklearn.__version__, _SKLEARN_MIN_VERSION, _SKLEARN_MAX_VERSION)) except: _HAS_SKLEARN = False MSG_SKLEARN_NOT_FOUND = "Sklearn not found." # --------------------------------------------------------------------------------------- _HAS_LIBSVM = True try:
    _keras.layers.Concatenate: _layers2.convert_merge,
    _keras.layers.Dot: _layers2.convert_merge,
    _keras.layers.core.Flatten: _layers2.convert_flatten,
    _keras.layers.core.Permute: _layers2.convert_permute,
    _keras.layers.core.Reshape: _layers2.convert_reshape,
    _keras.layers.embeddings.Embedding: _layers2.convert_embedding,
    _keras.layers.core.RepeatVector: _layers2.convert_repeat_vector,
    _keras.layers.core.Dropout: _layers2.default_skip,
    _keras.layers.core.SpatialDropout2D: _layers2.default_skip,
    _keras.layers.core.SpatialDropout1D: _layers2.default_skip,
    _keras.layers.wrappers.TimeDistributed: _layers2.default_skip,
}

from distutils.version import StrictVersion as _StrictVersion

## 2.2 Version check
if _keras.__version__ >= _StrictVersion("2.2.0"):
    _KERAS_LAYER_REGISTRY[_keras.layers.DepthwiseConv2D] = _layers2.convert_convolution
    _KERAS_LAYER_REGISTRY[_keras.engine.input_layer.InputLayer] = _layers2.default_skip
    if _keras.__version__ >= _StrictVersion("2.2.1"):
        _KERAS_LAYER_REGISTRY[_keras.layers.advanced_activations.ReLU] = _layers2.convert_advanced_relu
else:
    _KERAS_LAYER_REGISTRY[_keras.applications.mobilenet.DepthwiseConv2D] = _layers2.convert_convolution
    _KERAS_LAYER_REGISTRY[_keras.engine.topology.InputLayer] = _layers2.default_skip

def get_game_version(vstring):
    return _StrictVersion(vstring)

def _from_saved_model(saved_model_dir):
    from tensorflow.python.tools import freeze_graph

    # must import here as tf.contrib is only available on TF 1.x
    from tensorflow.contrib.saved_model.python.saved_model import reader

    saved_model_tags = reader.get_saved_model_tag_sets(saved_model_dir)[0]
    if not saved_model_tags:
        msg = "Unsupported SavedModel directory format: no tag_sets available"
        raise NotImplementedError(msg)

    # get model outputs
    output_node_names = []
    if _get_version(tf.__version__) < _StrictVersion("1.13.1"):
        sess = tf.Session()
    else:
        sess = tf.compat.v1.Session()
    metagraph = tf.saved_model.loader.load(sess, saved_model_tags, saved_model_dir)
    for sd in metagraph.signature_def.values():
        output_node_names += [o.name.split(":")[0] for o in sd.outputs.values()]
    sess.close()

    # get frozen graph
    output_graph = mktemp()
    if _get_version(tf.__version__) >= _StrictVersion("1.13.1"):
        tf.compat.v1.reset_default_graph()
    else:
        tf.reset_default_graph()
    freeze_graph.freeze_graph(
        input_graph=None,
        input_saver=None,
        input_binary=None,
        input_checkpoint=None,
        output_node_names=",".join(output_node_names),
        restore_op_name=None,
        filename_tensor_name=None,
        output_graph=output_graph,
        clear_devices=True,
        initializer_nodes="",
        variable_names_whitelist="",
        variable_names_blacklist="",
        input_meta_graph=None,
        input_saved_model_dir=saved_model_dir,
        saved_model_tags=",".join(saved_model_tags),
    )

    if _get_version(tf.__version__) < _StrictVersion("1.13.1"):
        graph_def = tf.GraphDef()
        with open(output_graph, "rb") as f:
            graph_def.ParseFromString(f.read())
        graph_def = tf.graph_util.remove_training_nodes(graph_def)
    else:
        graph_def = tf.compat.v1.GraphDef()
        with open(output_graph, "rb") as f:
            graph_def.ParseFromString(f.read())
        graph_def = tf.compat.v1.graph_util.remove_training_nodes(graph_def)

    with tf.Graph().as_default() as graph:
        tf.graph_util.import_graph_def(graph_def, name="")
    return graph.as_graph_def(add_shapes=True)

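# Standalone sketch (not part of the function above; assumes a local TensorFlow
# install): the same version gate, computed once and reused, so each call site
# can pick tf or tf.compat.v1 without repeating the comparison.
import re
import tensorflow as tf
from distutils.version import StrictVersion

_TF_VERSION = StrictVersion(re.search(r"^\d+\.\d+\.\d+", tf.__version__).group(0))
_tf_v1 = tf if _TF_VERSION < StrictVersion("1.13.1") else tf.compat.v1
graph_def = _tf_v1.GraphDef()  # tf.GraphDef on old TF, tf.compat.v1.GraphDef otherwise
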
def global_game_version():
    global _GameVersion
    if _GameVersion is None:
        _GameVersion = _StrictVersion(C.Game.Version)
    return _GameVersion

    _keras.layers.core.Flatten: _layers2.convert_flatten,
    _keras.layers.core.Permute: _layers2.convert_permute,
    _keras.layers.core.Reshape: _layers2.convert_reshape,
    _keras.layers.embeddings.Embedding: _layers2.convert_embedding,
    _keras.layers.core.RepeatVector: _layers2.convert_repeat_vector,
    _keras.layers.core.Dropout: _layers2.default_skip,
    _keras.layers.core.SpatialDropout2D: _layers2.default_skip,
    _keras.layers.core.SpatialDropout1D: _layers2.default_skip,
    _keras.layers.wrappers.TimeDistributed: _layers2.default_skip,
}

from distutils.version import StrictVersion as _StrictVersion

## 2.2 Version check
keras_version = _keras.__version__.rstrip('-tf')
if keras_version >= _StrictVersion('2.2.0'):
    _KERAS_LAYER_REGISTRY[_keras.layers.DepthwiseConv2D] = (
        _layers2.convert_convolution
    )
    _KERAS_LAYER_REGISTRY[_keras.engine.input_layer.InputLayer] = (
        _layers2.default_skip
    )
    if keras_version >= _StrictVersion('2.2.1'):
        _KERAS_LAYER_REGISTRY[_keras.layers.advanced_activations.ReLU] = (
            _layers2.convert_advanced_relu
        )
else:
    _KERAS_LAYER_REGISTRY[_keras.applications.mobilenet.DepthwiseConv2D] = (
        _layers2.convert_convolution
    )
    _KERAS_LAYER_REGISTRY[_keras.engine.topology.InputLayer] = (
        _layers2.default_skip
    )

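# Standalone illustration (hypothetical version strings): the comparison above
# works because StrictVersion's reflected operator parses the bare string, but
# that parse fails for tf.keras-style versions such as "2.2.4-tf", which is why
# the "-tf" suffix is stripped first. Note that rstrip removes the characters
# '-', 't', 'f' from the right, which happens to leave the numeric part intact.
from distutils.version import StrictVersion

assert "2.2.4" >= StrictVersion("2.2.0")      # reflected comparison parses the string
assert "2.2.4-tf".rstrip("-tf") == "2.2.4"    # suffix removed character by character
try:
    _ = "2.2.4-tf" >= StrictVersion("2.2.0")  # raises without the rstrip
except ValueError:
    pass
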
def _constant_propagation(fn, new_graph, constant_nodes, constant_node_num_outputs):
    try:
        if len(constant_nodes) > 0:
            with tf.Graph().as_default() as graph:
                tf.import_graph_def(new_graph, name="")

                # We're only making one call to `sess.run()` in order to compute constant values.
                # In this context, the default optimization settings make everything dramatically
                # slower and more memory-intensive.
                if tf.__version__ < _StrictVersion("1.13.1"):
                    session_config = tf.ConfigProto()
                    session_config.graph_options.optimizer_options.opt_level = (
                        tf.OptimizerOptions.L0
                    )
                    sess = tf.Session(graph=graph, config=session_config)
                else:
                    session_config = tf.compat.v1.ConfigProto()
                    session_config.graph_options.optimizer_options.opt_level = (
                        tf.compat.v1.OptimizerOptions.L0
                    )
                    session_config.graph_options.rewrite_options.disable_meta_optimizer = (
                        True
                    )
                    sess = tf.compat.v1.Session(graph=graph, config=session_config)

                query_list = list()
                control_flow_ops = list()
                for c in constant_nodes:
                    for j in range(constant_node_num_outputs[c]):
                        query = c + ":" + str(j)
                        lower_query = query.lower()
                        if "switch" in lower_query or "cond" in lower_query:
                            control_flow_ops.append(query)
                        else:
                            query_list.append(query)
                result_list = sess.run(query_list)
                result = {query_list[i]: result_list[i] for i in range(len(query_list))}

                # propagate switch one by one
                for op in control_flow_ops:
                    try:
                        res = sess.run([op])
                        result.update({op: res[0]})
                    except:
                        logging.warning(
                            '[Constant Propagation] Skip "dead" tensor: {}'.format(op)
                        )
                        result.update({op: None})
                sess.close()

            for k, v in fn.graph.items():
                if k in constant_node_num_outputs:
                    if constant_node_num_outputs[k] == 1:
                        result_entry = k + ":0"
                        try:
                            v.value, v.datatype = numpy_val_to_builtin_val(result[result_entry])
                        except:
                            logging.error(result_entry)
                            logging.error(result[result_entry])
                    else:
                        values = [
                            result[k + ":" + str(i)]
                            for i in range(constant_node_num_outputs[k])
                        ]
                        try:
                            npval = [numpy_val_to_builtin_val(i) for i in values]
                            v.datatype = types.tuple(tuple([val[1] for val in npval]))
                            v.value = v.datatype()
                            for idx, val in enumerate(npval):
                                v.value.val[idx] = val[0]
                        except:
                            logging.error(values)

            for k, v in fn.graph.items():
                if v.op == "get_tuple":
                    inp = fn.graph[v.inputs[0]]
                    idx = v.attr["index"]
                    if inp.value is not None:
                        v.value = inp.value.val[idx]
                        v.datatype = inp.datatype.T[idx]
    except Exception as e:
        logging.exception("Constant Propagation pass failed: {}".format(e))

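# Standalone sketch (assumes a TensorFlow build where tf.compat.v1 is
# available): the "no graph optimization" session setup used above, isolated.
# Disabling optimization makes sense when the graph is evaluated only once, so
# the optimizer's work would never pay for itself.
import tensorflow as tf

config = tf.compat.v1.ConfigProto()
config.graph_options.optimizer_options.opt_level = tf.compat.v1.OptimizerOptions.L0
config.graph_options.rewrite_options.disable_meta_optimizer = True
sess = tf.compat.v1.Session(config=config)
sess.close()
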
def test_conv(
    self,
    use_cpu_only,
    backend,
    op,
    padding,
    data_format,
    spatial_dim_and_ks,
    strides,
    dilations,
    batch_size,
    groups,
):
    # tensorflow supports groupwise convolution only for version > tf.2.5.0-rc3
    if _get_version(_tf.__version__) < _StrictVersion("2.5.0") and groups != 1:
        return

    if op == tf.keras.layers.Conv3D and groups != 1:
        pytest.xfail(
            "rdar://81629932 (Conv3d with group > 1 tests failing in TF2.0 converter)"
        )

    # TF does not support strides > 1 in conjunction with dilation_rate > 1
    for i, stride in enumerate(strides):
        if stride > 1 and dilations[i] > 1:
            return

    # Dilations with Conv3D not supported yet, since SpaceToBatchND is only supported for ranks 3 or 4
    for d in dilations:
        if d > 1 and op == tf.keras.layers.Conv3D:
            return

    s1, s2, s3, k1, k2, k3 = spatial_dim_and_ks
    c_in, c_out = 2, 4
    input_shape = None
    kernel_size = None
    if op == tf.keras.layers.Conv1D:
        input_shape = (batch_size, s3, c_in)
        kernel_size = k3
        strides = strides[2]
        dilations = dilations[2]
    elif op == tf.keras.layers.Conv2D:
        input_shape = (batch_size, s2, s3, c_in)
        kernel_size = (k2, k3)
        strides = (strides[1], strides[2])
        dilations = dilations[1:]
    elif op == tf.keras.layers.Conv3D:
        input_shape = (batch_size, s1, s2, s3, c_in)
        kernel_size = (k1, k2, k3)

    model = tf.keras.Sequential([
        op(
            batch_input_shape=input_shape,
            filters=c_out,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding.upper(),
            data_format=data_format,
            dilation_rate=dilations,
            groups=groups,
        )
    ])

    TensorFlowBaseTest.run_compare_tf_keras(
        model,
        [random_gen(input_shape, rand_min=-10, rand_max=10)],
        use_cpu_only=use_cpu_only,
        backend=backend,
    )

    _keras.layers.Multiply,
    _keras.layers.Average,
    _keras.layers.Maximum,
    _keras.layers.Concatenate,
    _keras.layers.Dot,
]

_KERAS_SKIP_LAYERS = [
    _keras.layers.core.Dropout,
    _keras.layers.core.SpatialDropout1D,
    _keras.layers.core.SpatialDropout2D,
]

from distutils.version import StrictVersion as _StrictVersion

if _keras.__version__.rstrip('-tf') >= _StrictVersion('2.2.0'):
    from tensorflow.python.keras.engine.input_layer import InputLayer
else:
    from keras.engine.topology import InputLayer


def _to_list(x):
    if type(x) is not list:
        return [x]
    else:
        return x


def _insert_to_dict(d, key, e):
    # d is a dict where key maps to a list
    if key not in d:

# ---------------------------------------------------------------------------------------
HAS_SKLEARN = True
SKLEARN_MIN_VERSION = '0.15'


def __get_sklearn_version(version):
    # matching 0.15b, 0.16bf, etc
    version_regex = r'^\d+\.\d+'
    version = _re.search(version_regex, str(version)).group(0)
    return _StrictVersion(version)


try:
    import sklearn

    if __get_sklearn_version(sklearn.__version__) < _StrictVersion(SKLEARN_MIN_VERSION):
        HAS_SKLEARN = False
        _logging.warn(('scikit-learn version %s is not supported. Minimum required version: %s. '
                       'Disabling scikit-learn conversion API.')
                      % (sklearn.__version__, SKLEARN_MIN_VERSION))
except:
    HAS_SKLEARN = False

# ---------------------------------------------------------------------------------------
HAS_LIBSVM = True
try:
    import svm
except:
    HAS_LIBSVM = False

def __get_sklearn_version(version):
    # matching 0.15b, 0.16bf, etc
    version_regex = r'^\d+\.\d+'
    version = _re.search(version_regex, str(version)).group(0)
    return _StrictVersion(version)

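# Standalone usage sketch (hypothetical helper mirroring the regex above): the
# two-part pattern keeps only "major.minor", so suffixed or patch releases such
# as "0.15b" or "0.16.1" still parse under StrictVersion.
import re
from distutils.version import StrictVersion


def _sklearn_major_minor(version):
    return StrictVersion(re.search(r"^\d+\.\d+", str(version)).group(0))


assert _sklearn_major_minor("0.15b") == StrictVersion("0.15")
assert _sklearn_major_minor("0.16.1") < StrictVersion("0.17")
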
from turicreate._deps.minimal_package import is_minimal_pkg


def __get_version(version):
    # matching 1.6.1, and 1.6.1rc, 1.6.1.dev
    version_regex = r"^\d+\.\d+\.\d+"
    version = _re.search(version_regex, str(version)).group(0)
    return _StrictVersion(version)


HAS_PANDAS = True
PANDAS_MIN_VERSION = "0.13.0"
try:
    import pandas

    if __get_version(pandas.__version__) < _StrictVersion(PANDAS_MIN_VERSION):
        HAS_PANDAS = False
        _logging.warn((
            "Pandas version %s is not supported. Minimum required version: %s. "
            "Pandas support will be disabled.")
            % (pandas.__version__, PANDAS_MIN_VERSION))
except:
    HAS_PANDAS = False
    from . import pandas_mock as pandas

HAS_NUMPY = True
NUMPY_MIN_VERSION = "1.8.0"
try:
    import numpy

    if __get_version(numpy.__version__) < _StrictVersion(NUMPY_MIN_VERSION):

    _keras.layers.Multiply,
    _keras.layers.Average,
    _keras.layers.Maximum,
    _keras.layers.Concatenate,
    _keras.layers.Dot,
]

_KERAS_SKIP_LAYERS = [
    _keras.layers.core.Dropout,
    _keras.layers.core.SpatialDropout1D,
    _keras.layers.core.SpatialDropout2D,
]

from distutils.version import StrictVersion as _StrictVersion

if _keras.__version__ >= _StrictVersion('2.2.0'):
    from keras.engine.input_layer import InputLayer
else:
    from keras.engine.topology import InputLayer


def _to_list(x):
    if type(x) is not list:
        return [x]
    else:
        return x


def _insert_to_dict(d, key, e):
    # d is a dict where key maps to a list
    if key not in d: