コード例 #1
0
    def test_control_flow(self):
        # Round-trip a tf.function with a data-dependent branch through
        # SavedModel and verify the converted Core ML model matches TF.
        @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
        def control_flow(x):
            if x <= 0:
                return 0.
            else:
                return x * 3.

        module = tf.Module()
        module.control_flow = control_flow

        export_dir = tempfile.mkdtemp()
        tf.saved_model.save(module, export_dir)
        loaded = tf.saved_model.load(export_dir)
        concrete_func = loaded.signatures[
            tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]

        mlmodel = coremltools.converters.tensorflow.convert(
            [concrete_func], inputs={'x': (1, )}, outputs=['Identity'])
        self.assertTrue(isinstance(mlmodel, coremltools.models.MLModel))

        # Compare TF and Core ML predictions point-wise on generated scalars.
        for sample in generate_data(shape=[20]):
            expected = module.control_flow(sample).numpy().flatten()
            actual = mlmodel.predict(
                {'x': np.array([sample])})['Identity'].flatten()
            np.testing.assert_array_almost_equal(expected, actual, decimal=2)
コード例 #2
0
def run_single_case(args):
    """Run a single CTC loss test case.

    Builds and runs the PopART graph, compares its loss and gradient against
    torch's CTC implementation, and returns (grad_err, loss_err).
    """
    np.random.seed(42)

    inputs, target, input_length_data, target_length_data = \
        test_utils.generate_data(args)
    result, log_probs, gradients, nll = build_and_run_popart_graph(
        inputs, target, input_length_data, target_length_data, args)

    ipu_grad = result[gradients]
    ipu_loss = result[nll]
    print(ipu_grad)

    pytorch_loss, pytorch_res = torch_ctc_loss(
        result[log_probs], target, input_length_data, target_length_data, args)

    # Swap the two leading axes so the IPU gradient layout matches torch's.
    ipu_grad = np.transpose(ipu_grad, (1, 0, 2)).reshape(pytorch_res.shape)

    grad_err = test_utils.getTensorError(ipu_grad, pytorch_res)
    loss_err = test_utils.getTensorError(ipu_loss, pytorch_loss)
    print("Gradient Error", grad_err)
    print("Loss Error", loss_err)

    print(f"IPU Loss: {ipu_loss}")
    print("Grad result: " + ("Pass" if grad_err < 1e-4 else "FAIL"))
    print("Loss result: " + ("Pass" if loss_err < 1e-5 else "FAIL"))
    return grad_err, loss_err
コード例 #3
0
    def test_subclassed_keras_model(self):
        class MyModel(tf.keras.Model):
            """Two stacked Dense layers defined via model subclassing."""

            def __init__(self):
                super(MyModel, self).__init__()
                self.dense1 = tf.keras.layers.Dense(4)
                self.dense2 = tf.keras.layers.Dense(5)

            @tf.function
            def call(self, input_data):
                return self.dense2(self.dense1(input_data))

        keras_model = MyModel()
        inputs = generate_data(shape=(4, 4))

        # subclassed model can only be saved as SavedModel format
        keras_model._set_inputs(inputs)
        keras_model.save(self.saved_model_dir, save_format='tf')

        # Derive graph tensor names for the converter's input/output specs.
        input_name = keras_model.inputs[0].name.split(':')[0]
        output_name = keras_model.outputs[0].name.split(':')[0].split('/')[-1]

        core_ml_model = coremltools.converters.tensorflow.convert(
            self.saved_model_dir,
            inputs={input_name: (4, 4)},
            outputs=[output_name])
        self.assertTrue(isinstance(core_ml_model, coremltools.models.MLModel))
        self._test_prediction(keras_model=keras_model,
                              core_ml_model=core_ml_model,
                              inputs=inputs)
コード例 #4
0
def run_single_case(args, splits=None):
    """Run a single RNN-T loss test case.

    Builds and runs the graph on IPU, then compares the resulting loss and
    gradient against the PyTorch reference implementation.

    Parameters
    ----------
    args : test-case parameters consumed by test_utils and the graph builder.
    splits : optional list forwarded to build_and_run_graph. Defaults to an
        empty list; None is used as the sentinel to avoid the shared
        mutable-default-argument pitfall.

    Returns
    -------
    (grad_err, loss_err) : gradient and loss error metrics.
    """
    if splits is None:
        splits = []
    np.random.seed(42)

    (inputs, target, input_length_data,
     target_length_data) = test_utils.generate_data(args)
    (
        result,
        compacted_probs,
        gradients,
        compacted_gradients,
        nll,
    ) = build_and_run_graph(inputs,
                            target,
                            input_length_data,
                            target_length_data,
                            args,
                            splits=splits)
    ipu_grad = result[gradients]
    ipu_loss = result[nll]

    pytorch_loss, pytorch_grads_wlogits, pytorch_grads_wlogprobs = reference_rnnt_loss(
        inputs, target, input_length_data, target_length_data)
    grad_err = test_utils.getTensorError(ipu_grad, pytorch_grads_wlogits)
    loss_err = test_utils.getTensorError(ipu_loss, pytorch_loss)
    print("Gradient Error", grad_err)
    print("Loss Error", loss_err)

    print(f"IPU Loss: {ipu_loss}")
    print("Grad result: " + ("Pass" if grad_err < 1e-5 else "FAIL"))
    print("Loss result: " + ("Pass" if loss_err < 1e-5 else "FAIL"))
    return grad_err, loss_err
コード例 #5
0
def test_rnn_no_memory_pass():
    """SAM forward/backward with pass_through_memory=False.

    Runs several forward passes without passing memory through, sums the
    outputs, backprops once, and checks output/hidden shapes. With
    pass_through_memory=False the returned read vectors stay None.
    """
    T.manual_seed(1111)

    input_size = 100
    hidden_size = 100
    rnn_type = 'gru'
    num_layers = 3
    num_hidden_layers = 5
    dropout = 0.2
    nr_cells = 5000
    cell_size = 17
    sparse_reads = 3
    gpu_id = -1  # -1 => run on CPU
    debug = True
    lr = 0.001
    batch_size = 10
    cuda = gpu_id
    clip = 20
    length = 13

    rnn = SAM(input_size=input_size,
              hidden_size=hidden_size,
              rnn_type=rnn_type,
              num_layers=num_layers,
              num_hidden_layers=num_hidden_layers,
              dropout=dropout,
              nr_cells=nr_cells,
              cell_size=cell_size,
              sparse_reads=sparse_reads,
              gpu_id=gpu_id,
              debug=debug)

    optimizer = optim.Adam(rnn.parameters(), lr=lr)
    optimizer.zero_grad()

    input_data, target_output = generate_data(batch_size, length, input_size,
                                              cuda)
    target_output = target_output.transpose(0, 1).contiguous()

    (chx, mhx, rv) = (None, None, None)
    outputs = []
    for x in range(6):
        output, (chx, mhx, rv), v = rnn(input_data, (chx, mhx, rv),
                                        pass_through_memory=False)
        output = output.transpose(0, 1)
        outputs.append(output)

    # Sum the per-pass outputs and backprop through all of them.
    output = functools.reduce(lambda x, y: x + y, outputs)
    loss = criterion(output, target_output)
    loss.backward()

    # clip_grad_norm was deprecated/removed in newer PyTorch; use the
    # in-place variant for consistency with the other tests in this file.
    T.nn.utils.clip_grad_norm_(rnn.parameters(), clip)
    optimizer.step()

    assert target_output.size() == T.Size([27, 10, 100])
    assert chx[0].size() == T.Size([num_hidden_layers, 10, 100])
    # assert mhx['memory'].size() == T.Size([10,12,17])
    assert rv is None
コード例 #6
0
def test_rnn_n():
    """Single forward/backward pass through an SDNC; validates tensor shapes."""
    T.manual_seed(1111)

    # Configuration kept in locals only where reused below.
    input_size = 100
    num_hidden_layers = 5
    gpu_id = -1  # -1 => run on CPU
    cuda = gpu_id
    batch_size = 10
    length = 13
    clip = 20

    rnn = SDNC(input_size=input_size,
               hidden_size=100,
               rnn_type='gru',
               num_layers=3,
               num_hidden_layers=num_hidden_layers,
               dropout=0.2,
               nr_cells=200,
               cell_size=17,
               read_heads=2,
               sparse_reads=4,
               temporal_reads=3,
               gpu_id=gpu_id,
               debug=True)

    optimizer = optim.Adam(rnn.parameters(), lr=0.001)
    optimizer.zero_grad()

    input_data, target_output = generate_data(batch_size, length, input_size,
                                              cuda)
    # Swap the first two axes of the target to match the model output layout.
    target_output = target_output.transpose(0, 1).contiguous()

    output, (chx, mhx, rv), v = rnn(input_data, None)
    output = output.transpose(0, 1)

    loss = criterion(output, target_output)
    loss.backward()

    T.nn.utils.clip_grad_norm_(rnn.parameters(), clip)
    optimizer.step()

    assert target_output.size() == T.Size([27, 10, 100])
    assert chx[0].size() == T.Size([num_hidden_layers, 10, 100])
    # assert mhx['memory'].size() == T.Size([10,12,17])
    # rv is 10 x 34 -- presumably read_heads * cell_size = 2 * 17; confirm.
    assert rv.size() == T.Size([10, 34])
コード例 #7
0
def test_rnn_1():
    """One forward/backward pass through a DNI-wrapped SAM; checks shapes."""
    T.manual_seed(1111)

    input_size = 100
    hidden_size = 100
    rnn_type = 'lstm'
    num_layers = 1
    num_hidden_layers = 1
    dropout = 0
    nr_cells = 100
    cell_size = 10
    read_heads = 1
    sparse_reads = 2
    gpu_id = -1  # -1 => run on CPU
    debug = True
    lr = 0.001
    batch_size = 10
    cuda = gpu_id
    clip = 10
    length = 10

    rnn = SAM(input_size=input_size,
              hidden_size=hidden_size,
              rnn_type=rnn_type,
              num_layers=num_layers,
              num_hidden_layers=num_hidden_layers,
              dropout=dropout,
              nr_cells=nr_cells,
              cell_size=cell_size,
              read_heads=read_heads,
              sparse_reads=sparse_reads,
              gpu_id=gpu_id,
              debug=debug)

    optimizer = optim.Adam(rnn.parameters(), lr=lr)
    rnn = DNI(rnn, optim=optimizer)
    optimizer.zero_grad()

    input_data, target_output = generate_data(batch_size, length, input_size,
                                              cuda)
    target_output = target_output.transpose(0, 1).contiguous()

    output, (chx, mhx, rv), v = rnn(input_data, None)
    output = output.transpose(0, 1)

    loss = criterion(output, target_output)
    loss.backward()

    # clip_grad_norm was deprecated/removed in newer PyTorch; use the
    # in-place variant for consistency with the other tests in this file.
    T.nn.utils.clip_grad_norm_(rnn.parameters(), clip)

    assert target_output.size() == T.Size([21, 10, 100])
    assert chx[0][0][0].size() == T.Size([10, 100])
    # assert mhx['memory'].size() == T.Size([10,1,1])
    assert rv.size() == T.Size([10, 10])
コード例 #8
0
ファイル: test_pyhmmsvi.py プロジェクト: dillonalaird/pyhmm
def test_basic1():
    """Smoke-test HMMSVI on synthetic 2-state Gaussian data (Python 2).

    Generates observations from a known two-state HMM, runs stochastic
    variational inference, and prints true vs. learned emission parameters
    plus the label-permutation-invariant hamming distance of the decoded
    state sequence for manual inspection.
    """
    D = 2    # observation dimension
    N = 100  # number of observations
    pi = np.array([0.99, 0.01])                    # initial state distribution
    A = np.array([[0.90, 0.10], [0.10, 0.90]])     # transition matrix
    # True emission means/covariances for the two states.
    mus = np.array([[0., 0.], [20., 20.]])
    sigmas = np.array([np.eye(2), 5. * np.eye(2)])
    kappa = 0.5
    nu = 5

    params = [[mus[0], sigmas[0]], [mus[1], sigmas[1]]]

    print 'label 1 true'
    print mus[0]
    print sample_invwishart(sigmas[0], nu)
    print kappa
    print nu

    print 'label 2 true'
    print mus[1]
    print sample_invwishart(sigmas[1], nu)
    print kappa
    print nu

    obs, sts = generate_data(D, N, pi, A, params)

    # Priors: symmetric Dirichlet on transitions, NIW on each emission.
    A_0 = 2 * np.ones((2, 2))
    emits = [
        NIW(np.ones(2), np.eye(2), kappa, nu),
        NIW(20 * np.ones(2), 5. * np.eye(2), kappa, nu)
    ]
    hmm = HMMSVI(obs, A_0, emits, 1., 0.7)
    hmm.infer(10, 10, HMMSVI.metaobs_unif, 20)

    # Decode states and compare to truth under both label orderings,
    # since state labels are only identifiable up to permutation.
    var_x = hmm.full_local_update()
    sts_pred = np.argmax(var_x, axis=1)
    print 'hamming distance = ', np.min([
        hamming(np.array([0, 1])[sts_pred], sts),
        hamming(np.array([1, 0])[sts_pred], sts)
    ])

    print 'label 1 learned'
    print hmm.emits[0].mu_N
    print sample_invwishart(hmm.emits[0].sigma_N, hmm.emits[0].nu_N)
    print hmm.emits[0].kappa_N
    print hmm.emits[0].nu_N

    print 'label 2 learned'
    print hmm.emits[1].mu_N
    print sample_invwishart(hmm.emits[1].sigma_N, hmm.emits[1].nu_N)
    print hmm.emits[1].kappa_N
    print hmm.emits[1].nu_N
コード例 #9
0
    def _test_model(self,
                    keras_model,
                    model_path,
                    inputs,
                    outputs=None,
                    decimal=4,
                    use_cpu_only=False,
                    verbose=False):
        """Save, convert, and numerically validate a Keras model.

        Assumes a single input and a single output. Returns the converted
        Core ML model.
        """
        keras_model.save(model_path)

        # Convert the saved model and sanity-check the result type.
        core_ml_model = coremltools.converters.tensorflow.convert(
            model_path, inputs=inputs, outputs=outputs)
        self.assertTrue(isinstance(core_ml_model, coremltools.models.MLModel))

        if verbose:
            print('TensorFlow Keras model saved at {}'.format(model_path))
            # NOTE(review): reads self.model_path rather than the model_path
            # argument -- confirm this is intended.
            tmp_model_path = self.model_path.rsplit('.')[0] + '.mlmodel'
            core_ml_model.save(tmp_model_path)
            print('Core ML model saved at {}'.format(tmp_model_path))

        # Generate input data from the first (assumed only) input spec.
        name, shape = list(inputs.items())[0]
        data = generate_data(shape=shape)
        keras_prediction = keras_model.predict(data)

        if not outputs:
            # Derive the output name from the Keras model when not supplied.
            output_name = keras_model.outputs[0].name
            outputs = [output_name.split('/')[1].split(':')[0]]

        prediction = core_ml_model.predict(
            {name: data}, use_cpu_only=use_cpu_only)[outputs[0]]

        if verbose:
            print('Shape Keras:', keras_prediction.shape, ' vs. Core ML:',
                  prediction.shape)
            print('Input  :', data.flatten()[:16])
            print('Keras  :', keras_prediction.flatten()[:16])
            print('Core ML:', prediction.flatten()[:16])

        np.testing.assert_array_equal(keras_prediction.shape, prediction.shape)
        np.testing.assert_almost_equal(keras_prediction.flatten(),
                                       prediction.flatten(),
                                       decimal=decimal)

        return core_ml_model
コード例 #10
0
    def _test_conversion_prediction(self, keras_model, model_path, inputs,
                                    outputs):
        """Convert a saved Keras model and compare predictions numerically."""
        core_ml_model = coremltools.converters.tensorflow.convert(
            model_path, inputs=inputs, outputs=outputs)
        self.assertTrue(isinstance(core_ml_model, coremltools.models.MLModel))

        # Validate numerics on freshly generated data.
        data = generate_data(shape=self.input_shape)
        keras_prediction = keras_model.predict(data)

        # Derive graph tensor names for feeding/reading the Core ML model.
        input_name = keras_model.inputs[0].name.split(':')[0]
        output_name = keras_model.outputs[0].name.split(':')[0].split('/')[-1]
        core_ml_prediction = core_ml_model.predict(
            {input_name: data})[output_name]

        np.testing.assert_array_equal(keras_prediction.shape,
                                      core_ml_prediction.shape)
        np.testing.assert_almost_equal(keras_prediction.flatten(),
                                       core_ml_prediction.flatten(),
                                       decimal=4)
コード例 #11
0
    def _test_keras_model_tf2(self, model, data_mode, decimal, use_cpu_only, has_variables, verbose):
        """Save a TF2 Keras model, convert to Core ML, and compare predictions."""
        core_ml_model_file = self.model_file.rsplit('.')[0] + '.mlmodel'

        # Collect input shapes, replacing unknown (None) dims with 1 so the
        # converter sees fully static shapes.
        input_dict = {}
        for inp in model.inputs:
            input_dict[inp.op.name] = [
                1 if dim is None else dim for dim in inp.shape.as_list()
            ]
        output_list = ['Identity']
        model.save(self.model_file)

        # convert Keras model into Core ML model format
        core_ml_model = coremltools.converters.tensorflow.convert(
            filename=self.model_file,
            inputs=input_dict,
            outputs=output_list,
            use_cpu_only=use_cpu_only)

        if verbose:
            print('\nKeras model saved at {}'.format(self.model_file))
            print('\nCore ML model description:')
            from coremltools.models.neural_network.printer import print_network_spec
            print_network_spec(core_ml_model.get_spec(), style='coding')
            core_ml_model.save(core_ml_model_file)
            print('\nCore ML model saved at {}'.format(core_ml_model_file))

        core_ml_inputs = {
            name: generate_data(shape, data_mode)
            for name, shape in input_dict.items()
        }

        # Run both models; the Keras side assumes a single input tensor.
        keras_output = model.predict(list(core_ml_inputs.values())[0])
        core_ml_output = core_ml_model.predict(
            core_ml_inputs, useCPUOnly=use_cpu_only)[output_list[0]]

        if verbose:
            print('\nPredictions', keras_output.shape, ' vs.', core_ml_output.shape)
            print(keras_output.flatten()[:6])
            print(core_ml_output.flatten()[:6])

        np.testing.assert_array_equal(keras_output.shape, core_ml_output.shape)
        np.testing.assert_almost_equal(
            keras_output.flatten(), core_ml_output.flatten(), decimal=decimal)
コード例 #12
0
    def _test_model(self,
                    keras_model,
                    model_path,
                    inputs,
                    outputs,
                    decimal=4,
                    verbose=False):
        """Save, convert (targeting iOS 13), and numerically validate a model.

        Assumes a single input; ``outputs[0]`` names the Core ML output.
        """
        keras_model.save(model_path)

        core_ml_model = coremltools.converters.tensorflow.convert(
            model_path, inputs=inputs, outputs=outputs, target_ios='13')
        assert isinstance(core_ml_model, coremltools.models.MLModel)

        if verbose:
            print('TensorFlow Keras model saved at {}'.format(model_path))
            # NOTE(review): reads self.model_path rather than the model_path
            # argument -- confirm this is intended.
            tmp_model_path = self.model_path.rsplit('.')[0] + '.mlmodel'
            core_ml_model.save(tmp_model_path)
            print('Core ML model saved at {}'.format(tmp_model_path))

        # Generate input data from the first (assumed only) input spec.
        name, shape = list(inputs.items())[0]
        data = generate_data(shape=shape)

        keras_prediction = keras_model.predict(data)
        core_ml_prediction = core_ml_model.predict({name: data})[outputs[0]]

        if verbose:
            print('Shape Keras:', keras_prediction.shape, ' vs. Core ML:',
                  core_ml_prediction.shape)
            print('Input  :', data.flatten()[:16])
            print('Keras  :', keras_prediction.flatten()[:16])
            print('Core ML:', core_ml_prediction.flatten()[:16])

        np.testing.assert_array_equal(keras_prediction.shape,
                                      core_ml_prediction.shape)
        np.testing.assert_almost_equal(keras_prediction.flatten(),
                                       core_ml_prediction.flatten(),
                                       decimal=decimal)
コード例 #13
0
def test_basic3():
    """Mean-field variational inference for a 2-state Gaussian HMM (Python 2).

    Samples "true" emission parameters from a NIW prior, generates data,
    then runs `iters` rounds of coordinate-ascent updates on the transition
    (Dirichlet) and emission (NIW) natural parameters, printing true vs.
    learned parameters for manual inspection.
    """
    N = 100  # number of observations
    D = 2    # observation dimension
    pi = np.array([0.999, 0.001])             # initial state distribution
    A = np.array([[0.9, 0.1], [0.1, 0.9]])    # true transition matrix

    # NIW prior hyperparameters for the two emission distributions.
    mus_0 = np.array([[0., 0.], [20., 20.]])
    sigmas_0 = np.array([np.eye(2), 5 * np.eye(2)])
    kappa_0 = 0.5
    nu_0 = 5

    # Sample the "true" emission parameters from the prior.
    mu_1, sigma_1 = sample_niw(mus_0[0], sigmas_0[0], kappa_0, nu_0)
    mu_2, sigma_2 = sample_niw(mus_0[1], sigmas_0[1], kappa_0, nu_0)

    params = [[mu_1, sigma_1], [mu_2, sigma_2]]

    obs, _ = generate_data(D, N, pi, A, params)

    #plt.scatter(obs[:,0], obs[:,1])
    #plt.show()

    # Symmetric Dirichlet prior on transitions; data-driven initialization
    # for the emission statistics used below.
    A_0 = 2 * np.ones((A.shape[0], A.shape[0]))
    mus_N = np.array([np.mean(obs, axis=0), np.mean(obs, axis=0)])
    sigmas_N = 0.75 * np.array([np.cov(obs.T), np.cov(obs.T)])

    A_nat_0 = A_0 - 1
    # Natural-parameter form of the NIW prior for each state.
    n1s_0 = [
        kappa_0 * mus_N[0], kappa_0,
        sigmas_N[0] + kappa_0 * np.outer(mus_N[0], mus_N[0]), nu_0 + D + 2
    ]
    n2s_0 = [
        kappa_0 * mus_N[1], kappa_0,
        sigmas_N[1] + kappa_0 * np.outer(mus_N[1], mus_N[1]), nu_0 + D + 2
    ]

    # Shallow copies; the loop below rebinds these each iteration.
    A_nat_N = A_nat_0[:]
    n1s_N = n1s_0[:]
    n2s_N = n2s_0[:]

    iters = 20

    print 'A true'
    print A

    print 'label 1 true'
    #print mus_0[0]
    #print sigmas_0[0]
    print mu_1
    print sigma_1

    print 'label 2 true'
    #print mus_0[1]
    #print sigmas_0[1]
    print mu_2
    print sigma_2

    print 'A_nat_0'
    print A_nat_0

    for _ in xrange(iters):
        # Convert current natural parameters back to standard form.
        mu_1N, sigma_1N, kappa_1N, nu_1N = natural_to_standard(
            n1s_N[0], n1s_N[1], n1s_N[2], n1s_N[3])
        mu_2N, sigma_2N, kappa_2N, nu_2N = natural_to_standard(
            n2s_N[0], n2s_N[1], n2s_N[2], n2s_N[3])

        # Expected log-likelihood of each observation under each state.
        lliks1 = niw.expected_log_likelihood(obs, mu_1N, sigma_1N, kappa_1N,
                                             nu_1N)
        lliks2 = niw.expected_log_likelihood(obs, mu_2N, sigma_2N, kappa_2N,
                                             nu_2N)
        lliks = np.vstack((lliks1, lliks2)).T.copy(order='C')

        lA_mod = dir.expected_sufficient_statistics(A_nat_N + 1)
        A_mod = np.exp(lA_mod)

        # Forward-backward message passing under the modified transitions.
        lalpha = fb.forward_msgs(pi, A_mod, lliks)
        lbeta = fb.backward_msgs(A_mod, lliks)

        lexpected_states, expected_transcounts = fb.expected_statistics(
            pi, A_mod, lliks, lalpha, lbeta)

        #lexpected_states = lalpha + lbeta
        #lexpected_states -= np.max(lexpected_states, axis=1)[:,np.newaxis]
        # Exponentiate and row-normalize to per-observation state weights.
        expected_states = np.exp(lexpected_states)
        expected_states /= np.sum(expected_states, axis=1)[:, np.newaxis]

        #A_ss = np.zeros_like(A_nat_0)
        #for i in xrange(1,expected_states.shape[0]):
        #    A_ss += np.outer(expected_states[i-1], expected_states[i])
        A_ss = dir.sufficient_statistics(expected_states)

        # convert to natural parameter?
        A_ss -= 1

        A_nat_N = dir.meanfield_update(A_nat_0, A_ss)

        # Per-state expected sufficient statistics for the NIW updates.
        s11, s12, s13 = niw.expected_sufficient_statistics(
            obs, expected_states[:, 0].copy(order='C'))
        s21, s22, s23 = niw.expected_sufficient_statistics(
            obs, expected_states[:, 1].copy(order='C'))
        s1s = np.array([s11, s21])
        s2s = np.array([s12, s22])
        s3s = np.array([s13, s23])

        n11, n12, n13, n14 = niw.meanfield_update(n1s_0[0], n1s_0[1], n1s_0[2],
                                                  n1s_0[3], s1s[0], s2s[0],
                                                  s3s[0])
        n21, n22, n23, n24 = niw.meanfield_update(n2s_0[0], n2s_0[1], n2s_0[2],
                                                  n2s_0[3], s1s[1], s2s[1],
                                                  s3s[1])

        n1s_N = [n11, n12, n13, n14]
        n2s_N = [n21, n22, n23, n24]

    print 'A learned'
    # Draw a sample transition matrix from the learned Dirichlet posterior.
    A_sample = np.array([
        stats.dirichlet.rvs(A_nat_N[i, :] + 1, size=1)[0]
        for i in xrange(A.shape[0])
    ])
    print A_sample

    mu_0, sigma_0, kappa_0, nu_0 = natural_to_standard(n11, n12, n13, n14)
    mu_1, sigma_1 = sample_niw(mu_0, sigma_0, kappa_0, nu_0)
    print 'label 1 learned'
    print mu_1
    print sigma_1

    mu_0, sigma_0, kappa_0, nu_0 = natural_to_standard(n21, n22, n23, n24)
    mu_2, sigma_2 = sample_niw(mu_0, sigma_0, kappa_0, nu_0)
    print 'label 2 learned'
    print mu_2
    print sigma_2
コード例 #14
0
ファイル: test_base.py プロジェクト: yehorchankov/coremltools
    def _test_tf_model(
        self,
        graph,
        input_shapes,
        output_node_names,
        data_mode='random',
        input_refs=None,
        delta=1e-2,
        use_cpu_only=False,
        graph_optimizations="freeze",  # one of ["freeze", "convert_variables_to_constants", None]
        quantize_tf_model=False):
        """
        Common entry to testing routine.
        graph - defined TensorFlow graph.
        input_shapes -  dict str:shape for each input op (placeholder)
        output_node_names - output_node_names, a list of strings
        data_mode - auto-generated input vectors, can be 'random', 'zeros', 'ones', 'linear', etc.
        input_refs - a dictionary of reference input in tensorFlow axis order, each entry is str:shape.
            When using auto-generated input vectors, set input_refs to None.
        delta - maximum difference of normalized TensorFlow and CoreML outputs
        use_cpu_only - If True, instantiate and run CoreML model with CPU only
        graph_optimizations == "freeze" - Force TensorFlow graph to be frozen before converting.
        quantize_tf_model - If True, try to quantize TensorFlow model before converting
        """
        # Some file processing
        model_dir = tempfile.mkdtemp()
        graph_def_file = os.path.join(model_dir, 'tf_graph.pb')
        checkpoint_file = os.path.join(model_dir, 'tf_model.ckpt')
        static_model_file = os.path.join(model_dir, 'tf_static.pb')
        coreml_model_file = os.path.join(model_dir, 'coreml_model.mlmodel')

        # add a saver (only needed when the graph will be frozen)
        tf.reset_default_graph()
        if graph_optimizations == "freeze":
            with graph.as_default() as g:
                saver = tf.train.Saver()

        # Build the feed dict either from auto-generated data or from the
        # caller-provided reference inputs.
        if input_refs is None:
            feed_dict = {
                self._get_tf_tensor_name(graph, name):
                generate_data(input_shapes[name], data_mode)
                for name in input_shapes
            }
        else:
            feed_dict = {
                self._get_tf_tensor_name(graph, name): input_refs[name]
                for name in list(input_refs.keys())
            }

        with tf.Session(graph=graph) as sess:
            # initialize
            initializer_op = tf.global_variables_initializer()
            sess.run(initializer_op)
            # run the result
            fetches = [
                graph.get_operation_by_name(name).outputs[0]
                for name in output_node_names
            ]
            result = sess.run(fetches, feed_dict=feed_dict)
            # save graph definition somewhere
            tf.train.write_graph(sess.graph,
                                 model_dir,
                                 graph_def_file,
                                 as_text=False)
            # save the weights if freezing is needed; otherwise the graph
            # def itself is already the static model
            if not graph_optimizations:
                static_model_file = graph_def_file
            elif graph_optimizations == "freeze":
                saver.save(sess, checkpoint_file)
                self._simple_freeze(
                    input_graph=graph_def_file,
                    input_checkpoint=checkpoint_file,
                    output_graph=static_model_file,
                    output_node_names=",".join(output_node_names))
            else:
                # graph_optimizations == "convert_variables_to_constants"
                output_graph_def = tf.graph_util.convert_variables_to_constants(
                    sess, graph.as_graph_def(), output_node_names)
                with tf.gfile.GFile(static_model_file, "wb") as f:
                    f.write(output_graph_def.SerializeToString())

        # if TF needs to be quantized, quantize the graph
        if quantize_tf_model:
            static_model_file = self._quantize_static_tf_model(
                model_dir, static_model_file, output_node_names)

        # convert to CoreML
        mlmodel = coremltools.converters.tensorflow.convert(
            static_model_file,
            inputs=input_shapes,
            outputs=output_node_names,
            use_cpu_only=use_cpu_only)

        if DEBUG:
            print('\n mlmodel description: \n')
            from coremltools.models.neural_network.printer import print_network_spec
            print_network_spec(mlmodel.get_spec(), style='coding')
            mlmodel.save(coreml_model_file)
            print('\n mlmodel saved at %s' % coreml_model_file)

        # Transpose input data as CoreML requires
        coreml_inputs = {
            name: tf_transpose(feed_dict[self._get_tf_tensor_name(graph,
                                                                  name)])
            for name in input_shapes
        }

        # Run predict in CoreML
        coreml_output = mlmodel.predict(coreml_inputs, useCPUOnly=use_cpu_only)

        for idx, out_name in enumerate(output_node_names):
            tf_out = result[idx]
            # Promote scalar outputs to 1-element arrays for flattening.
            if len(tf_out.shape) == 0:
                tf_out = np.array([tf_out])

            tp = tf_out.flatten()
            coreml_out = coreml_output[out_name]
            cp = coreml_out.flatten()

            self.assertTrue(tf_out.shape == coreml_out.shape)
            # Element-wise comparison normalized by max(1, tf, coreml) so
            # large magnitudes are compared relatively, small ones absolutely.
            for i in range(len(tp)):
                max_den = max(1.0, tp[i], cp[i])
                self.assertAlmostEqual(tp[i] / max_den,
                                       cp[i] / max_den,
                                       delta=delta)

        # Cleanup files - models on disk no longer useful
        if os.path.exists(model_dir):
            shutil.rmtree(model_dir)
0
ファイル: test_base.py プロジェクト: yehorchankov/coremltools
    def _test_tf_model_constant(self,
                                graph,
                                input_shapes,
                                output_node_names,
                                data_mode='random_zero_mean',
                                delta=1e-2,
                                use_cpu_only=False,
                                validate_bool_only=False):
        """
        Common entry to testing routine for graphs that have no variables.

        Parameters
        ----------
        graph: tf.Graph()
            TensorFlow graph.
        input_shapes: dict [str : shape]
            Shapes for each input (placeholder).
        output_node_names: list of str
            Output tensor names.
        data_mode: str
            Data mode for the placeholder data generation.
        delta: float
            Delta for error checking, default 1e-2.
        use_cpu_only: bool
            If true, force use CPU only, default False.
        validate_bool_only: bool
            If true, only validate it's zero or non-zero, otherwise, validate
            float values, default False.
        """

        model_dir = tempfile.mkdtemp()
        frozen_model_file = os.path.join(model_dir, 'tf_frozen.pb')
        coreml_model_file = os.path.join(model_dir, 'coreml_model.mlmodel')

        # Auto-generate input data for every placeholder.
        feed_dict = {
            self._get_tf_tensor_name(graph, name):
            generate_data(input_shapes[name], data_mode)
            for name in input_shapes
        }

        with tf.Session(graph=graph) as sess:
            # initialize
            sess.run(tf.global_variables_initializer())
            # run the result
            fetches = [
                graph.get_operation_by_name(name).outputs[0]
                for name in output_node_names
            ]
            result = sess.run(fetches, feed_dict=feed_dict)

            output_graph_def = tf.graph_util.convert_variables_to_constants(
                sess,  # The session is used to retrieve the weights
                tf.get_default_graph().as_graph_def(
                ),  # The graph_def is used to retrieve the nodes
                output_node_names  # The output node names are used to select the useful nodes
            )
            with tf.gfile.GFile(frozen_model_file, 'wb') as f:
                f.write(output_graph_def.SerializeToString())

        # convert to CoreML
        mlmodel = coremltools.converters.tensorflow.convert(
            frozen_model_file,
            inputs=input_shapes,
            outputs=output_node_names,
            use_cpu_only=use_cpu_only)

        if DEBUG:
            print('\n mlmodel description: \n')
            from coremltools.models.neural_network.printer import print_network_spec
            print_network_spec(mlmodel.get_spec(), style='coding')
            mlmodel.save(coreml_model_file)
            print('\n mlmodel saved at %s' % coreml_model_file)

        # Transpose input data as CoreML requires
        coreml_inputs = {
            name: tf_transpose(feed_dict[self._get_tf_tensor_name(graph,
                                                                  name)])
            for name in input_shapes
        }

        # Run predict in CoreML
        coreml_output = mlmodel.predict(coreml_inputs, useCPUOnly=use_cpu_only)

        for idx, out_name in enumerate(output_node_names):
            tf_out = result[idx]
            # Promote scalar outputs to 1-element arrays for flattening.
            if len(tf_out.shape) == 0:
                tf_out = np.array([tf_out])
            tp = tf_out.flatten()
            coreml_out = coreml_output[out_name]
            cp = coreml_out.flatten()

            self.assertTrue(tf_out.shape == coreml_out.shape,
                            msg=(tf_out.shape, 'vs.', coreml_out.shape))

            # For boolean validation only truthiness matters, not the value.
            if validate_bool_only:
                cp = np.logical_and(cp, cp)
            # Element-wise comparison normalized by max(1, tf, coreml) so
            # large magnitudes are compared relatively, small ones absolutely.
            for i in range(len(tp)):
                max_den = max(1.0, tp[i], cp[i])
                self.assertAlmostEqual(tp[i] / max_den,
                                       cp[i] / max_den,
                                       delta=delta)

        # Cleanup files - models on disk no longer useful
        if os.path.exists(model_dir):
            shutil.rmtree(model_dir)
コード例 #16
0
def test_basic1():
    """Run a single mean-field update for a 2-state Gaussian HMM.

    Samples initial emission parameters for both states from a NIW prior,
    generates observations from the HMM, computes state posteriors with one
    forward/backward pass, accumulates the expected sufficient statistics
    explicitly, applies one mean-field update per state, and prints the
    parameters before and after the update for manual inspection (no
    assertions are made).

    Fix: the original body used Python 2-only syntax (`print` statements,
    `xrange`), which is a syntax error under Python 3; converted to
    `print()` calls and `range` with behavior otherwise unchanged.
    """
    N = 100  # number of observations to generate
    D = 2  # observation dimension
    pi = np.array([0.999, 0.001])  # initial state distribution
    A = np.array([[0.9, 0.1], [0.1, 0.9]])  # state transition matrix
    mus_0 = np.array([[0., 0.], [20., 20.]])  # NIW prior means per state
    sigmas_0 = np.array([np.eye(2), 2 * np.eye(2)])  # NIW prior scale matrices
    kappa_0 = 0.5  # NIW pseudo-count for the mean
    nu_0 = 5  # NIW degrees of freedom

    mu_1, sigma_1 = sample_niw(mus_0[0], sigmas_0[0], kappa_0, nu_0)
    mu_2, sigma_2 = sample_niw(mus_0[1], sigmas_0[1], kappa_0, nu_0)

    print('label 1 before')
    print(mu_1)
    print(sigma_1)

    print('label 2 before')
    print(mu_2)
    print(sigma_2)

    params = [[mu_1, sigma_1], [mu_2, sigma_2]]

    obs, _ = generate_data(D, N, pi, A, params)

    # Natural parameterization of the NIW prior for each state.
    n1s = [
        kappa_0 * mus_0[0], kappa_0,
        sigmas_0[0] + kappa_0 * np.outer(mus_0[0], mus_0[0]), nu_0 + D + 2
    ]
    n2s = [
        kappa_0 * mus_0[1], kappa_0,
        sigmas_0[1] + kappa_0 * np.outer(mus_0[1], mus_0[1]), nu_0 + D + 2
    ]

    # Per-observation expected log-likelihoods under each state, stacked
    # column-wise (rows index observations) as a C-contiguous array for
    # the message-passing routines.
    lliks1 = niw.expected_log_likelihood(obs, mus_0[0], sigmas_0[0], kappa_0,
                                         nu_0)
    lliks2 = niw.expected_log_likelihood(obs, mus_0[1], sigmas_0[1], kappa_0,
                                         nu_0)
    lliks = np.vstack((lliks1, lliks2)).T.copy(order='C')

    lalpha = fb.forward_msgs(pi, A, lliks)
    lbeta = fb.backward_msgs(A, lliks)

    # Posterior state marginals; max-shift in log space before
    # exponentiating for numerical stability, then normalize per row.
    lexpected_states = lalpha + lbeta
    lexpected_states -= np.max(lexpected_states, axis=1)[:, np.newaxis]
    expected_states = np.exp(lexpected_states)
    expected_states /= np.sum(expected_states, axis=1)[:, np.newaxis]

    # Expected sufficient statistics computed explicitly: per-state counts,
    # responsibility-weighted sums, and responsibility-weighted outer-product
    # sums of the observations.
    s1s = np.sum(expected_states, axis=0)
    s2s = np.array([
        np.sum(obs * expected_states[:, i, np.newaxis], axis=0)
        for i in range(2)
    ])
    s3s = np.array([
        np.sum([
            np.outer(obs[i], obs[i]) * expected_states[i, 0]
            for i in range(obs.shape[0])
        ],
               axis=0),
        np.sum([
            np.outer(obs[i], obs[i]) * expected_states[i, 1]
            for i in range(obs.shape[0])
        ],
               axis=0)
    ])

    n11, n12, n13, n14 = niw.meanfield_update(n1s[0], n1s[1], n1s[2], n1s[3],
                                              s1s[0], s2s[0], s3s[0])
    n21, n22, n23, n24 = niw.meanfield_update(n2s[0], n2s[1], n2s[2], n2s[3],
                                              s1s[1], s2s[1], s3s[1])

    # Note might need to add small positive to diagonal
    mu_0, sigma_0, kappa_0, nu_0 = natural_to_standard(n11, n12, n13, n14)
    mu_1, sigma_1 = sample_niw(mu_0, sigma_0, kappa_0, nu_0)
    print('label 1 after')
    print(mu_1)
    print(sigma_1)

    mu_0, sigma_0, kappa_0, nu_0 = natural_to_standard(n21, n22, n23, n24)
    mu_2, sigma_2 = sample_niw(mu_0, sigma_0, kappa_0, nu_0)
    print('label 2 after')
    print(mu_2)
    print(sigma_2)
コード例 #17
0
    def _test_keras_model_tf1(self, model, data_mode, decimal, use_cpu_only, has_variables, verbose):
        """Round-trip a tf.keras model through Core ML and compare predictions.

        Runs the model's TF1 backend graph on generated input data, freezes
        the graph (variables -> constants), converts the frozen graph with
        coremltools, and asserts that Core ML predictions match the TF
        outputs element-wise within ``10 ** -decimal``.

        Args:
            model: a tf.keras model whose inputs/outputs define the graph
                to freeze and convert.
            data_mode: passed to ``generate_data`` to control the input
                distribution.
            decimal: number of decimal places of agreement required
                (delta = 10 ** -decimal after per-element normalization).
            use_cpu_only: restrict both conversion and Core ML prediction
                to CPU.
            has_variables: whether the backend graph contains TF variables;
                if True a Saver is constructed before freezing.
            verbose: if True, print file locations and the converted model
                spec, and save the Core ML model to disk.
        """

        graph_def_file = os.path.join(self.saved_model_dir, 'graph.pb')
        frozen_model_file = os.path.join(self.saved_model_dir, 'frozen.pb')
        core_ml_model_file = os.path.join(self.saved_model_dir, 'model.mlmodel')

        # Map input op names to concrete shapes, replacing unknown (None)
        # dimensions with 1 so data can be generated.
        input_shapes = {inp.op.name: inp.shape.as_list() for inp in model.inputs}
        for name, shape in input_shapes.items():
            input_shapes[name] = [dim if dim is not None else 1 for dim in shape]

        output_node_names = [output.op.name for output in model.outputs]

        # Capture the Keras backend graph before resetting the default graph.
        tf_graph = _keras.get_session().graph
        tf.reset_default_graph()
        if has_variables:
            # Constructing a Saver adds save/restore ops to the graph;
            # presumably required for freezing graphs with variables --
            # TODO confirm (the saver object itself is not used again).
            with tf_graph.as_default():
                saver = tf.train.Saver()

        # note: if Keras backend has_variable is False, we're not making variables constant
        with tf.Session(graph=tf_graph) as sess:
            sess.run(tf.global_variables_initializer())
            feed_dict = {}
            for name, shape in input_shapes.items():
                tensor_name = tf_graph.get_operation_by_name(name).outputs[0].name
                feed_dict[tensor_name] = generate_data(shape, data_mode)
            # run the result
            fetches = [
                tf_graph.get_operation_by_name(name).outputs[0] for name in output_node_names
            ]
            result = sess.run(fetches, feed_dict=feed_dict)
            # save graph definition somewhere
            tf.train.write_graph(sess.graph, self.saved_model_dir, graph_def_file, as_text=False)

            # freeze_graph() has been raising error with tf.keras models since no
            # later than TensorFlow 1.6, so we're not using freeze_graph() here.
            # See: https://github.com/tensorflow/models/issues/5387
            output_graph_def = tf.graph_util.convert_variables_to_constants(
                sess,  # The session is used to retrieve the weights
                tf_graph.as_graph_def(),  # The graph_def is used to retrieve the nodes
                output_node_names  # The output node names are used to select the useful nodes
            )
            with tf.gfile.GFile(frozen_model_file, 'wb') as f:
                f.write(output_graph_def.SerializeToString())

        _keras.clear_session()

        # convert to Core ML model format
        core_ml_model = coremltools.converters.tensorflow.convert(
            frozen_model_file,
            inputs=input_shapes,
            outputs=output_node_names,
            use_cpu_only=use_cpu_only)

        if verbose:
            print('\nFrozen model saved at {}'.format(frozen_model_file))
            print('\nCore ML model description:')
            from coremltools.models.neural_network.printer import print_network_spec
            print_network_spec(core_ml_model.get_spec(), style='coding')
            core_ml_model.save(core_ml_model_file)
            print('\nCore ML model saved at {}'.format(core_ml_model_file))

        # transpose input data as Core ML requires
        core_ml_inputs = {
            name: tf_transpose(feed_dict[self._get_tf_tensor_name(tf_graph, name)])
            for name in input_shapes
        }

        # run prediction in Core ML
        core_ml_output = core_ml_model.predict(core_ml_inputs, useCPUOnly=use_cpu_only)

        # Compare each output element-wise against the TF result.
        for idx, out_name in enumerate(output_node_names):
            tf_out = result[idx]
            # Promote scalar outputs to 1-element arrays so .flatten() works.
            if len(tf_out.shape) == 0:
                tf_out = np.array([tf_out])
            tp = tf_out.flatten()
            coreml_out = core_ml_output[out_name]
            cp = coreml_out.flatten()
            self.assertTrue(tf_out.shape == coreml_out.shape)
            for i in range(len(tp)):
                # Relative comparison: max_den keeps the denominator >= 1 so
                # small values are compared absolutely. NOTE(review): no abs()
                # is taken, so negative values fall back to the 1.0 floor.
                max_den = max(1.0, tp[i], cp[i])
                self.assertAlmostEqual(tp[i] / max_den, cp[i] / max_den, delta=10 ** -decimal)
コード例 #18
0
    def _test_keras_model(self,
                          model,
                          data_mode='random',
                          delta=1e-2,
                          use_cpu_only=True,
                          has_variables=True):
        """Freeze the backend TF graph of a Keras model, convert it to
        Core ML, and compare predictions element-wise.

        Args:
            model: a Keras model whose inputs/outputs define the graph to
                freeze and convert.
            data_mode: passed to ``generate_data`` to control the input
                distribution.
            delta: per-element tolerance for the TF vs. Core ML comparison
                (applied after per-element normalization).
            use_cpu_only: restrict both conversion and Core ML prediction
                to CPU.
            has_variables: whether the backend graph contains TF variables;
                if True a Saver is constructed before freezing.
        """

        model_dir = tempfile.mkdtemp()
        graph_def_file = os.path.join(model_dir, 'tf_graph.pb')
        checkpoint_file = os.path.join(model_dir, 'tf_model.ckpt')
        frozen_model_file = os.path.join(model_dir, 'tf_frozen.pb')
        coreml_model_file = os.path.join(model_dir, 'coreml_model.mlmodel')

        # Map input op names to concrete shapes, replacing unknown (None)
        # dimensions with 1 so data can be generated.
        input_shapes = {
            inp.op.name: inp.shape.as_list()
            for inp in model.inputs
        }
        for name, shape in input_shapes.items():
            input_shapes[name] = [
                dim if dim is not None else 1 for dim in shape
            ]

        output_node_names = [output.op.name for output in model.outputs]

        # Capture the Keras backend graph before resetting the default graph.
        tf_graph = K.get_session().graph
        tf.reset_default_graph()
        if has_variables:
            # Constructing a Saver adds save/restore ops to the graph;
            # presumably required for freezing graphs with variables --
            # TODO confirm (the saver object itself is not used again).
            with tf_graph.as_default() as g:
                saver = tf.train.Saver()

        # TODO - if Keras backend has_variable is False, we're not making variables constant
        with tf.Session(graph=tf_graph) as sess:
            sess.run(tf.global_variables_initializer())
            feed_dict = {}
            for name, shape in input_shapes.items():
                tensor_name = tf_graph.get_operation_by_name(
                    name).outputs[0].name
                feed_dict[tensor_name] = generate_data(shape, data_mode)
            # run the result
            fetches = [
                tf_graph.get_operation_by_name(name).outputs[0]
                for name in output_node_names
            ]
            result = sess.run(fetches, feed_dict=feed_dict)
            # save graph definition somewhere
            tf.train.write_graph(sess.graph,
                                 model_dir,
                                 graph_def_file,
                                 as_text=False)

            # freeze_graph() has been raising error with tf.keras models since no
            # later than TensorFlow 1.6, so we're not using freeze_graph() here.
            # See: https://github.com/tensorflow/models/issues/5387
            output_graph_def = tf.graph_util.convert_variables_to_constants(
                sess,  # The session is used to retrieve the weights
                tf_graph.as_graph_def(
                ),  # The graph_def is used to retrieve the nodes
                output_node_names  # The output node names are used to select the useful nodes
            )
            with tf.gfile.GFile(frozen_model_file, "wb") as f:
                f.write(output_graph_def.SerializeToString())

        K.clear_session()

        # convert to CoreML
        mlmodel = coremltools.converters.tensorflow.convert(
            frozen_model_file,
            inputs=input_shapes,
            outputs=output_node_names,
            use_cpu_only=use_cpu_only)

        if DEBUG:
            print('\n mlmodel description: \n')
            from coremltools.models.neural_network.printer import print_network_spec
            print_network_spec(mlmodel.get_spec(), style='coding')
            mlmodel.save(coreml_model_file)
            print('\n mlmodel saved at %s' % (coreml_model_file))

        # Transpose input data as CoreML requires
        coreml_inputs = {
            name:
            tf_transpose(feed_dict[self._get_tf_tensor_name(tf_graph, name)])
            for name in input_shapes
        }

        # Run predict in CoreML
        coreml_output = mlmodel.predict(coreml_inputs, useCPUOnly=use_cpu_only)

        # Compare each output element-wise against the TF result.
        for idx, out_name in enumerate(output_node_names):
            tf_out = result[idx]
            # Promote scalar outputs to 1-element arrays so .flatten() works.
            if len(tf_out.shape) == 0:
                tf_out = np.array([tf_out])
            tp = tf_out.flatten()
            coreml_out = coreml_output[out_name]
            cp = coreml_out.flatten()
            self.assertTrue(tf_out.shape == coreml_out.shape)
            for i in range(len(tp)):
                # Relative comparison: max_den keeps the denominator >= 1 so
                # small values are compared absolutely. NOTE(review): no abs()
                # is taken, so negative values fall back to the 1.0 floor.
                max_den = max(1.0, tp[i], cp[i])
                self.assertAlmostEqual(tp[i] / max_den,
                                       cp[i] / max_den,
                                       delta=delta)

        # Cleanup files - models on disk no longer useful
        if os.path.exists(model_dir):
            shutil.rmtree(model_dir)
コード例 #19
0
def test_basic2():
    """Run iterated mean-field updates for a 2-state Gaussian HMM.

    Generates observations from the HMM, shows a scatter plot of the data,
    initializes both states' NIW parameters from empirical moments of the
    data, then alternates forward/backward state inference with mean-field
    natural-parameter updates for a fixed number of iterations. Prints the
    true and learned parameters for manual inspection (no assertions).

    Fix: the original body used Python 2-only syntax (`print` statements,
    `xrange`), which is a syntax error under Python 3; converted to
    `print()` calls and `range` with behavior otherwise unchanged.
    """
    N = 100  # number of observations to generate
    D = 2  # observation dimension
    pi = np.array([0.999, 0.001])  # initial state distribution
    A = np.array([[0.9, 0.1], [0.1, 0.9]])  # state transition matrix
    mus_0 = np.array([[0., 0.], [20., 20.]])  # true/prior means per state
    sigmas_0 = np.array([np.eye(2), 5 * np.eye(2)])  # true/prior scale matrices
    kappa_0 = 0.5  # NIW pseudo-count for the mean
    nu_0 = 5  # NIW degrees of freedom

    mu_1, sigma_1 = sample_niw(mus_0[0], sigmas_0[0], kappa_0, nu_0)
    mu_2, sigma_2 = sample_niw(mus_0[1], sigmas_0[1], kappa_0, nu_0)

    params = [[mu_1, sigma_1], [mu_2, sigma_2]]

    obs, _ = generate_data(D, N, pi, A, params)

    plt.scatter(obs[:, 0], obs[:, 1])
    plt.show()

    # Initialize both states identically from the empirical mean and
    # (scaled) covariance of the observations.
    mus_N = np.array([np.mean(obs, axis=0), np.mean(obs, axis=0)])
    sigmas_N = 0.75 * np.array([np.cov(obs.T), np.cov(obs.T)])

    # Natural parameterization of the data-initialized NIW prior per state.
    n1s_0 = [
        kappa_0 * mus_N[0], kappa_0,
        sigmas_N[0] + kappa_0 * np.outer(mus_N[0], mus_N[0]), nu_0 + D + 2
    ]
    n2s_0 = [
        kappa_0 * mus_N[1], kappa_0,
        sigmas_N[1] + kappa_0 * np.outer(mus_N[1], mus_N[1]), nu_0 + D + 2
    ]

    # Posteriors start at the priors (shallow list copies; they are
    # re-bound wholesale every iteration).
    n1s_N = n1s_0[:]
    n2s_N = n2s_0[:]

    iters = 20

    print('label 1 true')
    print(mus_0[0])
    print(sigmas_0[0])

    print('label 2 true')
    print(mus_0[1])
    print(sigmas_0[1])

    for _ in range(iters):

        mu_1N, sigma_1N, kappa_1N, nu_1N = natural_to_standard(
            n1s_N[0], n1s_N[1], n1s_N[2], n1s_N[3])
        mu_2N, sigma_2N, kappa_2N, nu_2N = natural_to_standard(
            n2s_N[0], n2s_N[1], n2s_N[2], n2s_N[3])

        # Per-observation expected log-likelihoods under each state, stacked
        # column-wise (rows index observations) as a C-contiguous array for
        # the message-passing routines.
        lliks1 = niw.expected_log_likelihood(obs, mu_1N, sigma_1N, kappa_1N,
                                             nu_1N)
        lliks2 = niw.expected_log_likelihood(obs, mu_2N, sigma_2N, kappa_2N,
                                             nu_2N)
        lliks = np.vstack((lliks1, lliks2)).T.copy(order='C')

        lalpha = fb.forward_msgs(pi, A, lliks)
        lbeta = fb.backward_msgs(A, lliks)

        # Posterior state marginals; max-shift in log space before
        # exponentiating for numerical stability, then normalize per row.
        lexpected_states = lalpha + lbeta
        lexpected_states -= np.max(lexpected_states, axis=1)[:, np.newaxis]
        expected_states = np.exp(lexpected_states)
        expected_states /= np.sum(expected_states, axis=1)[:, np.newaxis]

        s11, s12, s13 = niw.expected_sufficient_statistics(
            obs, expected_states[:, 0].copy(order='C'))
        s21, s22, s23 = niw.expected_sufficient_statistics(
            obs, expected_states[:, 1].copy(order='C'))
        s1s = np.array([s11, s21])
        s2s = np.array([s12, s22])
        s3s = np.array([s13, s23])

        # Each mean-field update starts from the prior (n*s_0), not the
        # previous posterior.
        n11, n12, n13, n14 = niw.meanfield_update(n1s_0[0], n1s_0[1], n1s_0[2],
                                                  n1s_0[3], s1s[0], s2s[0],
                                                  s3s[0])
        n21, n22, n23, n24 = niw.meanfield_update(n2s_0[0], n2s_0[1], n2s_0[2],
                                                  n2s_0[3], s1s[1], s2s[1],
                                                  s3s[1])
        n1s_N = [n11, n12, n13, n14]
        n2s_N = [n21, n22, n23, n24]

    mu_0, sigma_0, kappa_0, nu_0 = natural_to_standard(n11, n12, n13, n14)
    mu_1, sigma_1 = sample_niw(mu_0, sigma_0, kappa_0, nu_0)
    print('label 1 learned')
    print(mu_1)
    print(sigma_1)

    mu_0, sigma_0, kappa_0, nu_0 = natural_to_standard(n21, n22, n23, n24)
    mu_2, sigma_2 = sample_niw(mu_0, sigma_0, kappa_0, nu_0)
    print('label 2 learned')
    print(mu_2)
    print(sigma_2)