Example #1
 def test_metrics_consistent(self):
     # Tests that the identity metrics used to report in-sample predictions match
     # the behavior of standard metrics.
     g = ops.Graph()
     with g.as_default():
         features = {
             feature_keys.TrainEvalFeatures.TIMES:
             array_ops.zeros((1, 1)),
             feature_keys.TrainEvalFeatures.VALUES:
             array_ops.zeros((1, 1, 1)),
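             # "ticker" counts up by one on each evaluation (capped at 10), cast to float.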
             "ticker":
             array_ops.reshape(
                 math_ops.cast(variables.VariableV1(
                     name="ticker",
                     initial_value=0,
                     dtype=dtypes.int64,
                     collections=[ops.GraphKeys.LOCAL_VARIABLES
                                  ]).count_up_to(10),
                               dtype=dtypes.float32), (1, 1, 1))
         }
         model_fn = ts_head_lib.TimeSeriesRegressionHead(
             model=_TickerModel(),
             state_manager=state_management.PassthroughStateManager(),
             optimizer=train.GradientDescentOptimizer(
                 0.001)).create_estimator_spec
         outputs = model_fn(features=features,
                            labels=None,
                            mode=estimator_lib.ModeKeys.EVAL)
         metric_update_ops = [
             metric[1] for metric in outputs.eval_metric_ops.values()
         ]
         loss_mean, loss_update = metrics.mean(outputs.loss)
         metric_update_ops.append(loss_update)
         with self.cached_session() as sess:
             coordinator = coordinator_lib.Coordinator()
             queue_runner_impl.start_queue_runners(sess, coord=coordinator)
             variables.local_variables_initializer().run()
             sess.run(metric_update_ops)
             loss_evaled, metric_evaled, nested_metric_evaled = sess.run(
                 (loss_mean, outputs.eval_metric_ops["ticker"][0],
                  outputs.eval_metric_ops[
                      feature_keys.FilteringResults.STATE_TUPLE][0][0]))
             # The custom model_utils metrics for in-sample predictions should be in
             # sync with the Estimator's mean metric for model loss.
             self.assertAllClose(0., loss_evaled)
             self.assertAllClose((((0., ), ), ), metric_evaled)
             self.assertAllClose((((0., ), ), ), nested_metric_evaled)
             coordinator.request_stop()
             coordinator.join()
Example #2
  def _VariableRankTest(self,
                        np_scatter,
                        tf_scatter,
                        vtype,
                        itype,
                        repeat_indices=False):
    np.random.seed(8)
    ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)]
    indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)]
    with self.cached_session(use_gpu=True):
      for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
        num_updates = indices_shape[0]
        ixdim = indices_shape[-1]

        indexable_area_shape = ()
        for i in range(ixdim):
          indexable_area_shape += (ref_shape[i],)
        all_indices = [
            list(coord)
            for coord, _ in np.ndenumerate(
                np.empty(indexable_area_shape, vtype))
        ]
        np.random.shuffle(all_indices)
        indices = np.array(all_indices[:num_updates])

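        # When requested, replace half of the indices with duplicates so
        # repeated-index updates are exercised.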
        if num_updates > 1 and repeat_indices:
          indices = indices[:num_updates // 2]
          for _ in range(num_updates - num_updates // 2):
            indices = np.append(
                indices, [indices[np.random.randint(num_updates // 2)]], axis=0)
          np.random.shuffle(indices)
        indices = _AsType(indices[:num_updates], itype)

        updates_shape = (num_updates,)
        for i in range(ixdim, len(ref_shape)):
          updates_shape += (ref_shape[i],)
        updates = _AsType(np.random.randn(*updates_shape), vtype)
        ref = _AsType(np.random.randn(*ref_shape), vtype)

        # Scatter via numpy
        new = ref.copy()
        np_scatter(new, indices, updates)
        # Scatter via tensorflow
        ref_var = variables.VariableV1(ref)
        ref_var.initializer.run()
        tf_scatter(ref_var, indices, updates).eval()

        # Compare
        self.assertAllClose(new, self.evaluate(ref_var))
Example #3
    def testGrpcDebugWrapperSessionWithoutWatchFnWorks(self):
        u = variables.VariableV1(2.1, name="u")
        v = variables.VariableV1(20.0, name="v")
        w = math_ops.multiply(u, v, name="w")

        sess = session.Session(
            config=session_debug_testlib.no_rewrite_session_config())
        sess.run(u.initializer)
        sess.run(v.initializer)

        sess = grpc_wrapper.GrpcDebugWrapperSession(
            sess, "localhost:%d" % self._server_port)
        w_result = sess.run(w)
        self.assertAllClose(42.0, w_result)

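        # The dump should hold at least five tensors: u, u/read, v, v/read, and w.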
        dump = debug_data.DebugDumpDir(self._dump_root)
        self.assertLessEqual(5, dump.size)
        self.assertAllClose([2.1], dump.get_tensors("u", 0, "DebugIdentity"))
        self.assertAllClose([2.1],
                            dump.get_tensors("u/read", 0, "DebugIdentity"))
        self.assertAllClose([20.0], dump.get_tensors("v", 0, "DebugIdentity"))
        self.assertAllClose([20.0],
                            dump.get_tensors("v/read", 0, "DebugIdentity"))
        self.assertAllClose([42.0], dump.get_tensors("w", 0, "DebugIdentity"))
Example #4
 def _create_slots(self):
     """Make unshrinked internal variables (slots)."""
     # Unshrinked variables have the updates before applying L1 regularization.
     # Each unshrinked slot variable is either a `Variable` or list of
     # `Variable`, depending on the value of its corresponding primary variable.
     # We avoid using `PartitionedVariable` for the unshrinked slots since we do
     # not need any of the extra information.
     self._slots = collections.defaultdict(list)
     for name in ['sparse_features_weights', 'dense_features_weights']:
         for var in self._variables[name]:
             # Our primary variable may be either a PartitionedVariable, or a list
             # of Variables (each representing a partition).
             if (isinstance(var, var_ops.PartitionedVariable)
                     or isinstance(var, list)):
                 var_list = []
                 # pylint: disable=protected-access
                 for v in var:
                     with ops.colocate_with(v):
                         # TODO(andreasst): remove SDCAOptimizer suffix once bug 30843109
                         # is fixed.
                         slot_var = var_ops.VariableV1(
                             initial_value=array_ops.zeros_like(
                                 v.initialized_value(), dtypes.float32),
                             name=v.op.name + '_unshrinked/SDCAOptimizer')
                         var_list.append(slot_var)
                 self._slots['unshrinked_' + name].append(var_list)
                 # pylint: enable=protected-access
             else:
                 with ops.device(var.device):
                     # TODO(andreasst): remove SDCAOptimizer suffix once bug 30843109 is
                     # fixed.
                     self._slots['unshrinked_' + name].append(
                         var_ops.VariableV1(array_ops.zeros_like(
                             var.initialized_value(), dtypes.float32),
                                            name=var.op.name +
                                            '_unshrinked/SDCAOptimizer'))
Example #5
    def setUp(self):
        """Write test SavedModels to a temp directory."""
        with session.Session(graph=ops.Graph()) as sess:
            x = variables.VariableV1(5, name="x")
            y = variables.VariableV1(11, name="y")
            z = x + y
            sess.run(variables.global_variables_initializer())

            foo_sig_def = signature_def_utils.build_signature_def(
                {"foo_input": utils.build_tensor_info(x)},
                {"foo_output": utils.build_tensor_info(z)})
            bar_sig_def = signature_def_utils.build_signature_def(
                {
                    "bar_x": utils.build_tensor_info(x),
                    "bar_y": utils.build_tensor_info(y)
                }, {"bar_z": utils.build_tensor_info(z)})

            builder = saved_model_builder.SavedModelBuilder(
                SIMPLE_ADD_SAVED_MODEL)
            builder.add_meta_graph_and_variables(sess, ["foo_graph"], {
                "foo": foo_sig_def,
                "bar": bar_sig_def
            })
            builder.save()

            # Write SavedModel with a main_op
            assign_op = control_flow_ops.group(state_ops.assign(y, 7))

            builder = saved_model_builder.SavedModelBuilder(
                SAVED_MODEL_WITH_MAIN_OP)
            builder.add_meta_graph_and_variables(sess, ["foo_graph"], {
                "foo": foo_sig_def,
                "bar": bar_sig_def
            },
                                                 main_op=assign_op)
            builder.save()
Example #6
  def testLocalInitOp(self):
    logdir = self._test_dir("default_local_init_op")
    with ops.Graph().as_default():
      # A local variable.
      v = variables.VariableV1(
          [1.0, 2.0, 3.0],
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES])

      # An entity which is initialized through a TABLE_INITIALIZER.
      w = variables.VariableV1([4, 5, 6], trainable=False, collections=[])
      ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, w.initializer)

      # This shouldn't add a variable to the VARIABLES collection responsible
      # for variables that are saved/restored from checkpoints.
      self.assertEqual(len(variables.global_variables()), 0)

      # Suppress normal variable inits to make sure the local one is
      # initialized via local_init_op.
      sv = supervisor.Supervisor(logdir=logdir, init_op=None)
      sess = sv.prepare_or_wait_for_session("")
      self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
      self.assertAllClose([4, 5, 6], sess.run(w))
      sv.stop()
Example #7
    def testExtendAfterQueueRunners(self):
        server = self._cached_server
        with session.Session(server.target) as sess:
            input_queue = input_ops.input_producer(
                constant_op.constant([0.], dtype=dtypes.float32))
            self.assertIsNotNone(input_queue)

            var = variables.VariableV1(1.,
                                       dtype=dtypes.float32,
                                       trainable=False,
                                       name="var")

            sess.run(variables.global_variables_initializer())
            queue_runner_impl.start_queue_runners(sess)
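            # var.assign(3.0) is created after the runners started, so running
            # it extends the graph.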
            sess.run(var.assign(3.0))
Example #8
 def testIgnoreMultiStarts(self):
   with self.cached_session() as sess:
     # CountUpTo will raise OUT_OF_RANGE when it reaches the count.
     zero64 = constant_op.constant(0, dtype=dtypes.int64)
     var = variables.VariableV1(zero64)
     count_up_to = var.count_up_to(3)
     queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
     variables.global_variables_initializer().run()
     coord = coordinator.Coordinator()
     qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
     threads = []
     # NOTE that this test does not actually start the threads.
     threads.extend(qr.create_threads(sess, coord=coord))
     new_threads = qr.create_threads(sess, coord=coord)
     self.assertEqual([], new_threads)
Example #9
 def testLogdirButExplicitlyNoSummaryWriter(self):
   logdir = self._test_dir("explicit_no_summary_writer")
   with ops.Graph().as_default():
     variables.VariableV1([1.0], name="foo")
     summary.scalar("c1", constant_op.constant(1))
     summary.scalar("c2", constant_op.constant(2))
     summary.scalar("c3", constant_op.constant(3))
     summ = summary.merge_all()
     sv = supervisor.Supervisor(logdir=logdir, summary_writer=None)
     sess = sv.prepare_or_wait_for_session("")
      # Check that a checkpoint is still generated.
     self._wait_for_glob(sv.save_path, 3.0)
     # Check that we cannot write a summary
     with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
       sv.summary_computed(sess, sess.run(summ))
Example #10
        def get_session(is_chief):
            g = ops.Graph()
            with g.as_default():
                with ops.device("/job:local"):
                    v = variables.VariableV1(
                        1.0,
                        name="ready_for_local_init_op_restore_v_" + str(uid))
                    vadd = v.assign_add(1)
                    w = variables.VariableV1(
                        v,
                        trainable=False,
                        collections=[ops.GraphKeys.LOCAL_VARIABLES],
                        name="ready_for_local_init_op_restore_w_" + str(uid))
                    ready_for_local_init_op = variables.report_uninitialized_variables(
                        variables.global_variables())
            sv = supervisor.Supervisor(
                logdir=logdir,
                is_chief=is_chief,
                graph=g,
                recovery_wait_secs=1,
                ready_for_local_init_op=ready_for_local_init_op)
            sess = sv.prepare_or_wait_for_session(server.target)

            return sv, sess, v, vadd, w
Example #11
  def setUp(self):
    """Test setup.

    Structure of the forward graph:
              f
             | |
        -----   -----
        |           |
        d           e
       | |         | |
    ---   ---------  ---
    |         |        |
    a         b        c

    Construct a backward graph using the GradientDescentOptimizer.
    """

    self.a = variables.VariableV1(1.0, name="a")
    self.b = variables.VariableV1(2.0, name="b")
    self.c = variables.VariableV1(4.0, name="c")
    self.d = math_ops.multiply(self.a, self.b, name="d")
    self.e = math_ops.multiply(self.b, self.c, name="e")
    self.f = math_ops.multiply(self.d, self.e, name="f")

    # Gradient descent optimizer that minimizes f.
    gradient_descent.GradientDescentOptimizer(0.01).minimize(
        self.f, name="optim")

    rewriter_config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
        constant_folding=rewriter_config_pb2.RewriterConfig.OFF)
    graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
    config = config_pb2.ConfigProto(graph_options=graph_options)
    self.sess = session.Session(config=config)
    self.sess.run(variables.global_variables_initializer())
Example #12
 def testColocationConstraints(self):
     with ops.Graph().as_default() as g:
         c = constant_op.constant([10])
         v = variables.VariableV1([3], dtype=dtypes.int32)
         i = gen_array_ops.ref_identity(v)
         a = state_ops.assign(i, c)
         train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
         train_op.append(a)
         mg = meta_graph.create_meta_graph_def(graph=g)
         grappler_item = item.Item(mg)
         groups = grappler_item.GetColocationGroups()
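          # The variable, its assign ops, and the ref identity should form a
          # single colocation group.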
         self.assertEqual(len(groups), 1)
         self.assertItemsEqual(
             groups[0],
             ['Assign', 'RefIdentity', 'Variable', 'Variable/Assign'])
Example #13
 def testTwoOps(self):
     with self.cached_session() as sess:
         # CountUpTo will raise OUT_OF_RANGE when it reaches the count.
         zero64 = constant_op.constant(0, dtype=dtypes.int64)
         var0 = variables.VariableV1(zero64)
         count_up_to_3 = var0.count_up_to(3)
         var1 = variables.VariableV1(zero64)
         count_up_to_30 = var1.count_up_to(30)
         queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
         qr = queue_runner_impl.QueueRunner(queue,
                                            [count_up_to_3, count_up_to_30])
         threads = qr.create_threads(sess)
         self.assertEqual(sorted(t.name for t in threads), [
             "QueueRunnerThread-fifo_queue-CountUpTo:0",
             "QueueRunnerThread-fifo_queue-CountUpTo_1:0"
         ])
         self.evaluate(variables.global_variables_initializer())
         for t in threads:
             t.start()
         for t in threads:
             t.join()
         self.assertEqual(0, len(qr.exceptions_raised))
         self.assertEqual(3, self.evaluate(var0))
         self.assertEqual(30, self.evaluate(var1))
Example #14
    def testVariables(self, use_resource):
        cluster = data_service_test_base.TestCluster(num_workers=1)
        if not use_resource:
            with variable_scope.variable_scope("foo", use_resource=False):
                v = variables.VariableV1(10, dtype=dtypes.int64)
        else:
            v = variables.Variable(10, dtype=dtypes.int64)

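        # The map adds the variable's value, so range(3) should produce [10, 11, 12].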
        ds = dataset_ops.Dataset.range(3)
        ds = ds.map(lambda x: x + v)
        ds = self.make_distributed_dataset(ds, cluster)
        self.evaluate(v.initializer)
        self.assertDatasetProduces(ds,
                                   list(range(10, 13)),
                                   requires_initialization=True)
Example #15
    def testToggleEnableTwoDebugWatchesNoCrosstalkBetweenServers(self):
        with session.Session(
                config=session_debug_testlib.no_rewrite_session_config()) as sess:
            v = variables.VariableV1(50.0, name="v")
            delta = constant_op.constant(5.0, name="delta")
            inc_v = state_ops.assign_add(v, delta, name="inc_v")

            sess.run(v.initializer)

            run_metadata = config_pb2.RunMetadata()
            run_options = config_pb2.RunOptions(output_partition_graphs=True)
            debug_utils.watch_graph(
                run_options,
                sess.graph,
                debug_ops=["DebugIdentity(gated_grpc=true)"],
                debug_urls=[
                    self._debug_server_url_1, self._debug_server_url_2
                ])

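            # Alternate watch and unwatch requests so the two servers are
            # exercised independently.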
            for i in range(4):
                self._server_1.clear_data()
                self._server_2.clear_data()

                if i % 2 == 0:
                    self._server_1.request_watch("delta", 0, "DebugIdentity")
                    self._server_2.request_watch("v", 0, "DebugIdentity")
                else:
                    self._server_1.request_unwatch("delta", 0, "DebugIdentity")
                    self._server_2.request_unwatch("v", 0, "DebugIdentity")

                sess.run(inc_v, options=run_options, run_metadata=run_metadata)

                if i % 2 == 0:
                    self.assertEqual(1,
                                     len(self._server_1.debug_tensor_values))
                    self.assertEqual(1,
                                     len(self._server_2.debug_tensor_values))
                    self.assertAllClose(
                        [5.0], self._server_1.
                        debug_tensor_values["delta:0:DebugIdentity"])
                    self.assertAllClose([
                        50 + 5.0 * i
                    ], self._server_2.debug_tensor_values["v:0:DebugIdentity"])
                else:
                    self.assertEqual(0,
                                     len(self._server_1.debug_tensor_values))
                    self.assertEqual(0,
                                     len(self._server_2.debug_tensor_values))
Example #16
    def _testOneSimpleInference(self, rnn_mode, num_layers, num_units,
                                input_size, batch_size, seq_length, dir_count,
                                dropout, expected, tolerance):
        random_seed.set_random_seed(5678)
        model = _CreateModel(
            rnn_mode,
            num_layers,
            num_units,
            input_size,
            input_mode="auto_select",
            direction=(cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION if dir_count == 1
                       else cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION),
            dropout=dropout)
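        # Only the LSTM mode carries a separate cell state (input_c/output_c).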
        has_input_c = (rnn_mode == cudnn_rnn_ops.CUDNN_LSTM)
        params_size_t = model.params_size()
        input_data = array_ops.ones([seq_length, batch_size, input_size])
        input_h = array_ops.ones(
            [num_layers * dir_count, batch_size, num_units])
        params = variables.VariableV1(array_ops.ones([params_size_t]),
                                      validate_shape=False)
        if has_input_c:
            input_c = array_ops.ones(
                [num_layers * dir_count, batch_size, num_units])
            output, output_h, output_c = model(input_data=input_data,
                                               input_h=input_h,
                                               input_c=input_c,
                                               params=params,
                                               is_training=False)
        else:
            output, output_h = model(input_data=input_data,
                                     input_h=input_h,
                                     params=params,
                                     is_training=False)
        output_sum = math_ops.reduce_sum(output)
        output_h_sum = math_ops.reduce_sum(output_h)
        total_sum = output_sum + output_h_sum
        if has_input_c:
            output_c_sum = math_ops.reduce_sum(output_c)
            total_sum += output_c_sum
        with self.test_session(use_gpu=True,
                               graph=ops.get_default_graph()) as sess:
            sess.run(variables.global_variables_initializer())
            total_sum_v = sess.run([total_sum])

            self.assertAllClose(total_sum_v[0],
                                expected,
                                atol=tolerance,
                                rtol=tolerance)
Example #17
    def test_optimizer_kwargs(self):
        # Checks that the 'method' argument is still present
        # after running optimizer.minimize().
        # Bug reference: b/64065260
        vector_initial_value = [7., 7.]
        vector = variables.VariableV1(vector_initial_value, name='vector')
        loss = math_ops.reduce_sum(math_ops.square(vector))

        optimizer = external_optimizer.ScipyOptimizerInterface(loss,
                                                               method='SLSQP')

        with self.cached_session() as sess:
            sess.run(variables.global_variables_initializer())
            optimizer.minimize(sess)
            method = optimizer.optimizer_kwargs.get('method')
            self.assertEqual('SLSQP', method)
Example #18
    def test_vector_bounds(self):
        vector_initial_value = [7., 7.]
        vector = variables.VariableV1(vector_initial_value, name='vector')

        # Make norm as small as possible.
        loss = math_ops.reduce_sum(math_ops.square(vector))

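        # Lower-bound only the second component at 2; the first component is
        # unconstrained.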
        var_to_bounds = {vector: ([None, 2.], None)}

        optimizer = external_optimizer.ScipyOptimizerInterface(
            loss, var_to_bounds=var_to_bounds)

        with self.cached_session() as sess:
            sess.run(variables.global_variables_initializer())
            optimizer.minimize(sess)
            self.assertAllClose([0., 2.], sess.run(vector))
Example #19
 def _model_fn(features, labels, mode, config):
   del labels
   del mode
   del config
   global_step = training_util.get_or_create_global_step()
   update_global_step_op = global_step.assign_add(1)
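   # Keep the most recent feature in a variable so it can be read back after training.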
   latest_feature = variables.VariableV1(
       0, name='latest_feature', dtype=dtypes.int64)
   store_latest_feature_op = latest_feature.assign(features)
   ops.add_to_collection('my_vars', global_step)
   ops.add_to_collection('my_vars', latest_feature)
   return model_fn.EstimatorSpec(
       mode='train',
       train_op=control_flow_ops.group(
           [update_global_step_op, store_latest_feature_op]),
       loss=constant_op.constant(2.0))
Example #20
 def _GetGraph(self):
     """Get the graph for testing."""
     g = ops.Graph()
     with g.as_default():
         with g.device("/GPU:0"):
             inp = array_ops.placeholder(dtype=dtypes.float32,
                                         shape=[None, 1, 1],
                                         name="input")
             var = variables.VariableV1([[[1.0]]],
                                        dtype=dtypes.float32,
                                        name="v1")
             add = inp + var.value()
             mul = inp * add
             add = mul + add
             out = array_ops.identity(add, name="output")
     return g, var, inp, out
Example #21
    def test_stop(self):
        hook = wals_lib._StopAtSweepHook(last_sweep=10)
        completed_sweeps = variables.VariableV1(
            8, name=wals_lib.WALSMatrixFactorization.COMPLETED_SWEEPS)
        train_op = state_ops.assign_add(completed_sweeps, 1)
        hook.begin()

        with self.cached_session() as sess:
            sess.run([variables.global_variables_initializer()])
            mon_sess = monitored_session._HookedSession(sess, [hook])
            mon_sess.run(train_op)
            # completed_sweeps is 9 after running train_op.
            self.assertFalse(mon_sess.should_stop())
            mon_sess.run(train_op)
            # completed_sweeps is 10 after running train_op.
            self.assertTrue(mon_sess.should_stop())
Example #22
    def setUp(self):
        self.session_root = tempfile.mkdtemp()

        self.v = variables.VariableV1(10.0, dtype=dtypes.float32, name="v")
        self.delta = constant_op.constant(1.0,
                                          dtype=dtypes.float32,
                                          name="delta")
        self.eta = constant_op.constant(-1.4, dtype=dtypes.float32, name="eta")
        self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
        self.dec_v = state_ops.assign_add(self.v, self.eta, name="dec_v")

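        # A placeholder-fed increment lets tests apply arbitrary deltas to v.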
        self.ph = array_ops.placeholder(dtypes.float32, shape=(), name="ph")
        self.inc_w_ph = state_ops.assign_add(self.v, self.ph, name="inc_w_ph")

        self.sess = session.Session()
        self.sess.run(self.v.initializer)
Example #23
 def model_fn(features, labels, mode):
   _, _ = features, labels
   w = variables.VariableV1(
       initial_value=[0.],
       trainable=False,
       collections=[ops.GraphKeys.SAVEABLE_OBJECTS])
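   # Keeping w only in SAVEABLE_OBJECTS checkpoints it without listing it as a
   # global variable.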
   init_op = control_flow_ops.group(
       [w.initializer, training.get_global_step().initializer])
   return estimator_lib.EstimatorSpec(
       mode,
       loss=constant_op.constant(3.),
       scaffold=training.Scaffold(init_op=init_op),
       train_op=constant_op.constant(5.),
       eval_metric_ops={
           'mean_of_features': metrics_lib.mean(constant_op.constant(2.))
       })
Example #24
 def get_synthetic_inputs(self, input_name, nclass):
     inputs = tf.random_uniform(self.get_input_shapes('train')[0],
                                dtype=self.get_input_data_types('train')[0])
     inputs = variables.VariableV1(
         inputs,
         trainable=False,
         collections=[tf.GraphKeys.LOCAL_VARIABLES],
         name=input_name)
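     # Random integer class ids in [0, 28), one row per batch element.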
     labels = tf.convert_to_tensor(
         np.random.randint(28,
                           size=[self.batch_size, self.max_label_length]))
     input_lengths = tf.convert_to_tensor([self.max_time_steps] *
                                          self.batch_size)
     label_lengths = tf.convert_to_tensor([self.max_label_length] *
                                          self.batch_size)
     return [inputs, labels, input_lengths, label_lengths]
Example #25
 def testPrepareSessionWithCyclicInitializer(self):
   # Regression test. Previously Variable._build_initializer_expr would enter
   # into an infinite recursion when the variable's initial_value involved
   # cyclic dependencies.
   with ops.Graph().as_default():
     i = control_flow_ops.while_loop(lambda i: i < 1, lambda i: i + 1, [0])
     v = variables.VariableV1(array_ops.identity(i), name="v")
     with self.cached_session():
       self.assertEqual(False, variables.is_variable_initialized(v).eval())
     sm = session_manager.SessionManager(
         ready_op=variables.report_uninitialized_variables())
     sess = sm.prepare_session("", init_op=v.initializer)
     self.assertEqual(1, sess.run(v))
     self.assertEqual(
         True,
         variables.is_variable_initialized(
             sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
Example #26
def tfadd_with_ckpt_saver(out_dir):
  x = array_ops.placeholder(dtypes.int32, name='x_hold')
  y = variables.VariableV1(constant_op.constant([0]), name='y_saved')
  math_ops.add(x, y, name='x_y_sum')

  init_op = variables.global_variables_initializer()
  saver = saver_lib.Saver(name='abcprefix', write_version=saver_pb2.SaverDef.V1)
  with session.Session() as sess:
    sess.run(init_op)
    sess.run(y.assign(y + 42))
    # Without the checkpoint, the variable won't be set to 42.
    ckpt_file = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt_saver.ckpt')
    saver.save(sess, ckpt_file)
    # Without the SaverDef, the restore op won't be named correctly.
    saver_file = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt_saver.saver')
    with open(saver_file, 'wb') as f:
      f.write(six.ensure_binary(saver.as_saver_def().SerializeToString()))
Example #27
    def test_scalar_bounds(self):
        vector_initial_value = [7., 7.]
        vector = variables.VariableV1(vector_initial_value, name='vector')

        # Make norm as small as possible.
        loss = math_ops.reduce_sum(math_ops.square(vector))

        # Make the minimum value of each component be 1.
        var_to_bounds = {vector: (1., np.inf)}

        optimizer = external_optimizer.ScipyOptimizerInterface(
            loss, var_to_bounds=var_to_bounds)

        with self.cached_session() as sess:
            sess.run(variables.global_variables_initializer())
            optimizer.minimize(sess)
            self.assertAllClose(np.ones(2), sess.run(vector))
Example #28
 def testStartQueueRunners(self):
   # CountUpTo will raise OUT_OF_RANGE when it reaches the count.
   zero64 = constant_op.constant(0, dtype=dtypes.int64)
   var = variables.VariableV1(zero64)
   count_up_to = var.count_up_to(3)
   queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
   init_op = variables.global_variables_initializer()
   qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
   queue_runner_impl.add_queue_runner(qr)
   with self.cached_session() as sess:
     init_op.run()
     threads = queue_runner_impl.start_queue_runners(sess)
     for t in threads:
       t.join()
     self.assertEqual(0, len(qr.exceptions_raised))
     # The variable should be 3.
     self.assertEqual(3, var.eval())
Example #29
  def testVariables(self):
   step = variables.VariableV1(1)
   assign_1 = step.assign(1)
   assign_2 = step.assign(2)
   assign_100 = step.assign(100)
   decayed_lr = learning_rate_decay.exponential_decay(
       .1, step, 3, 0.96, staircase=True)
   self.evaluate(variables.global_variables_initializer())
   # No change to learning rate
   self.evaluate(assign_1.op)
   self.assertAllClose(self.evaluate(decayed_lr), .1, 1e-6)
   self.evaluate(assign_2.op)
   self.assertAllClose(self.evaluate(decayed_lr), .1, 1e-6)
   # Decayed learning rate
   self.evaluate(assign_100.op)
   expected = .1 * 0.96**(100 // 3)
   self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
Example #30
 def testPartitionedVariableMasking(self):
     partitioner = partitioned_variables.variable_axis_size_partitioner(40)
     with self.cached_session() as session:
         with variable_scope.variable_scope("", partitioner=partitioner):
             sparsity = variables.VariableV1(0.5, name="Sparsity")
             weights = variable_scope.get_variable(
                 "weights", initializer=math_ops.linspace(1.0, 100.0, 100))
             masked_weights = pruning.apply_mask(
                 weights, scope=variable_scope.get_variable_scope())
         p = pruning.Pruning(sparsity=sparsity)
         p._spec.threshold_decay = 0.0
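          # At 50% sparsity the mask should keep exactly half of the 100 weights.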
         mask_update_op = p.mask_update_op()
         variables.global_variables_initializer().run()
          session.run(mask_update_op)
          masked_weights_val = masked_weights.eval()
         self.assertAllEqual(np.count_nonzero(masked_weights_val), 50)