Example #1
  def testRecoverSession(self):
    # Create a checkpoint.
    checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
    try:
      gfile.DeleteRecursively(checkpoint_dir)
    except OSError:
      pass                      # Ignore
    gfile.MakeDirs(checkpoint_dir)

    with tf.Graph().as_default():
      v = tf.Variable(1, name="v")
      sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
      saver = tf.train.Saver({"v": v})
      sess, initialized = sm.recover_session("", saver=saver,
                                             checkpoint_dir=checkpoint_dir)
      self.assertFalse(initialized)
      sess.run(v.initializer)
      self.assertEqual(1, sess.run(v))
      saver.save(sess, os.path.join(checkpoint_dir,
                                    "recover_session_checkpoint"))
    # Create a new Graph and SessionManager and recover.
    with tf.Graph().as_default():
      v = tf.Variable(2, name="v")
      sm2 = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
      saver = tf.train.Saver({"v": v})
      sess, initialized = sm2.recover_session("", saver=saver,
                                              checkpoint_dir=checkpoint_dir)
      self.assertTrue(initialized)
      self.assertEqual(1, sess.run(v))
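A minimal sketch (TF 1.x graph mode, not part of the test above) of the ready_op mechanic this test relies on: running the op returned by tf.assert_variables_initialized() raises FailedPreconditionError while any variable is uninitialized, which is how SessionManager decides whether a recovered session is usable.

import tensorflow as tf

with tf.Graph().as_default():
    v = tf.Variable(0, name="v")
    ready_op = tf.assert_variables_initialized()
    with tf.Session() as sess:
        try:
            sess.run(ready_op)  # fails: v has not been initialized yet
        except tf.errors.FailedPreconditionError:
            sess.run(v.initializer)
        sess.run(ready_op)  # passes once v is initialized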
Example #2
def fc_wn(x, num_units, nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
	''' fully connected layer '''
	name = get_name('dense', counters)
	with tf.variable_scope(name):
		if init:
			# data based initialization of parameters
			V = tf.get_variable('V', [int(x.get_shape()[1]), num_units], tf.float32,
			                    tf.random_normal_initializer(0, 0.05), trainable=True)
			V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
			x_init = tf.matmul(x, V_norm)
			m_init, v_init = tf.nn.moments(x_init, [0])
			scale_init = init_scale / tf.sqrt(v_init + 1e-10)
			g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
			b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init * scale_init, trainable=True)
			x_init = tf.reshape(scale_init, [1, num_units]) * (x_init - tf.reshape(m_init, [1, num_units]))
			if nonlinearity is not None:
				x_init = nonlinearity(x_init)
			return x_init

		else:
			V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
			tf.assert_variables_initialized([V, g, b])

			# use weight normalization (Salimans & Kingma, 2016)
			x = tf.matmul(x, V)
			scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
			x = tf.reshape(scaler, [1, num_units]) * x + tf.reshape(b, [1, num_units])

			# apply nonlinearity
			if nonlinearity is not None:
				x = nonlinearity(x)
			return x
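A hedged usage note for the two-pass pattern above: in the PixelCNN++-style codebases these helpers (get_name, get_vars_maybe_avg) come from, the model function is typically wrapped in tf.make_template, so that the init=False pass reuses the variables the init=True pass created and data-dependently initialized. A sketch, with model_spec as a hypothetical function that stacks layers like fc_wn:

model = tf.make_template('model', model_spec)
init_out = model(x_batch, init=True)    # creates V, g, b from batch statistics
train_out = model(x_batch, init=False)  # reuses the initialized V, g, b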
Example #3
    def testRecoverSession(self):
        # Create a checkpoint.
        checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
        try:
            gfile.DeleteRecursively(checkpoint_dir)
        except OSError:
            pass  # Ignore
        gfile.MakeDirs(checkpoint_dir)

        with tf.Graph().as_default():
            v = tf.Variable(1, name="v")
            sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
            saver = tf.train.Saver({"v": v})
            sess, initialized = sm.recover_session("", saver=saver, checkpoint_dir=checkpoint_dir)
            self.assertFalse(initialized)
            sess.run(v.initializer)
            self.assertEqual(1, sess.run(v))
            saver.save(sess, os.path.join(checkpoint_dir, "recover_session_checkpoint"))
        # Create a new Graph and SessionManager and recover.
        with tf.Graph().as_default():
            v = tf.Variable(2, name="v")
            with self.test_session():
                self.assertEqual(False, tf.is_variable_initialized(v).eval())
            sm2 = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
            saver = tf.train.Saver({"v": v})
            sess, initialized = sm2.recover_session("", saver=saver, checkpoint_dir=checkpoint_dir)
            self.assertTrue(initialized)
            self.assertEqual(True, tf.is_variable_initialized(sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
            self.assertEqual(1, sess.run(v))
Example #4
def deconv(inp, name, filter_size, out_channels, stride=1,
           padding='SAME', nonlinearity=None, init_scale=1.0):
    """ Deconvolution layer. See `conv`"""
    with tf.variable_scope(name):
        strides = [1, stride, stride, 1]
        [N, H, W, in_channels] = inp.get_shape().as_list()
        if padding == 'SAME':
            target_shape = [N, H * stride, W * stride, out_channels]
        else:
            target_shape = [N, H * stride + filter_size[0] - 1, W * stride + filter_size[1] - 1, out_channels]
        target_shape = tf.constant(target_shape, dtype=tf.int32)

        if tf.GLOBAL['init']:
            V = get_variable('V', shape=filter_size + (out_channels, in_channels), dtype=tf.float32,
                             initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0, 1, 3])
            out = tf.nn.conv2d_transpose(inp, V_norm, target_shape, strides, padding)
            m_init, v_init = tf.nn.moments(out, [0, 1, 2])
            scale_init = init_scale / tf.sqrt(v_init + 1e-8)
            g = get_variable('g', shape=None, dtype=tf.float32, initializer=scale_init, trainable=True, regularizer=tf.contrib.layers.l2_regularizer(tf.GLOBAL['reg']))
            b = get_variable('b', shape=None, dtype=tf.float32, initializer=-m_init * scale_init, trainable=True, regularizer=tf.contrib.layers.l2_regularizer(tf.GLOBAL['reg']))
            out = tf.reshape(scale_init, [1, 1, 1, out_channels]) * (out - tf.reshape(m_init, [1, 1, 1, out_channels]))
            if nonlinearity is not None:
                out = nonlinearity(out)

        else:
            V, g, b = get_variable('V'), get_variable('g'), get_variable('b')
            tf.assert_variables_initialized([V, g, b])
            W = g[None, None, :, None] * tf.nn.l2_normalize(V, [0, 1, 3])
            out = tf.nn.conv2d_transpose(inp, W, target_shape, strides, padding) + b[None, None, None]
            if nonlinearity is not None:
                out = nonlinearity(out)

        return out
Example #5
def weight_norm_linear(input_, output_size,
                       init=False, init_scale=1.0,
                       name="wn_linear",
                       initializer=tf.truncated_normal_initializer,
                       stddev=0.02):
    """Linear layer with Weight Normalization (Salimans, Kingma '16)."""
    with tf.variable_scope(name):
        if init:
            v = tf.get_variable("V", [int(input_.get_shape()[1]), output_size],
                                tf.float32, initializer(0, stddev), trainable=True)
            v_norm = tf.nn.l2_normalize(v.initialized_value(), [0])
            x_init = tf.matmul(input_, v_norm)
            m_init, v_init = tf.nn.moments(x_init, [0])
            scale_init = init_scale / tf.sqrt(v_init + 1e-10)
            g = tf.get_variable("g", dtype=tf.float32,
                                initializer=scale_init, trainable=True)
            b = tf.get_variable("b", dtype=tf.float32,
                                initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init, [1, output_size]) * (
                x_init - tf.reshape(m_init, [1, output_size]))
            return x_init
        else:

            v = tf.get_variable("V")
            g = tf.get_variable("g")
            b = tf.get_variable("b")
            tf.assert_variables_initialized([v, g, b])
            x = tf.matmul(input_, v)
            scaler = g / tf.sqrt(tf.reduce_sum(tf.square(v), [0]))
            x = tf.reshape(scaler, [1, output_size]) * x + tf.reshape(
                b, [1, output_size])
            return x
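For reference, the scaler line above implements the weight-normalization reparameterization of Salimans & Kingma (2016): each weight column is expressed as

    W_{:,j} = \frac{g_j}{\lVert V_{:,j} \rVert_2} \, V_{:,j}

so that x W equals (x V) rescaled column-wise by g / \lVert V \rVert_2, which is exactly what scaler = g / tf.sqrt(tf.reduce_sum(tf.square(v), [0])) computes.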
Example #6
def weight_norm_conv2d(input_, output_dim,
                       k_h, k_w, d_h, d_w,
                       init, init_scale,
                       stddev=0.02,
                       name="wn_conv2d",
                       initializer=tf.truncated_normal_initializer):
    """Convolution with Weight Normalization (Salimans, Kingma '16)."""
    with tf.variable_scope(name):
        if init:
            v = tf.get_variable(
                "V", [k_h, k_w] + [int(input_.get_shape()[-1]), output_dim],
                tf.float32, initializer(0, stddev), trainable=True)
            v_norm = tf.nn.l2_normalize(v.initialized_value(), [0, 1, 2])
            x_init = tf.nn.conv2d(input_, v_norm, strides=[1, d_h, d_w, 1],
                                  padding="SAME")
            m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
            scale_init = init_scale / tf.sqrt(v_init + 1e-8)
            g = tf.get_variable(
                "g", dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable(
                "b", dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init, [1, 1, 1, output_dim]) * (
                x_init - tf.reshape(m_init, [1, 1, 1, output_dim]))
            return x_init
        else:
            v = tf.get_variable("V")
            g = tf.get_variable("g")
            b = tf.get_variable("b")
            tf.assert_variables_initialized([v, g, b])
            w = tf.reshape(g, [1, 1, 1, output_dim]) * tf.nn.l2_normalize(
                v, [0, 1, 2])
            x = tf.nn.bias_add(
                tf.nn.conv2d(input_, w, [1, d_h, d_w, 1], padding="SAME"), b)
            return x
Example #7
def conv2d(x, num_filters, filter_size=[3,3], stride=[1,1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' convolutional layer '''
    name = get_name('conv2d', counters)
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', filter_size+[int(x.get_shape()[-1]),num_filters], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0,1,2])
            x_init = tf.nn.conv2d(x, V_norm, [1]+stride+[1], pad)
            m_init, v_init = tf.nn.moments(x_init, [0,1,2])
            scale_init = init_scale/tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init,[1,1,1,num_filters])*(x_init-tf.reshape(m_init,[1,1,1,num_filters]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            tf.assert_variables_initialized([V,g,b])

            # use weight normalization (Salimans & Kingma, 2016)
            W = tf.reshape(g,[1,1,1,num_filters])*tf.nn.l2_normalize(V,[0,1,2])

            # calculate convolutional layer output
            x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1]+stride+[1], pad), b)

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x
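The init branch above is the data-dependent initialization from the same paper: with t = conv(x, V / \lVert V \rVert) and per-channel moments (\mu, \sigma^2) of t over the init batch, it sets

    g \leftarrow \frac{\text{init\_scale}}{\sqrt{\sigma^2 + \epsilon}}, \qquad b \leftarrow -\mu \, g

so the returned x_init = g (t - \mu) has zero mean and standard deviation init_scale per channel on that batch.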
Example #8
        def weight_norm(x):
            tf.assert_variables_initialized([V, g, b])

            # use weight normalization (Salimans & Kingma, 2016)
            x = tf.matmul(x, V)
            scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
            x = tf.reshape(scaler, [1, num_outputs]) * x + tf.reshape(b, [1, num_outputs])

            return x, g, b
Example #9
    def testPrepareSessionFails(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
        checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
        try:
            gfile.DeleteRecursively(checkpoint_dir)
            gfile.DeleteRecursively(checkpoint_dir2)
        except errors.OpError:
            pass  # Ignore
        gfile.MakeDirs(checkpoint_dir)

        with tf.Graph().as_default():
            v = tf.Variable([1.0, 2.0, 3.0], name="v")
            sm = tf.train.SessionManager(
                ready_op=tf.assert_variables_initialized())
            saver = tf.train.Saver({"v": v})
            sess = sm.prepare_session("",
                                      init_op=tf.initialize_all_variables(),
                                      saver=saver,
                                      checkpoint_dir=checkpoint_dir)
            self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
            checkpoint_filename = os.path.join(checkpoint_dir,
                                               "prepare_session_checkpoint")
            saver.save(sess, checkpoint_filename)
        # Create a new Graph and SessionManager and recover.
        with tf.Graph().as_default():
            # Renames the checkpoint directory.
            os.rename(checkpoint_dir, checkpoint_dir2)
            gfile.MakeDirs(checkpoint_dir)
            v = tf.Variable([6.0, 7.0, 8.0], name="v")
            with self.test_session():
                self.assertEqual(False, tf.is_variable_initialized(v).eval())
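            # Note: this SessionManager is constructed but never bound to a
            # name; the prepare_session calls below reuse sm from the first
            # graph block, which is still in scope.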
            tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
            saver = tf.train.Saver({"v": v})
            # This should fail as there's no checkpoint within 2 seconds.
            with self.assertRaisesRegexp(
                    RuntimeError,
                    "no init_op or init_fn or local_init_op was given"):
                sess = sm.prepare_session("",
                                          init_op=None,
                                          saver=saver,
                                          checkpoint_dir=checkpoint_dir,
                                          wait_for_checkpoint=True,
                                          max_wait_secs=2)
            # Rename the checkpoint directory back.
            gfile.DeleteRecursively(checkpoint_dir)
            os.rename(checkpoint_dir2, checkpoint_dir)
            # This should succeed as there's checkpoint.
            sess = sm.prepare_session("",
                                      init_op=None,
                                      saver=saver,
                                      checkpoint_dir=checkpoint_dir,
                                      wait_for_checkpoint=True,
                                      max_wait_secs=2)
            self.assertEqual(
                True,
                tf.is_variable_initialized(
                    sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
Example #10
        def weight_norm(x):
            tf.assert_variables_initialized([V, g, b])

            # use weight normalization (Salimans & Kingma, 2016)
            W = tf.reshape(g, [1, 1, 1, num_outputs]) * tf.nn.l2_normalize(V, [0, 1, 2])

            # calculate convolutional layer output
            x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1] + stride + [1], padding, data_format=data_format), b)

            return x, g, b
Example #11
def conv(inp,
         name,
         filter_size,
         out_channels,
         stride=1,
         padding='SAME',
         nonlinearity=None,
         init_scale=1.0):

    with tf.variable_scope(name):

        strides = [1, stride, stride, 1]
        in_channels = inp.get_shape().as_list()[3]

        if tf.GLOBAL['init']:
            V = tf.get_variable(
                'V',
                shape=filter_size + [in_channels, out_channels],
                dtype=tf.float32,
                initializer=tf.random_normal_initializer(0, 0.05),
                trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0, 1, 2])
            out = tf.nn.conv2d(inp, V_norm, strides, padding)
            m_init, v_init = tf.nn.moments(out, [0, 1, 2])
            scale_init = init_scale / tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g',
                                shape=None,
                                dtype=tf.float32,
                                initializer=scale_init,
                                trainable=True)
            b = tf.get_variable('b',
                                shape=None,
                                dtype=tf.float32,
                                initializer=-m_init * scale_init,
                                trainable=True)
            out = tf.reshape(scale_init, [1, 1, 1, out_channels]) * (
                out - tf.reshape(m_init, [1, 1, 1, out_channels]))
            if nonlinearity is not None:
                out = nonlinearity(out)
            return out

        else:
            V, g, b = tf.get_variable('V'), tf.get_variable(
                'g'), tf.get_variable('b')
            tf.assert_variables_initialized([V, g, b])

            W = g[None, None, None] * tf.nn.l2_normalize(V, [0, 1, 2])

            out = tf.nn.conv2d(inp, W, strides, padding) + b[None, None, None]

            if nonlinearity is not None:
                out = nonlinearity(out)

    return out
Example #12
def weight_norm_deconv2d(x,
                         output_dim,
                         k_h,
                         k_w,
                         d_h,
                         d_w,
                         init=False,
                         init_scale=1.0,
                         stddev=0.02,
                         name="wn_deconv2d",
                         initializer=tf.truncated_normal_initializer):
    """Performs Transposed Convolution with Weight Normalization."""
    xs = x.get_shape().as_list()
    target_shape = [xs[0], xs[1] * d_h, xs[2] * d_w, output_dim]
    with tf.variable_scope(name):
        if init:
            v = tf.get_variable(
                "V",
                [k_h, k_w] + [output_dim, int(x.get_shape()[-1])],
                tf.float32,
                initializer(0, stddev),
                trainable=True)
            v_norm = tf.nn.l2_normalize(v.initialized_value(), [0, 1, 3])
            x_init = tf.nn.conv2d_transpose(x,
                                            v_norm,
                                            target_shape, [1, d_h, d_w, 1],
                                            padding="SAME")
            m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
            scale_init = init_scale / tf.sqrt(v_init + 1e-8)
            g = tf.get_variable("g",
                                dtype=tf.float32,
                                initializer=scale_init,
                                trainable=True)
            b = tf.get_variable("b",
                                dtype=tf.float32,
                                initializer=-m_init * scale_init,
                                trainable=True)
            x_init = tf.reshape(scale_init, [1, 1, 1, output_dim]) * (
                x_init - tf.reshape(m_init, [1, 1, 1, output_dim]))
            return x_init
        else:
            v = tf.get_variable("v")
            g = tf.get_variable("g")
            b = tf.get_variable("b")
            tf.assert_variables_initialized([v, g, b])
            w = tf.reshape(g, [1, 1, output_dim, 1]) * tf.nn.l2_normalize(
                v, [0, 1, 3])
            x = tf.nn.conv2d_transpose(x,
                                       w,
                                       target_shape,
                                       strides=[1, d_h, d_w, 1],
                                       padding="SAME")
            x = tf.nn.bias_add(x, b)
            return x
Example #13
def conv(inp, name, filter_size, out_channels, stride=1,
         padding='SAME', nonlinearity=None, init_scale=1.0, dilation=None):
    """Convolutional layer.
    If tf.GLOBAL['init'] is true, this creates the layers paramenters (g, b, W) : L(x) = g|W| (*) x + b

    Args:
      x: input tensor
      name (str): variable scope name
      filter_size (int pair): filter size
      out_channels (int): number of output channels
      strid (int): horizontal and vertical stride
      padding (str): padding mode
      nonlinearity (func): activation function
      init_scale: initial scale for the weights and bias variables
      dilation: optional dilation rate
    """
    with tf.variable_scope(name):
        strides = [1, stride, stride, 1]
        in_channels = inp.get_shape().as_list()[3]

        if tf.GLOBAL['init']:
            V = get_variable('V', shape=tuple(filter_size) + (in_channels, out_channels), dtype=tf.float32,
                             initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0, 1, 2])
            if dilation is None:
                out = tf.nn.conv2d(inp, V_norm, strides, padding)
            else:
                assert(stride == 1)
                out = tf.nn.atrous_conv2d(inp, V_norm, dilation, padding)
            m_init, v_init = tf.nn.moments(out, [0, 1, 2])
            scale_init = init_scale / tf.sqrt(v_init + 1e-8)
            g = get_variable('g', shape=None, dtype=tf.float32, initializer=scale_init, trainable=True, regularizer=tf.contrib.layers.l2_regularizer(tf.GLOBAL['reg']))
            b = get_variable('b', shape=None, dtype=tf.float32, initializer=-m_init * scale_init, trainable=True, regularizer=tf.contrib.layers.l2_regularizer(tf.GLOBAL['reg']))
            out = tf.reshape(scale_init, [1, 1, 1, out_channels]) * (out - tf.reshape(m_init, [1, 1, 1, out_channels]))
            if nonlinearity is not None:
                out = nonlinearity(out)

        else:
            V, g, b = get_variable('V'), get_variable('g'), get_variable('b')
            tf.assert_variables_initialized([V, g, b])
            W = g[None, None, None] * tf.nn.l2_normalize(V, [0, 1, 2])
            if dilation is None:
                out = tf.nn.conv2d(inp, W, strides, padding) + b[None, None, None]
            else:
                assert(stride == 1)
                out = tf.nn.atrous_conv2d(inp, W, dilation, padding) + b[None, None, None]
            if nonlinearity is not None:
                out = nonlinearity(out)

        return out
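A hedged usage sketch for the helper above (tf.GLOBAL and get_variable are this module's own globals/helpers, assumed in scope; the call below is hypothetical):

# dilation and stride are mutually exclusive here: tf.nn.atrous_conv2d has no
# stride argument, hence the assert(stride == 1) guard in both branches.
h = conv(images, 'conv1', filter_size=(3, 3), out_channels=64,
         nonlinearity=tf.nn.relu, dilation=2)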
Example #14
def fully_connected(x,
                    num_outputs,
                    activation_fn=None,
                    init_scale=1.,
                    is_init=False,
                    ema=None,
                    name=None):
    ''' fully connected layer '''
    with tf.variable_scope(name, default_name='Full'):
        if is_init:
            # data based initialization of parameters
            V = tf.get_variable('V', [int(x.get_shape()[1]), num_outputs],
                                tf.float32,
                                tf.random_normal_initializer(0, 0.05),
                                trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
            x_init = tf.matmul(x, V_norm)
            m_init, v_init = tf.nn.moments(x_init, [0])
            scale_init = init_scale / tf.sqrt(v_init + 1e-10)
            g = tf.get_variable('g',
                                dtype=tf.float32,
                                initializer=scale_init,
                                trainable=True)
            b = tf.get_variable('b',
                                dtype=tf.float32,
                                initializer=-m_init * scale_init,
                                trainable=True)
            x_init = scale_init * (x_init - m_init)
            if activation_fn is not None:
                x_init = activation_fn(x_init)
            return x_init

        else:
            V = tf.get_variable('V')
            g = tf.get_variable('g')
            b = tf.get_variable('b')

            tf.assert_variables_initialized([V, g, b])

            # use weight normalization (Salimans & Kingma, 2016)
            x = tf.matmul(x, V)
            scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
            x = scaler * x + b

            # apply activation_fn
            if activation_fn is not None:
                x = activation_fn(x)
            return x
Example #15
    def testPrepareSessionFails(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
        checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
        try:
            gfile.DeleteRecursively(checkpoint_dir)
            gfile.DeleteRecursively(checkpoint_dir2)
        except OSError:
            pass  # Ignore
        gfile.MakeDirs(checkpoint_dir)

        with tf.Graph().as_default():
            v = tf.Variable([1.0, 2.0, 3.0], name="v")
            sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
            saver = tf.train.Saver({"v": v})
            sess = sm.prepare_session(
                "", init_op=tf.initialize_all_variables(), saver=saver, checkpoint_dir=checkpoint_dir
            )
            self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
            checkpoint_filename = os.path.join(checkpoint_dir, "prepare_session_checkpoint")
            saver.save(sess, checkpoint_filename)
        # Create a new Graph and SessionManager and recover.
        with tf.Graph().as_default():
            # Renames the checkpoint directory.
            os.rename(checkpoint_dir, checkpoint_dir2)
            gfile.MakeDirs(checkpoint_dir)
            v = tf.Variable([6.0, 7.0, 8.0], name="v")
            with self.test_session():
                self.assertEqual(False, tf.is_variable_initialized(v).eval())
            tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
            saver = tf.train.Saver({"v": v})
            # This should fail as there's no checkpoint within 2 seconds.
            with self.assertRaisesRegexp(RuntimeError, "no init_op or init_fn was given"):
                sess = sm.prepare_session(
                    "",
                    init_op=None,
                    saver=saver,
                    checkpoint_dir=checkpoint_dir,
                    wait_for_checkpoint=True,
                    max_wait_secs=2,
                )
            # Rename the checkpoint directory back.
            gfile.DeleteRecursively(checkpoint_dir)
            os.rename(checkpoint_dir2, checkpoint_dir)
            # This should succeed as there's checkpoint.
            sess = sm.prepare_session(
                "", init_op=None, saver=saver, checkpoint_dir=checkpoint_dir, wait_for_checkpoint=True, max_wait_secs=2
            )
            self.assertEqual(True, tf.is_variable_initialized(sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
Example #16
    def __init__(self, model, conf, model_path=None):
        self.model = model
        self.conf = conf

        print('Defining the session')
        sess_config = tf.ConfigProto()
        sess_config.allow_soft_placement = True
        sess_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=sess_config)
        self.sess.run(tf.global_variables_initializer())
        try:
            self.sess.run(tf.assert_variables_initialized())
        except tf.errors.FailedPreconditionError:
            raise RuntimeError('Not all variables initialized')

        self.saver = tf.train.Saver(tf.global_variables())
        if model_path:
            print('Restoring model from: ' + str(model_path))
            self.saver.restore(self.sess, model_path)

        self.binary_opening_filter = sitk.BinaryMorphologicalOpeningImageFilter(
        )
        self.binary_opening_filter.SetKernelRadius(1)

        self.binary_closing_filter = sitk.BinaryMorphologicalClosingImageFilter(
        )
        self.binary_closing_filter.SetKernelRadius(1)

        self.erosion_filter = sitk.BinaryErodeImageFilter()
        self.erosion_filter.SetKernelRadius(1)

        self.dilation_filter = sitk.BinaryDilateImageFilter()
        self.dilation_filter.SetKernelRadius(1)
Example #17
    def initialize(self, *args, **kwargs):
        super().initialize()

        # invalidate saver
        self._saver = None

        self.guidance = kwargs.get('guidance')
        self.summary_service = kwargs.get('summary_service')

        self.log_directory = kwargs.get('log_directory', '.')
        self.checkpoints_directory = kwargs.get('checkpoints_directory', '.')
        self.evaluations_directory = kwargs.get('evaluations_directory', '.')

        # ------- init some internal variable ----------
        self.training_steps = 0  # training step counter
        # saves for the current episode an intermediate buffer, which also allows to use an n-step return, save
        self.episode_buffer = []

        tf.reset_default_graph()
        with self.context.graph.as_default():
            # Create variables
            self.create_variables()

            # merge all summaries
            self.summarize = tf.summary.merge_all()
            self.no_op = tf.no_op()
            var_lists = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

            # initialize variables
            self.context.session.run(tf.variables_initializer(var_lists))
            # Make sure all variables are initialized
            self.context.session.run(tf.assert_variables_initialized())

            # initialize common summaries for all agents
            self._initialize_summary_writer()
Example #18
 def testPrepareSessionSucceedsWithInitFn(self):
   with tf.Graph().as_default():
     v = tf.Variable([125], name="v")
     sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
     sess = sm.prepare_session("",
                               init_fn=lambda sess: sess.run(v.initializer))
     self.assertAllClose([125], sess.run(v))
Example #19
 def init_variables(self):
     check_vars_initialized = tf.assert_variables_initialized(
         self.variables)
     try:
         self.session.run(check_vars_initialized)
     except tf.errors.FailedPreconditionError:
         self.session.run(tf.variables_initializer(self.variables))
Example #20
def conv2d(x, num_filters, filter_size=[3, 3], stride=[1, 1], pad='SAME', init_scale=1., counters={}, init=False, **kwargs):
    ''' convolutional layer '''
    num_filters = int(num_filters)
    strides = [1] + stride + [1]
    name = get_name('conv2d', counters)
    with tf.variable_scope(name):
        if init:
            xs = x.shape.as_list()
            # data based initialization of parameters
            V = tf.get_variable('V', filter_size + [xs[-1], num_filters],
                                tf.float32, tf.random_normal_initializer(0, 0.05))
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0, 1, 2])
            x_init = tf.nn.conv2d(x, V_norm, strides, pad)
            m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
            scale_init = init_scale / tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init * scale_init)
            x_init = tf.reshape(scale_init, [1, 1, 1, num_filters]) * (x_init - tf.reshape(m_init, [1, 1, 1, num_filters]))

            return x_init
        else:
            V = tf.get_variable("V")
            g = tf.get_variable("g")
            b = tf.get_variable("b")
            with tf.control_dependencies([tf.assert_variables_initialized([V, g, b])]):
                # use weight normalization (Salimans & Kingma, 2016)
                W = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(V, [0, 1, 2])

                # calculate convolutional layer output
                x = tf.nn.bias_add(tf.nn.conv2d(x, W, strides, pad), b)

                return x
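One detail worth noting in the example above: unlike most snippets on this page, it wraps the downstream ops in tf.control_dependencies, which forces the assertion to actually execute. Creating the op without using its result, as in Examples #2 or #5, builds the check but never runs it in graph mode:

check = tf.assert_variables_initialized([V, g, b])  # built, but inert on its own
with tf.control_dependencies([check]):
    x = tf.matmul(x, V)  # now running x also runs the check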
Example #21
 def testPrepareSessionSucceedsWithInitFeedDict(self):
     with tf.Graph().as_default():
         p = tf.placeholder(tf.float32, shape=(3,))
         v = tf.Variable(p, name="v")
         sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
         sess = sm.prepare_session("", init_op=tf.initialize_all_variables(), init_feed_dict={p: [1.0, 2.0, 3.0]})
         self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
Example #22
    def __init__(self, session,
                 optimizer,
                 q_network,
                 state_dim,
                 num_actions,
                 batch_size=32,
                 init_exp=0.5,
                 final_exp=0.1,
                 anneal_steps=10000,
                 replay_buffer_size=10000,
                 store_replay_every=5,
                 discount_factor=0.9,
                 target_update_rate=0.01,
                 reg_param=0.01,
                 max_gradient=5,
                 double_q_learning=False,
                 summary_writer=None,
                 summary_every=100):
        self.summary_every = 1

        # tensorflow
        self.session = session
        self.optimizer = optimizer
        self.summary_writer = summary_writer

        # model parts
        self.q_network = q_network
        self.replayBuffer = ReplayBuffer(buffer_size=replay_buffer_size)

        # q learning
        self.batch_size = batch_size
        self.state_dim = state_dim
        self.num_actions = num_actions
        self.exploration = init_exp
        self.final_exp = final_exp
        self.anneal_steps = anneal_steps
        self.discount_factor = discount_factor
        self.target_update_rate = target_update_rate
        self.double_q_learning = double_q_learning

        # training
        self.max_gradient = max_gradient
        self.reg_param = reg_param

        # counter
        self.store_replay_every = store_replay_every
        self.store_experience_cost = 0
        self.train_iteration = 0

        #initialize
        self.create_variables()
        var_lists = tf.get_collection(tf.GraphKeys.VARIABLES)
        self.session.run(tf.initialize_variables(var_lists))

        # verify
        self.session.run(tf.assert_variables_initialized())

        if self.summary_writer is not None:
            self.summary_writer.add_graph(self.session.graph)
            self.summary_every = summary_every
Example #23
    def __init__(self,
                 state_size,
                 num_steps,
                 num_classes,
                 learning_rate,
                 summary_every=100):
        """Create a Basic RNN classfier with the given STATE_SIZE,
        NUM_STEPS, and NUM_CLASSES
        """
        self.state_size = state_size
        self.num_steps = num_steps
        self.num_classes = num_classes
        self.learning_rate = learning_rate

        # tensorflow machinery
        self.session = tf.Session()
        self.summary_writer = tf.summary.FileWriter(
            os.path.join(os.getcwd(), "tensorboard/"))
        self.no_op = tf.no_op()

        # counters
        self.train_itr = 0

        # create and initialize variables
        self.create_graph()
        var_lists = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.session.run(tf.variables_initializer(var_lists))

        # make sure all variables are initialized
        self.session.run(tf.assert_variables_initialized())

        # add the graph
        self.summary_writer.add_graph(self.session.graph)
        self.summary_every = summary_every
Example #24
    def __init__(
            self,
            session,
            optimizer,
            q_network,
            state_dim,
            num_actions,
            batch_size=32,  # batch size
            init_exp=0.5,  # initial exploration prob
            final_exp=0.1,  # final exploration prob
            anneal_steps=10000,  # number of steps for annealing exploration
            replay_buffer_size=10000,
            store_replay_every=5,  # how frequent to store experience
            discount_factor=0.9,  # discount future rewards
            target_update_rate=0.01,
            reg_param=0.01,  # regularization constants
            max_gradient=5,  # max gradient norms
            double_q_learning=False,
            summary_writer=None,
            summary_every=100):
        # tensorflow setup
        self.session = session
        self.optimizer = optimizer
        self.summary_writer = summary_writer

        # model components
        self.q_network = q_network
        self.replay_buffer = ReplayBuffer(buffer_size=replay_buffer_size)

        # Q-learning parameters
        self.batch_size = batch_size
        self.state_dim = state_dim
        self.num_actions = num_actions
        self.exploration = init_exp
        self.init_exp = init_exp
        self.final_exp = final_exp
        self.anneal_steps = anneal_steps
        self.discount_factor = discount_factor
        self.target_update_rate = target_update_rate
        self.double_q_learning = double_q_learning

        # training parameters
        self.max_gradient = max_gradient
        self.reg_param = reg_param

        # counters
        self.store_replay_every = store_replay_every
        self.store_experience_cnt = 0
        self.train_iteration = 0

        # create and initializer variables
        self.create_variables()
        var_lists = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.session.run(tf.variables_initializer(var_lists))
        # assert that all variables are initialized
        self.session.run(tf.assert_variables_initialized())

        if self.summary_writer is not None:
            self.summary_writer.add_graph(self.session.graph)
            self.summary_every = summary_every
Example #25
def dense(x, num_units, init_scale=1., counters={}, init=False, **kwargs):
    ''' fully connected layer '''
    name = get_name('dense', counters)
    with tf.variable_scope(name):
        if init:
            xs = x.shape.as_list()
            # data based initialization of parameters
            V = tf.get_variable('V', [xs[1], num_units], tf.float32,
                                tf.random_normal_initializer(0, 0.05))
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
            x_init = tf.matmul(x, V_norm)
            m_init, v_init = tf.nn.moments(x_init, [0])
            scale_init = init_scale / tf.sqrt(v_init + 1e-10)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init)
            b = tf.get_variable('b',
                                dtype=tf.float32,
                                initializer=-m_init * scale_init)
            x_init = tf.reshape(scale_init, [1, num_units]) * (
                x_init - tf.reshape(m_init, [1, num_units]))

            return x_init
        else:
            V = tf.get_variable("V")
            g = tf.get_variable("g")
            b = tf.get_variable("b")
            with tf.control_dependencies(
                [tf.assert_variables_initialized([V, g, b])]):
                # use weight normalization (Salimans & Kingma, 2016)
                x = tf.matmul(x, V)
                scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
                x = tf.reshape(scaler, [1, num_units]) * x + tf.reshape(
                    b, [1, num_units])

                return x
Example #26
 def testPrepareSessionSucceedsWithInitFn(self):
   with tf.Graph().as_default():
     v = tf.Variable([125], name="v")
     sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
     sess = sm.prepare_session("",
                               init_fn=lambda sess: sess.run(v.initializer))
     self.assertAllClose([125], sess.run(v))
Example #27
    def __init__(self, session, optimizer_critic, optimizer_actor, critic_network, actor_network, gamma_lmbda,
                 state_dim, num_actions, summary_writer=None, summary_every=5):

        self.session = session
        self.summary_writer = summary_writer
        self.optimizer_critic = optimizer_critic
        self.optimizer_actor = optimizer_actor

        self.actor_network = actor_network
        self.critic_network = critic_network

        self.state_dim = state_dim
        self.num_actions = num_actions
        self.gamma_lmbda = tf.constant(gamma_lmbda)

        # initialize the graph on tensorflow
        self.create_variables()
        var_lists = tf.get_collection(tf.GraphKeys.VARIABLES)
        self.session.run(tf.initialize_variables(var_lists))

        # make sure the variables in graph are initialized
        self.session.run(tf.assert_variables_initialized())

        if self.summary_writer is not None:
            self.summary_writer.add_graph(self.session.graph)
            self.summary_every = summary_every
Example #28
def deconv2d(x, num_filters, filter_size=[3, 3], stride=[1, 1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' transposed convolutional layer '''
    name = get_name('deconv2d', counters)
    xs = int_shape(x)
    if pad == 'SAME':
        target_shape = [xs[0], xs[1] * stride[0],
                        xs[2] * stride[1], num_filters]
    else:
        target_shape = [xs[0], xs[1] * stride[0] + filter_size[0] - 1,
                        xs[2] * stride[1] + filter_size[1] - 1, num_filters]
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', filter_size + [num_filters, int(x.get_shape()[-1])],
                                tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0, 1, 3])
            x_init = tf.nn.conv2d_transpose(x, V_norm, target_shape,
                                            [1] + stride + [1], padding=pad)
            m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
            scale_init = init_scale / tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g', dtype=tf.float32,
                                initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32,
                                initializer=-m_init * scale_init, trainable=True)
            x_init = tf.reshape(scale_init, [1, 1, 1, num_filters]) * (
                x_init - tf.reshape(m_init, [1, 1, 1, num_filters]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            tf.assert_variables_initialized([V, g, b])

            # use weight normalization (Salimans & Kingma, 2016)
            W = tf.reshape(g, [1, 1, num_filters, 1]) * \
                tf.nn.l2_normalize(V, [0, 1, 3])

            # calculate convolutional layer output
            x = tf.nn.conv2d_transpose(
                x, W, target_shape, [1] + stride + [1], padding=pad)
            x = tf.nn.bias_add(x, b)

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x
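The target_shape computed at the top of this example follows the standard transposed-convolution output-size formulas, per spatial dimension with input size H, stride s, and filter size k:

    H_\text{out} = H s \quad (\text{'SAME'}), \qquad H_\text{out} = H s + (k - 1) \quad (\text{'VALID'})

The deconv helper in Example #4 computes the same two cases.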
Example #29
    def __init__(
            self,
            session,
            optimizer,
            policy_network,
            state_dim,
            num_actions,
            gru_unit_size,
            num_step,
            num_layers,
            save_path,
            global_step,
            max_gradient=5,  # max gradient norms
            entropy_bonus=0.001,
            summary_writer=None,
            summary_every=100,
            loss_function="l2"):

        # tensorflow machinery
        self.session = session
        self.optimizer = optimizer
        self.summary_writer = summary_writer
        self.summary_every = summary_every
        self.gru_unit_size = gru_unit_size
        self.num_step = num_step
        self.num_layers = num_layers
        self.no_op = tf.no_op()

        # model components
        self.policy_network = policy_network
        self.state_dim = state_dim
        self.num_actions = num_actions
        self.loss_function = loss_function

        # training parameters
        self.max_gradient = max_gradient
        self.entropy_bonus = entropy_bonus

        #counter
        self.global_step = global_step
        self.reward_stats = []

        # create and initialize variables
        self.create_variables()
        var_lists = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.session.run(tf.variables_initializer(var_lists))

        # make sure all variables are initialized
        self.session.run(tf.assert_variables_initialized())

        # try load saved model
        self.saver = tf.train.Saver(tf.global_variables())
        self.save_path = save_path
        self.load_model()

        if self.summary_writer is not None:
            # graph was not available when journalist was created
            self.summary_writer.add_graph(self.session.graph)
            self.summary_every = summary_every
Example #30
  def __init__(self, session,
                     optimizer,
                     policy_network,
                     state_dim,
                     num_actions,
                     init_exp=0.1,         # initial exploration prob
                     final_exp=0.0,        # final exploration prob
                     anneal_steps=10000,   # N steps for annealing exploration
                     discount_factor=0.99, # discount future rewards
                     reg_param=0.0001,      # regularization constants
                     max_gradient=5,       # max gradient norms
                     summary_writer=None,
                     summary_every=100):

    # tensorflow machinery
    self.session        = session
    self.optimizer      = optimizer
    self.summary_writer = summary_writer

    # model components
    self.policy_network = policy_network

    # training parameters
    self.state_dim       = state_dim
    self.num_actions     = num_actions
    self.discount_factor = discount_factor
    self.max_gradient    = max_gradient
    self.reg_param       = reg_param

    # exploration parameters
    self.exploration  = init_exp
    self.init_exp     = init_exp
    self.final_exp    = final_exp
    self.anneal_steps = anneal_steps

    # counters
    self.train_iteration = 0

    # rollout buffer
    self.state_buffer  = []
    self.reward_buffer = []
    self.action_buffer = []

    # record reward history for normalization
    self.all_rewards = []
    self.max_reward_length = 10000

    # create and initialize variables
    self.create_variables()
    var_lists = tf.get_collection(tf.GraphKeys.VARIABLES)
    self.session.run(tf.initialize_variables(var_lists))

    # make sure all variables are initialized
    self.session.run(tf.assert_variables_initialized())

    if self.summary_writer is not None:
      # graph was not available when journalist was created
      self.summary_writer.add_graph(self.session.graph_def)
      self.summary_every = summary_every
Example #31
  def __init__(self, session,
                     optimizer,
                     policy_network,
                     state_dim,
                     num_actions,
                     init_exp=0.5,         # initial exploration prob
                     final_exp=0.0,        # final exploration prob
                     anneal_steps=10000,   # N steps for annealing exploration
                     discount_factor=0.99, # discount future rewards
                     reg_param=0.001,      # regularization constants
                     max_gradient=5,       # max gradient norms
                     summary_writer=None,
                     summary_every=100):

    # tensorflow machinery
    self.session        = session
    self.optimizer      = optimizer
    self.summary_writer = summary_writer

    # model components
    self.policy_network = policy_network

    # training parameters
    self.state_dim       = state_dim
    self.num_actions     = num_actions
    self.discount_factor = discount_factor
    self.max_gradient    = max_gradient
    self.reg_param       = reg_param

    # exploration parameters
    self.exploration  = init_exp
    self.init_exp     = init_exp
    self.final_exp    = final_exp
    self.anneal_steps = anneal_steps

    # counters
    self.train_iteration = 0

    # rollout buffer
    self.state_buffer  = []
    self.reward_buffer = []
    self.action_buffer = []

    # record reward history for normalization
    self.all_rewards = []
    self.max_reward_length = 1000000

    # create and initialize variables
    self.create_variables()
    var_lists = tf.get_collection(tf.GraphKeys.VARIABLES)
    self.session.run(tf.initialize_variables(var_lists))

    # make sure all variables are initialized
    self.session.run(tf.assert_variables_initialized())

    if self.summary_writer is not None:
      # graph was not available when journalist was created
      self.summary_writer.add_graph(self.session.graph)
      self.summary_every = summary_every
Example #32
  def __init__(self, session,
                     optimizer,
                     actor_network,
                     critic_network,
                     state_dim,
                     action_dim,
                     batch_size=32,
                     replay_buffer_size=1000000, # size of replay buffer
                     store_replay_every=1,       # how frequent to store experience
                     discount_factor=0.99,       # discount future rewards
                     target_update_rate=0.01,
                     reg_param=0.01,             # regularization constants
                     max_gradient=5,             # max gradient norms
                     noise_sigma=0.20,
                     noise_theta=0.15,
                     summary_writer=None,
                     summary_every=100):

    # tensorflow machinery
    self.session        = session
    self.optimizer      = optimizer
    self.summary_writer = summary_writer

    # model components
    self.actor_network  = actor_network
    self.critic_network = critic_network
    self.replay_buffer  = ReplayBuffer(buffer_size=replay_buffer_size)

    # training parameters
    self.batch_size         = batch_size
    self.state_dim          = state_dim
    self.action_dim         = action_dim
    self.discount_factor    = discount_factor
    self.target_update_rate = target_update_rate
    self.max_gradient       = max_gradient
    self.reg_param          = reg_param

    # Ornstein-Uhlenbeck noise for exploration
    self.noise_var = tf.Variable(tf.zeros([1, action_dim]))
    noise_random = tf.random_normal([1, action_dim], stddev=noise_sigma)
    self.noise = self.noise_var.assign_sub((noise_theta) * self.noise_var - noise_random)

    # counters
    self.store_replay_every   = store_replay_every
    self.store_experience_cnt = 0
    self.train_iteration      = 0

    # create and initialize variables
    self.create_variables()
    var_lists = tf.get_collection(tf.GraphKeys.VARIABLES)
    self.session.run(tf.initialize_variables(var_lists))

    # make sure all variables are initialized
    self.session.run(tf.assert_variables_initialized())

    if self.summary_writer is not None:
      # graph was not available when journalist was created
      self.summary_writer.add_graph(self.session.graph)
      self.summary_every = summary_every
Example #33
  def __init__(self, session,
                     optimizer,
                     actor_network,
                     critic_network,
                     state_dim,
                     action_dim,
                     batch_size=32,
                     replay_buffer_size=1000000, # size of replay buffer
                     store_replay_every=1,       # how frequent to store experience
                     discount_factor=0.99,       # discount future rewards
                     target_update_rate=0.01,
                     reg_param=0.01,             # regularization constants
                     max_gradient=5,             # max gradient norms
                     noise_sigma=0.20,
                     noise_theta=0.15,
                     summary_writer=None,
                     summary_every=100):

    # tensorflow machinery
    self.session        = session
    self.optimizer      = optimizer
    self.summary_writer = summary_writer

    # model components
    self.actor_network  = actor_network
    self.critic_network = critic_network
    self.replay_buffer  = ReplayBuffer(buffer_size=replay_buffer_size)

    # training parameters
    self.batch_size         = batch_size
    self.state_dim          = state_dim
    self.action_dim         = action_dim
    self.discount_factor    = discount_factor
    self.target_update_rate = target_update_rate
    self.max_gradient       = max_gradient
    self.reg_param          = reg_param

    # Ornstein-Uhlenbeck noise for exploration
    self.noise_var = tf.Variable(tf.zeros([1, action_dim]))
    noise_random = tf.random_normal([1, action_dim], stddev=noise_sigma)
    self.noise = self.noise_var.assign_sub((noise_theta) * self.noise_var - noise_random)

    # counters
    self.store_replay_every   = store_replay_every
    self.store_experience_cnt = 0
    self.train_iteration      = 0

    # create and initialize variables
    self.create_variables()
    var_lists = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    self.session.run(tf.variables_initializer(var_lists))

    # make sure all variables are initialized
    self.session.run(tf.assert_variables_initialized())

    if self.summary_writer is not None:
      # graph was not available when journalist was created
      self.summary_writer.add_graph(self.session.graph)
      self.summary_every = summary_every
Example #34
    def testWaitForSessionReturnsNoneAfterTimeout(self):
        with tf.Graph().as_default():
            tf.Variable(1, name="v")
            sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized(), recovery_wait_secs=1)

            # Set max_wait_secs to allow us to try a few times.
            with self.assertRaises(errors.DeadlineExceededError):
                sm.wait_for_session(master="", max_wait_secs=3)
Example #35
def initializeRemainingVars(sess, feed_dict):
    varlist = tf.global_variables()
    for var in varlist:
        try:
            sess.run(tf.assert_variables_initialized([var]))
        except tf.errors.FailedPreconditionError:
            sess.run(tf.variables_initializer([var]))
            print('Initializing variable:%s' % var.name)
Example #36
def initializeRemainingVars(sess, feed_dict):
    varlist = tf.global_variables()
    for var in varlist:
        try:
            sess.run(tf.assert_variables_initialized([var]))
        except tf.errors.FailedPreconditionError:
            sess.run(tf.variables_initializer([var]))
            print('Initializing variable:%s' % var.name)
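A hedged alternative to the try/except probe in the last two examples: tf.report_uninitialized_variables, the documented successor to the obsolete tf.assert_variables_initialized, returns the names of the uninitialized variables instead of raising, so the subset can be collected in one run call:

# reported names come back as bytes under Python 3
names = set(sess.run(tf.report_uninitialized_variables()))
uninit = [v for v in tf.global_variables()
          if v.name.split(':')[0].encode() in names]
sess.run(tf.variables_initializer(uninit))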
Example #37
 def testPrepareSessionSucceeds(self):
     with tf.Graph().as_default():
         v = tf.Variable([1.0, 2.0, 3.0], name="v")
         sm = tf.train.SessionManager(
             ready_op=tf.assert_variables_initialized())
         sess = sm.prepare_session("",
                                   init_op=tf.initialize_all_variables())
         self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
Example #38
 def testPrepareSessionSucceedsWithInitFeedDict(self):
   with tf.Graph().as_default():
     p = tf.placeholder(tf.float32, shape=(3,))
     v = tf.Variable(p, name="v")
     sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
     sess = sm.prepare_session("",
                               init_op=tf.initialize_all_variables(),
                               init_feed_dict={p: [1.0, 2.0, 3.0]})
     self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
Example #39
  def testWaitForSessionReturnsNoneAfterTimeout(self):
    with tf.Graph().as_default():
      tf.Variable(1, name="v")
      sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized(),
                                   recovery_wait_secs=1)

      # Set max_wait_secs to allow us to try a few times.
      with self.assertRaises(errors.DeadlineExceededError):
        sm.wait_for_session(master="", max_wait_secs=3000)
Example #40
 def _create_initializers(self):
   if self._var_count != len(tf.all_variables()):
     self._saver = tf.train.Saver(tf.all_variables(), max_to_keep=5)
     self._init = tf.initialize_all_variables()
     self._check_inited = tf.assert_variables_initialized()
     self._var_count = len(tf.all_variables())
     if self._summary_writer:
       self._summaries = tf.merge_all_summaries()
       self._summary_writer.add_graph(tf.get_default_graph().as_graph_def())
Example #41
 def start(self):
   with self._sess.graph.as_default():
     self.run(tf.assert_variables_initialized())
     # create and launch threads for all queue_runners
     # it is like start_queue_runners, but manually
     for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
       self._threads.extend(qr.create_threads(
         self._sess, coord=self._coord, daemon=True, start=True
       ))
Beispiel #42
0
  def __init__(self, session,
                     optimizer,
                     policy_network,
                     observation_dim,
                     num_actions,
                     gru_unit_size,
                     num_step,
                     num_layers,
                     save_path,
                     global_step,
                     max_gradient=5,
                     entropy_bonus=0.001,
                     summary_writer=None,
                     loss_function="l2",
                     summary_every=100):

    # tensorflow machinery
    self.session        = session
    self.optimizer      = optimizer
    self.summary_writer = summary_writer
    self.summary_every  = summary_every
    self.gru_unit_size  = gru_unit_size
    self.num_step       = num_step
    self.num_layers     = num_layers
    self.no_op          = tf.no_op()

    # model components
    self.policy_network  = policy_network
    self.observation_dim = observation_dim
    self.num_actions     = num_actions
    self.loss_function   = loss_function

    # training parameters
    self.max_gradient    = max_gradient
    self.entropy_bonus   = entropy_bonus

    #counter
    self.global_step = global_step

    # create and initialize variables
    self.create_variables()
    var_lists = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    self.session.run(tf.variables_initializer(var_lists))

    # make sure all variables are initialized
    self.session.run(tf.assert_variables_initialized())

    # try load saved model
    self.saver = tf.train.Saver(tf.global_variables())
    self.save_path = save_path
    self.load_model()

    if self.summary_writer is not None:
      # graph was not available when journalist was created
      self.summary_writer.add_graph(self.session.graph)
      self.summary_every = summary_every
Beispiel #43
0
    def __init__(self, optimizer, session, Network, env: gym.Env,
                 learning_rate: float, in_dim: int, out_dim: int,
                 memory_size: int, batch_size: int, target_update: int,
                 epsilon_decay: float, max_epsilon: float = 1.0,
                 min_epsilon: float = 0.1, gamma: float = 0.99):

        obs_dim = env.observation_space.shape[0]
        action_dim = env.action_space.n
        self.env  = env
        self.learning_rate = learning_rate
        self.memory = Replay_Buffer(obs_dim, memory_size, batch_size)
        self.batch_size = batch_size
        self.epsilon = max_epsilon
        self.epsilon_decay = epsilon_decay
        self.max_epsilon = max_epsilon
        self.min_epsilon = min_epsilon
        self.target_update = target_update
        self.gamma = gamma
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.target_params = None
        self.mask = None
        self.double_mask = None
        self.target_Q_Value = None
        self.estimated_Q_Value = None
        self.loss = None
        self.train_op = None

        self.sess = session
        self.optimizer = optimizer

        with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
            self.dqn = Network
            self.states = tf.placeholder(tf.float32, shape=[None, self.in_dim], name="states")
            self.estimated_Q_Value = self.dqn(self.states)

            self.mask = tf.placeholder(tf.float32, shape=[None, 2], name="mask")
            self.replay_estimated_Q_Value = tf.multiply(self.dqn(self.states), self.mask)
            self.replay_estimated_Q_Value = tf.reduce_max(self.replay_estimated_Q_Value, axis=1)

        with tf.variable_scope("target", reuse=False):    
             self.dqn_target = Network
             self.target_states = tf.placeholder(tf.float32, shape = [None, self.in_dim], name="next_states")       	             
             self.doulbe_mask = tf.placeholder(tf.float32, shape = [None, 2], name="double_mask")
             self.reward = tf.placeholder(tf.float32, shape = [None, ], name="reward")                
             self.target_Q_Value =  tf.reduce_max( self.gamma * tf.multiply(self.dqn_target(self.target_states), self.doulbe_mask),axis = 1) + self.reward
             self.target_Q_Value = tf.stop_gradient(self.target_Q_Value)

        self.loss = tf.square(self.target_Q_Value - self.replay_estimated_Q_Value) / 2
        self.train_op = self.optimizer.minimize(self.loss)

        var_lists = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.sess.run(tf.variables_initializer(var_lists)) 
        self.sess.run(tf.assert_variables_initialized())


        self.transition = list()
        self.is_test = False
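Note the constructor wires up "model" and "target" scopes but never copies weights between them. A sketch of the usual hard target update, assuming the two variable_scopes above actually own the two networks' variables:

        model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="model")
        target_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="target")
        self.update_target_op = tf.group(
            *[tf.assign(t, m) for t, m in zip(target_vars, model_vars)])
        # run every `target_update` steps:
        # self.sess.run(self.update_target_op)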
Beispiel #44
0
 def testVariables(self):
   with tf.Graph().as_default(), self.test_session() as sess:
     v = tf.Variable([1, 2])
     w = tf.Variable([3, 4])
     _ = v, w
     inited = tf.assert_variables_initialized()
     with self.assertRaisesOpError("Attempting to use uninitialized value"):
       sess.run(inited)
     tf.initialize_all_variables().run()
     sess.run(inited)
Beispiel #45
0
 def _create_initializers(self):
   if self._var_count != len(tf.all_variables()):
     save_dir = os.path.dirname(self._save_path) if self._save_path else None
     if save_dir and not tf.gfile.IsDirectory(save_dir):
       tf.gfile.MakeDirs(save_dir)
     self._saver = tf.train.Saver(tf.all_variables(), max_to_keep=5)
     self._init = tf.initialize_all_variables()
     self._check_inited = tf.assert_variables_initialized()
     self._var_count = len(tf.all_variables())
     if self._summary_writer:
       self._summaries = tf.merge_all_summaries()
       self._summary_writer.add_graph(tf.get_default_graph())
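A sketch of how these lazily rebuilt ops are typically consumed, via a hypothetical helper on the same class:

 def _run_init_if_needed(self, sess):
   self._create_initializers()  # refresh ops if new variables appeared
   try:
     sess.run(self._check_inited)
   except tf.errors.FailedPreconditionError:
     sess.run(self._init)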
Beispiel #46
0
 def testVariableList(self):
   with tf.Graph().as_default(), self.test_session() as sess:
     v = tf.Variable([1, 2])
     w = tf.Variable([3, 4])
     inited = tf.assert_variables_initialized([v])
     with self.assertRaisesOpError("Attempting to use uninitialized value"):
       inited.op.run()
     sess.run(w.initializer)
     with self.assertRaisesOpError("Attempting to use uninitialized value"):
       inited.op.run()
     v.initializer.run()
     inited.op.run()
Beispiel #47
0
 def is_initialized_in(self, session):
     """Check if the TensorFlow variables are initialized"""
     if self._initialized:
         return True
     tf_var_list = self.get_tensorflow_variables()
     if len(tf_var_list) == 0:
         return True
     try:
         session.run(tf.assert_variables_initialized(self.get_tensorflow_variables()))
         self._initialized = True
         return True
     except tf.errors.FailedPreconditionError:
         return False
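Typical call site for this probe (the receiver name `block` is hypothetical):

 if not block.is_initialized_in(sess):
     sess.run(tf.variables_initializer(block.get_tensorflow_variables()))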
Beispiel #48
0
def dense(x, num_units, nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' fully connected layer '''
    name = get_name('dense', counters)
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', [int(x.get_shape()[1]), num_units], tf.float32,
                                tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
            x_init = tf.matmul(x, V_norm)
            m_init, v_init = tf.nn.moments(x_init, [0])
            scale_init = init_scale / tf.sqrt(v_init + 1e-10)
            g = tf.get_variable('g', dtype=tf.float32,
                                initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32,
                                initializer=-m_init * scale_init, trainable=True)
            x_init = tf.reshape(
                scale_init, [1, num_units]) * (x_init - tf.reshape(m_init, [1, num_units]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            tf.assert_variables_initialized([V, g, b])

            # use weight normalization (Salimans & Kingma, 2016)
            x = tf.matmul(x, V)
            scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
            x = tf.reshape(scaler, [1, num_units]) * \
                x + tf.reshape(b, [1, num_units])

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x
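get_vars_maybe_avg is not included in this snippet; a plausible reconstruction, assuming the usual weight-normalization setup where an ExponentialMovingAverage shadow is substituted at evaluation time:

def get_var_maybe_avg(var_name, ema, **kwargs):
    ''' fetch a variable from the current scope, or its EMA shadow '''
    v = tf.get_variable(var_name)
    if ema is not None:
        v = ema.average(v)
    return v

def get_vars_maybe_avg(var_names, ema, **kwargs):
    return [get_var_maybe_avg(vn, ema, **kwargs) for vn in var_names]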
Beispiel #49
0
  def __init__(self, session,
                     optimizer,
                     q_network,
                     state_dim,
                     num_actions,
                     batch_size=32,
                     init_exp=0.5,       # initial exploration prob
                     final_exp=0.1,      # final exploration prob
                     anneal_steps=10000, # N steps for annealing exploration
                     replay_buffer_size=10000,
                     store_replay_every=5, # how frequent to store experience
                     discount_factor=0.9, # discount future rewards
                     target_update_rate=0.01,
                     reg_param=0.01, # regularization constants
                     max_gradient=5, # max gradient norms
                     double_q_learning=False,
                     summary_writer=None,
                     summary_every=100):

    # tensorflow machinery
    self.session        = session
    self.optimizer      = optimizer
    self.summary_writer = summary_writer

    # model components
    self.q_network     = q_network
    self.replay_buffer = ReplayBuffer(buffer_size=replay_buffer_size)

    # Q learning parameters
    self.batch_size      = batch_size
    self.state_dim       = state_dim
    self.num_actions     = num_actions
    self.exploration     = init_exp
    self.init_exp        = init_exp
    self.final_exp       = final_exp
    self.anneal_steps    = anneal_steps
    self.discount_factor = discount_factor
    self.target_update_rate = target_update_rate
    self.double_q_learning = double_q_learning

    # training parameters
    self.max_gradient = max_gradient
    self.reg_param    = reg_param

    # counters
    self.store_replay_every   = store_replay_every
    self.store_experience_cnt = 0
    self.train_iteration      = 0

    # create and initialize variables
    self.create_variables()
    var_lists = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    self.session.run(tf.variables_initializer(var_lists))

    # make sure all variables are initialized
    self.session.run(tf.assert_variables_initialized())

    if self.summary_writer is not None:
      # graph was not available when journalist was created
      self.summary_writer.add_graph(self.session.graph)
      self.summary_every = summary_every
Beispiel #50
0
 def testNoVars(self):
   with tf.Graph().as_default():
     self.assertEqual(None, tf.assert_variables_initialized())
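Since the op is None when the graph holds no variables, code that runs it unconditionally should guard, e.g.:

 ready = tf.assert_variables_initialized()
 if ready is not None:
   sess.run(ready)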
Beispiel #51
0
def trainFine(conf, jointTrain=False, resume=True):
    # Parameters
    learning_rate = conf.fine_learning_rate
    batch_size = conf.fine_batch_size
    display_step = conf.display_step
    n_input = conf.psz
    n_classes = conf.n_classes
    dropout = conf.dropout
    imsz = conf.imsz
    rescale = conf.rescale
    scale = conf.scale
    pool_scale = conf.pool_scale
    
    x0,x1,x2,y,keep_prob = createPlaceHolders(imsz,rescale,scale,pool_scale,n_classes)
    locs_ph = tf.placeholder(tf.float32,[conf.batch_size,n_classes,2])
    learning_rate_ph = tf.placeholder(tf.float32,shape=[])

    weights = initNetConvWeights(conf)
    pred_gradient,layers = net_multi_conv(x0,x1,x2, weights, keep_prob,
                          imsz,rescale,pool_scale)
    
    baseoutname = '%s_%d.ckpt'%(conf.outname,conf.base_training_iters)
    basemodelfile = os.path.join(conf.cachedir,baseoutname)

    sess = tf.Session()
    saver = tf.train.Saver()

    pred = tf.stop_gradient(pred_gradient)
    training_iters = conf.fine_training_iters
    outname = conf.fineoutname
    print("Restoring base model from:" + basemodelfile)
    saver.restore(sess, basemodelfile)
        
    # Construct fine model
    labelT  = multiPawTools.createFineLabelTensor(conf)
    layer1_1 = tf.stop_gradient(layers['base_dict_0']['conv1'])
    layer1_2 = tf.stop_gradient(layers['base_dict_0']['conv2'])
    layer2_1 = tf.stop_gradient(layers['base_dict_1']['conv1'])
    layer2_2 = tf.stop_gradient(layers['base_dict_1']['conv2'])
    curfine1_1 = extractPatches(layer1_1,pred,conf,1,4)
    curfine1_2 = extractPatches(layer1_2,pred,conf,2,2)
    curfine2_1 = extractPatches(layer2_1,pred,conf,2,2)
    curfine2_2 = extractPatches(layer2_2,pred,conf,4,1)
    curfine1_1u = tf.unpack(tf.transpose(curfine1_1,[1,0,2,3,4]))
    curfine1_2u = tf.unpack(tf.transpose(curfine1_2,[1,0,2,3,4]))
    curfine2_1u = tf.unpack(tf.transpose(curfine2_1,[1,0,2,3,4]))
    curfine2_2u = tf.unpack(tf.transpose(curfine2_2,[1,0,2,3,4]))
    finepred = fineOut(curfine1_1u,curfine1_2u,curfine2_1u,curfine2_2u,conf)    
    limgs = multiPawTools.createFineLabelImages(locs_ph,pred,conf,labelT)

    # training data stuff
    lmdbfilename =os.path.join(conf.cachedir,conf.trainfilename)
    vallmdbfilename =os.path.join(conf.cachedir,conf.valfilename)
    env = lmdb.open(lmdbfilename, readonly = True)
    valenv = lmdb.open(vallmdbfilename, readonly = True)

    # Define loss and optimizer
    costFine = tf.reduce_mean(tf.nn.l2_loss(finepred - tf.to_float(limgs)))
    costBase = tf.reduce_mean(tf.nn.l2_loss(pred - y))

    cost = costFine

    saver1 = tf.train.Saver(max_to_keep=conf.maxckpt)
    
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate_ph).minimize(cost)

    outfilename = os.path.join(conf.cachedir,conf.fineoutname)
    traindatafilename = os.path.join(conf.cachedir,conf.datafinename)
    latest_ckpt = tf.train.get_checkpoint_state(conf.cachedir,
                                        latest_filename = conf.ckptfinename)
    
    if not latest_ckpt or not resume:
        startat = 0
        trainData = {'train_err':[],'val_err':[],'step_no':[]}
        varlist = tf.all_variables()
        for var in varlist:
            try:
                sess.run(tf.assert_variables_initialized([var]))
            except tf.errors.FailedPreconditionError:
                sess.run(tf.initialize_variables([var]))
                print('Initializing variable %s' % var.name)

    else:
        saver1.restore(sess, latest_ckpt.model_checkpoint_path)
        matchObj = re.match(outfilename + r'-(\d*)', latest_ckpt.model_checkpoint_path)
        startat = int(matchObj.group(1)) + 1
        tdfile = open(traindatafilename, 'rb')
        trainData = pickle.load(tdfile)
        tdfile.close()



    with env.begin() as txn,valenv.begin() as valtxn:
        train_cursor = txn.cursor(); val_cursor = valtxn.cursor()

        # Keep training until reach max iterations
        for step in range(startat,training_iters):
            excount = step*batch_size
            cur_lr = learning_rate * conf.gamma ** math.floor(old_div(excount, conf.step_size))

            batch_xs, locs = multiPawTools.readLMDB(train_cursor,
                                    batch_size,imsz,multiResData)

            locs = multiResData.sanitize_locs(locs)

            x0_in,x1_in,x2_in = multiPawTools.multiScaleImages(
                batch_xs.transpose([0,2,3,1]),rescale,scale)

            labelims = multiPawTools.createLabelImages(locs,
                               conf.imsz,conf.pool_scale*conf.rescale,
                               conf.label_blur_rad) 
            feed_dict={x0: x0_in,x1: x1_in,x2: x2_in,
                y: labelims, keep_prob: dropout,locs_ph:np.array(locs),
                learning_rate_ph:cur_lr}
            sess.run(optimizer, feed_dict = feed_dict)

            if step % display_step == 0:
                feed_dict={x0: x0_in,x1: x1_in,x2: x2_in,
                    y: labelims, keep_prob: 1.,locs_ph:np.array(locs)}
                train_loss = sess.run([cost,costBase], feed_dict=feed_dict)

                numrep = int(old_div(conf.numTest,conf.batch_size))+1
                acc = 0; loss = 0
                for rep in range(numrep):
                    val_xs, locs = multiPawTools.readLMDB(val_cursor,
                                      batch_size,imsz,multiResData)
                    x0_in,x1_in,x2_in = multiPawTools.multiScaleImages(
                        val_xs.transpose([0,2,3,1]),rescale,scale)

                    labelims = multiPawTools.createLabelImages(locs,
                        conf.imsz,conf.pool_scale*conf.rescale,
                        conf.label_blur_rad)
                    feed_dict={x0: x0_in,x1: x1_in,x2: x2_in,
                        y: labelims, keep_prob:1.,locs_ph:np.array(locs)}
                    loss += sess.run(cost, feed_dict=feed_dict)
                loss = old_div((old_div(loss,numrep)),batch_size)
                print("Iter " + str(step) +                 "  Minibatch Loss= " + "{:.3f}".format(loss) +                  ", Training Loss= " + "{:.3f}".format(old_div(train_loss[0],batch_size)) +                  ", Base Training Loss= " + "{:.3f}".format(old_div(train_loss[1],batch_size)))
                trainData['train_err'].append(old_div(train_loss[0],batch_size))
                trainData['val_err'].append(loss)
                trainData['step_no'].append(step)

            if step % conf.save_step == 0:
                saver1.save(sess,outfilename,global_step=step,
                           latest_filename = conf.ckptfinename)
                print('Saved state to %s-%d' %(outfilename,step))
                tdfile = open(traindatafilename,'wb')
                pickle.dump(trainData,tdfile)
                tdfile.close()

            step += 1
            
        print("Optimization Finished!")
        saver1.save(sess,outfilename,global_step=step,
                   latest_filename = conf.ckptfinename)
        print('Saved state to %s-%d' %(outfilename,step))
        tdfile = open(traindatafilename,'wb')
        pickle.dump(trainData,tdfile)
        tdfile.close()
    
    sess.close()
Beispiel #52
0
 def testPrepareSessionSucceeds(self):
     with tf.Graph().as_default():
         v = tf.Variable([1.0, 2.0, 3.0], name="v")
         sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
         sess = sm.prepare_session("", init_op=tf.initialize_all_variables())
         self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))