Code Example #1
File: gen.py  Project: wowowoxuan/texture_generation
import time

import numpy as np
import sugartensor as tf  # sugartensor extends TensorFlow with the sg_* helpers
from scipy import misc

# ModelGraph, transform_image and label are defined elsewhere in gen.py.
def generate(sample_image):
    start_time = time.time()

    g = ModelGraph()

    with tf.Session() as sess:
        # We need to initialize variables here because the variable
        # `generator/x` will not be restored from the checkpoint.
        tf.sg_init(sess)

        restore_vars = [v for v in tf.global_variables() if "generator" not in v.name]
        saver = tf.train.Saver(restore_vars)
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        i = 0
        while True:
            mse, _ = sess.run([g.mse, g.train_gen],
                              {g.y: transform_image(sample_image)})  # (16, 28)

            if time.time() - start_time > 60:  # Save every 60 seconds
                gen_image = sess.run(g.x)
                gen_image = np.squeeze(gen_image)
                misc.imsave('gen_images/%s/gen_%.2f.jpg' % (label, mse),
                            gen_image)

                start_time = time.time()
                i += 1
                if i == 60: break  # Finish after 1 hour
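
For reference, a minimal driver for the example above might look like the following sketch; the sample path is a placeholder, not taken from the project, and misc.imread assumes the older scipy the project era used:

from scipy import misc

# Hypothetical usage: optimize `generator/x` against a sample texture.
# 'asset/sample.jpg' is a placeholder path.
sample_image = misc.imread('asset/sample.jpg')
generate(sample_image)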
Code Example #2
File: train.py  Project: fanlu/speech-to-text-wavenet
def get_loss(opt):
    # encode audio feature
    logit = get_logit(opt.input[opt.gpu_index], voca_size=voca_size)
    # debug: list the regularization losses registered so far
    for i in tf.get_collection("regularization_losses"):
        print(i)
    print('--------------------')

    train_list = tf.trainable_variables()

    # collect weight variables ('W' in the name); used only by the
    # alternative penalty noted below
    var_list = tf.global_variables()
    real_var_list = []
    for item in var_list:
        if 'W' in item.name:
            real_var_list.append(item)

    loss = logit.sg_ctc(target=opt.target[opt.gpu_index],
                        seq_len=opt.seq_len[opt.gpu_index])
    # alternative formulations kept for reference: summing a "losses"
    # collection with tf.add_n, or adding a manual penalty such as
    #   for item in real_var_list: loss += 0.03 * tf.nn.l2_loss(item)

    regular_loss = tf.sg_regularizer_loss(0.03)
    loss += regular_loss
    return loss
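
As a rough sketch (not from this project), a per-tower loss like get_loss is usually built once per GPU and averaged; num_gpus and mutating opt.gpu_index are assumptions here:

# Hypothetical multi-GPU aggregation around get_loss().
num_gpus = 2  # placeholder value
tower_losses = []
for gpu in range(num_gpus):
    with tf.device('/gpu:%d' % gpu):
        opt.gpu_index = gpu
        tower_losses.append(get_loss(opt))
total_loss = tf.reduce_mean(tf.stack(tower_losses))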
Code Example #3
def discriminator(tensor):

    # reuse flag
    reuse = len([
        t for t in tf.global_variables() if t.name.startswith('discriminator')
    ]) > 0

    with tf.sg_context(name='discriminator',
                       size=4,
                       stride=2,
                       act='leaky_relu',
                       reuse=reuse):
        # shared part
        shared = (tensor
                  .sg_conv(dim=64, name='conv1')
                  .sg_conv(dim=128, name='conv2')
                  .sg_flatten()
                  .sg_dense(dim=1024, name='fc1'))

        # discriminator end
        disc = shared.sg_dense(dim=1, act='linear', name='disc').sg_squeeze()

        # shared recognizer part
        recog_shared = shared.sg_dense(dim=128, name='recog')

        # categorical auxiliary classifier end (cat_dim is a module-level constant)
        cat = recog_shared.sg_dense(dim=cat_dim, act='linear', name='cat')

        # continuous auxiliary classifier end (con_dim is a module-level constant)
        con = recog_shared.sg_dense(dim=con_dim, act='sigmoid', name='con')

        return disc, cat, con
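
The reuse flag at the top of this function is the recurring idiom in these examples: scanning tf.global_variables() reveals whether a scope's variables already exist. A standalone sketch (TF 1.x; the helper name is ours):

import tensorflow as tf

def scope_has_variables(scope):
    # True once any variable under `scope` has been created,
    # i.e. after the first call that built the subgraph.
    return len([t for t in tf.global_variables()
                if t.name.startswith(scope)]) > 0

# First call builds the variables (reuse=False); later calls reuse them.
reuse = scope_has_variables('discriminator')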
Code Example #4
def sg_restore(sess, save_path, category=''):
    r""" Restores previously saved variables.

    Args:
      sess: A `Session` to use to restore the parameters.
      save_path: Path where parameters were previously saved.
      category: A `String` or a list of strings; only variables whose names
        start with one of the given categories are restored.
    """
    # to list
    if not isinstance(category, (tuple, list)):
        category = [category]

    # make variable list to load
    var_list = {}
    for cat in category:
        for t in tf.global_variables():
            if t.name.startswith(cat):
                # strip the ':0' suffix to get the checkpoint variable name
                var_list[t.name[:-2]] = t

    # restore parameters
    saver = tf.train.Saver(var_list)
    saver.restore(sess, save_path)
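
A hedged usage sketch for sg_restore, restoring only generator weights from the latest checkpoint (the checkpoint directory is a placeholder):

with tf.Session() as sess:
    tf.sg_init(sess)  # initialize everything first
    # then overwrite only the 'generator/*' variables from disk
    sg_restore(sess,
               tf.train.latest_checkpoint('asset/train/ckpt'),
               category='generator')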
Code Example #5
def discriminator(tensor):

    # reuse flag
    reuse = len([
        t for t in tf.global_variables() if t.name.startswith('discriminator')
    ]) > 0

    with tf.sg_context(name='discriminator',
                       size=(4, 1),
                       stride=(2, 1),
                       act='leaky_relu',
                       bn=False,
                       reuse=reuse):
        # shared part
        shared = (tensor
                  .sg_conv(dim=32, name='conv1')
                  .sg_conv(dim=64, name='conv2')
                  .sg_conv(dim=128, name='conv3')
                  .sg_flatten()
                  .sg_dense(dim=1024, name='fc1'))

        # discriminator end
        disc = (shared
                .sg_dense(dim=1, act='linear', bn=False, name='disc')
                .sg_squeeze())

        # continuous auxiliary classifier end
        con = (shared
               .sg_dense(dim=128, name='recog')
               .sg_dense(dim=con_dim, act='sigmoid', bn=False, name='con'))

        return disc, con
Code Example #6
File: model.py  Project: wolfhu/ebgan
def discriminator(x):

    reuse = len([t for t in tf.global_variables() if t.name.startswith('discriminator')]) > 0
    with tf.sg_context(name='discriminator', size=4, stride=2, act='leaky_relu', bn=True, reuse=reuse):
        # EBGAN-style discriminator: an encoder-decoder that returns a
        # reconstruction of the input rather than a scalar score
        res = (x.sg_conv(dim=64, name='conv_1')
                .sg_conv(dim=128, name='conv_2')
                .sg_upconv(dim=64, name='conv_3')
                .sg_upconv(dim=1, act='linear', name='conv_4'))

    return res
Code Example #7
def discriminator(tensor):
    # reuse flag
    reuse = len([t for t in tf.global_variables() if t.name.startswith('discriminator')]) > 0
    with tf.sg_context(name='discriminator', size=4, stride=2, act='leaky_relu', reuse=reuse):
        res = (tensor
               .sg_conv(dim=64, name='conv1')
               .sg_conv(dim=128, name='conv2')
               .sg_flatten()
               .sg_dense(dim=1024, name='fc1')
               .sg_dense(dim=1, act='linear', name='fc2')
               .sg_squeeze())
        return res
Code Example #8
File: model.py  Project: wolfhu/ebgan
def generator(x):

    reuse = len([t for t in tf.global_variables() if t.name.startswith('generator')]) > 0
    with tf.sg_context(name='generator', size=4, stride=2, act='leaky_relu', bn=True, reuse=reuse):

        # generator network
        res = (x.sg_dense(dim=1024, name='fc_1')
               .sg_dense(dim=7*7*128, name='fc_2')
               .sg_reshape(shape=(-1, 7, 7, 128))
               .sg_upconv(dim=64, name='conv_1')
               .sg_upconv(dim=1, act='sigmoid', bn=False, name='conv_2'))
    return res
Code Example #9
def generator(tensor):

    # reuse flag
    reuse = len([t for t in tf.global_variables() if t.name.startswith('generator')]) > 0

    with tf.sg_context(name='generator', size=4, stride=2, act='relu', bn=True, reuse=reuse):
        res = (tensor
               .sg_dense(dim=1024, name='fc1')
               .sg_dense(dim=7*7*128, name='fc2')
               .sg_reshape(shape=(-1, 7, 7, 128))
               .sg_upconv(dim=64, name='conv1')
               .sg_upconv(dim=1, act='sigmoid', bn=False, name='conv2'))
    return res
Code Example #10
def discriminator(tensor):
    reuse = len([
        t for t in tf.global_variables() if t.name.startswith('discriminator')
    ]) > 0
    with tf.sg_context(name='discriminator',
                       size=4,
                       stride=2,
                       act='leaky_relu',
                       bn=True,
                       reuse=reuse):
        res = (tensor
               .sg_dense(dim=4096, name='fc1')
               .sg_dense(dim=512, name='fc2')
               .sg_dense(dim=1, act='sigmoid', bn=False, name='fc3')
               .sg_squeeze())
        return res
Code Example #11
def generator(tensor):
    reuse = len([t for t in tf.global_variables()
                 if t.name.startswith('generator')]) > 0
    with tf.sg_context(name='generator',
                       size=4,
                       stride=2,
                       act='leaky_relu',
                       bn=True,
                       reuse=reuse):
        res = (tensor
               .sg_dense(dim=1024, name='fc1')
               .sg_dense(dim=4096, act='relu', bn=False, name='fc3'))

        return res
Code Example #12
def test(tfname, weightPaths, steps=100000, Var=["NNReg"], lll=2000):
    tf.Graph()  # note: creates a new Graph object but does not install it as default
    x, y = read_from_tfrecords(tfname, ["source", "target"], 10,
                               [[1070, 3], [1070, 3]])
    global_step = tf.Variable(1, trainable=False, name='global_step')
    print(x.shape, y.shape)
    # the TFRecord tensors above are immediately overridden with data from disk
    x = np.loadtxt('EM.txt', dtype='float32') / 1500
    y = np.loadtxt('FM.txt', dtype='float32')[:, :100] / 1500
    x = tf.convert_to_tensor(np.expand_dims(np.rollaxis(x, axis=0), axis=0))
    y = tf.convert_to_tensor(np.expand_dims(np.rollaxis(y, axis=0), axis=0))

    print(x.shape, y.shape)

    yp = Net(x, x, y) + x
    tmp_var_list = {}
    for j in Var:
        for i in tf.global_variables():
            if i.name.startswith(j):
                # strip the ':0' suffix to get the checkpoint variable name
                tmp_var_list[i.name[:-2]] = i

    saver = tf.train.Saver(tmp_var_list)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    path = weightPaths + "model.ckpt-{}".format(steps)

    Sour = []
    Targ = []
    Trans_S = []

    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        saver.restore(sess, path)
        for i in tqdm.tqdm(range(lll)):
            S, T, TS = sess.run([x, y, yp])
            Sour.append(S)
            Targ.append(T)
            Trans_S.append(TS)

        coord.request_stop()
        coord.join(threads)

    return Sour, Targ, Trans_S
Code Example #13
def test(tfname, weightPaths, steps=100000, Var=["NNReg"], lll=2000):
    tf.Graph()  # note: creates a new Graph object but does not install it as default
    x, y = read_from_tfrecords(tfname, ["source", "target"], 10,
                               [[91, 2], [91, 2]])
    global_step = tf.Variable(1, trainable=False, name='global_step')
    yp = Net(x, x, y) + x
    tmp_var_list = {}
    for j in Var:
        for i in tf.global_variables():
            if i.name.startswith(j):
                tmp_var_list[i.name[:-2]] = i

    saver = tf.train.Saver(tmp_var_list)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    path = weightPaths + "model.ckpt-{}".format(steps)

    Sour = []
    Targ = []
    Trans_S = []

    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        saver.restore(sess, path)
        for i in tqdm.tqdm(range(lll)):
            S, T, TS = sess.run([x, y, yp])
            Sour.append(S)
            Targ.append(T)
            Trans_S.append(TS)

        coord.request_stop()
        coord.join(threads)

    return Sour, Targ, Trans_S
Code Example #14
    def wrapper(tensor, **kwargs):
        r"""Manages arguments of `tf.sg_opt`.

        Args:
          tensor: automatically passed by decorator
          kwargs:
              in_dim: An integer. The size of input dimension, which is set to the last one by default.
              dim: An integer. The size of output dimension. Has the same value as in_dim by default.
              ln: Boolean. If True, layer normalization is applied.
              bias: Boolean. If True, biases are added. Set to True by default.
              name: A name for the layer. As a default, the function name is assigned.
              reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope
                as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
        """

        # kwargs parsing
        opt = tf.sg_opt(kwargs) + _context

        # set default argument
        try:
            shape = tensor.get_shape().as_list()
            # dropout off
            opt += tf.sg_opt(shape=shape,
                             in_dim=shape[-1],
                             dim=shape[-1],
                             dout=0)
            # disable bias when normalization on
            opt += tf.sg_opt(bias=not opt.ln)
        finally:
            pass

        # automatic layer naming
        if opt.name is None:

            # layer function name will be used as layer name
            opt.name = func.__name__.replace('sg_', 'lyr-')

            # find existing layer names
            exist_layers = []
            for t in tf.global_variables():
                scope_name = tf.get_variable_scope().name
                prefix = scope_name + '/' if len(scope_name) > 0 else ''
                i = t.name.rfind(prefix + opt.name)
                if i >= 0:
                    exist_layers.append(t.name[i:].split('/')[-2])
            exist_layers = list(set(exist_layers))

            # layer name numbering
            if len(exist_layers) == 0:
                opt.name += '_1'
            else:
                opt.name += '_%d' % (
                    max([int(n.split('_')[-1]) for n in exist_layers]) + 1)

        # all layer variables start with 'lyr-' prefix
        with tf.variable_scope(opt.name, reuse=opt.reuse) as scope:

            # call layer function
            out = func(tensor, opt)

            # apply dropout
            if opt.dout:
                out = tf.cond(_phase, lambda: tf.nn.dropout(out, 1 - opt.dout),
                              lambda: out)

            # rename tensor
            out = tf.identity(out, 'out')

            # add final output summary
            if not scope.reuse:
                tf.sg_summary_activation(out)

            # save node info for reuse
            out._sugar = tf.sg_opt(func=func,
                                   arg=tf.sg_opt(kwargs) + _context,
                                   prev=tensor,
                                   is_layer=True,
                                   name=opt.name)
            # inject reuse function
            out.sg_reuse = types.MethodType(sg_reuse, out)

        return out
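
The auto-numbering branch above is plain Python; isolated from TensorFlow it behaves like this (the layer names are made up):

# Standalone illustration of the numbering scheme.
exist_layers = ['lyr-dense_1', 'lyr-dense_3']
name = 'lyr-dense'
if len(exist_layers) == 0:
    name += '_1'
else:
    name += '_%d' % (max([int(n.split('_')[-1]) for n in exist_layers]) + 1)
print(name)  # -> lyr-dense_4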
Code Example #15
    def wrapper(tensor, **kwargs):
        r"""Manages arguments of `tf.sg_opt`.
        
        Args:
          tensor: A `tensor` (automatically passed by decorator).
          kwargs:
            shape:  A list of integers. The shape of `tensor`. Inferred if not specified.
            in_dim: An integer. The size of input dimension, which is set to the last one by default.
            dim: An integer. The size of output dimension. Has the same value as in_dim by default.
            bn: Boolean. If True, batch normalization is applied.
            ln: Boolean. If True, layer normalization is applied.
            dout: A float in the range [0, 1). A dropout rate. Set to 0 by default.
            bias: Boolean. If True, biases are added. Set to True by default.
            name: A name for the layer. As a default, the function name is assigned.
            act: A name of activation function. e.g., `sigmoid`, `tanh`, etc.
            reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope 
              as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
        """

        from . import sg_initializer as init
        from . import sg_activation

        # kwargs parsing
        opt = tf.sg_opt(kwargs) + _context

        # set default argument
        try:
            shape = tensor.get_shape().as_list()
            # batch normalization off, layer normalization off, dropout off
            opt += tf.sg_opt(shape=shape,
                             in_dim=shape[-1],
                             dim=shape[-1],
                             bn=False,
                             ln=False,
                             dout=0)
            assert not (
                opt.bn and opt.ln
            ), 'batch normalization and layer normalization cannot both be enabled.'

            # disable bias when normalization on
            opt += tf.sg_opt(bias=not (opt.bn or opt.ln))
        finally:
            pass

        # automatic layer naming
        if opt.name is None:

            # layer function name will be used as layer name
            opt.name = func.__name__.replace('sg_', '')

            # find existing layer names
            exist_layers = []
            for t in tf.global_variables():
                scope_name = tf.get_variable_scope().name
                prefix = scope_name + '/' if len(scope_name) > 0 else ''
                i = t.name.rfind(prefix + opt.name)
                if i >= 0:
                    exist_layers.append(t.name[i:].split('/')[-2])
            exist_layers = list(set(exist_layers))

            # layer name numbering
            if len(exist_layers) == 0:
                opt.name += '_1'
            else:
                opt.name += '_%d' % (
                    max([int(n.split('_')[-1]) for n in exist_layers]) + 1)

        # create (or reuse) this layer's variable scope
        with tf.variable_scope(opt.name, reuse=opt.reuse) as scope:

            # call layer function
            out = func(tensor, opt)

            # apply batch normalization
            if opt.bn:
                # offset, scale parameter
                beta = init.constant('beta', opt.dim, summary=False)
                gamma = init.constant('gamma', opt.dim, value=1, summary=False)

                # offset, scale parameter
                mean_running = init.constant('mean', opt.dim, summary=False)
                variance_running = init.constant('variance',
                                                 opt.dim,
                                                 value=1,
                                                 summary=False)

                # calc batch mean, variance
                mean, variance = tf.nn.moments(
                    out, axes=list(range(len(out.get_shape()) - 1)))

                # update running mean, variance
                def update_running_stat():
                    decay = 0.99
                    update_op = [
                        mean_running.assign(mean_running * decay + mean *
                                            (1 - decay)),
                        variance_running.assign(variance_running * decay +
                                                variance * (1 - decay))
                    ]
                    with tf.control_dependencies(update_op):
                        return tf.identity(mean), tf.identity(variance)

                # select mean, variance by training phase
                m, v = tf.cond(
                    _phase,
                    update_running_stat,  # updated running stat and batch mean, variance
                    lambda:
                    (mean_running, variance_running))  # saved mean, variance

                # apply batch normalization
                out = tf.nn.batch_normalization(out, m, v, beta, gamma,
                                                tf.sg_eps)

            # apply layer normalization
            if opt.ln:
                # offset, scale parameter
                beta = init.constant('beta', opt.dim, summary=False)
                gamma = init.constant('gamma', opt.dim, value=1, summary=False)

                # calc layer mean, variance for final axis
                mean, variance = tf.nn.moments(out,
                                               axes=[len(out.get_shape()) - 1],
                                               keep_dims=True)

                # apply normalization
                out = (out - mean) / tf.sqrt(variance + tf.sg_eps)
                # apply parameter
                out = gamma * out + beta

            # apply activation
            if opt.act:
                out = getattr(sg_activation, 'sg_' + opt.act.lower())(out)

            # apply dropout
            if opt.dout:
                out = tf.cond(_phase, lambda: tf.nn.dropout(out, 1 - opt.dout),
                              lambda: out)

            # rename tensor
            out = tf.identity(out, 'out')

            # add final output summary
            if not scope.reuse:
                tf.sg_summary_activation(out)

            # save node info for reuse
            out._sugar = tf.sg_opt(func=func,
                                   arg=tf.sg_opt(kwargs) + _context,
                                   prev=tensor,
                                   is_layer=True,
                                   name=opt.name)
            # inject reuse function
            out.sg_reuse = types.MethodType(sg_reuse, out)

        return out
Code Example #16
File: sg_main.py  Project: DevashishJoshi/sugartensor
    def wrapper(tensor, **kwargs):
        r"""Manages arguments of `tf.sg_opt`.

        Args:
          tensor: A `tensor` (automatically passed by decorator).
          kwargs:
            shape:  A list of integers. The shape of `tensor`. Inferred if not specified.
            in_dim: An integer. The size of input dimension, which is set to the last one by default.
            dim: An integer. The size of output dimension. Has the same value as in_dim by default.
            bn: Boolean. If True, batch normalization is applied.
            ln: Boolean. If True, layer normalization is applied.
            scale: Boolean. If True, multiply by a trainable gamma variable. When the
              activation is linear (relu included), this can be disabled because it can
              be implicitly learned by the next layer. The default is True.
            dout: A float in the range [0, 1). A dropout rate. Set to 0 by default.
            bias: Boolean. If True, biases are added. Set to True by default.
            name: A name for the layer. As a default, the function name is assigned.
            act: A name of activation function. e.g., `sigmoid`, `tanh`, etc.
            reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope
              as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
            regularizer: A string: None, 'l1' or 'l2'. The default is None.
            summary: If True, summaries are added. The default is True.
        """

        from . import sg_initializer as init
        from . import sg_activation

        # kwargs parsing
        opt = tf.sg_opt(kwargs) + sg_get_context()

        # set default argument
        try:
            shape = tensor.get_shape().as_list()
            # batch normalization off, layer normalization off, dropout off
            opt += tf.sg_opt(shape=shape,
                             in_dim=shape[-1],
                             dim=shape[-1],
                             bn=False,
                             ln=False,
                             dout=0,
                             summary=True,
                             scale=True)
            if opt.regularizer == 'l1':
                opt.regularizer = lambda x: tf.reduce_mean(tf.abs(x))
            elif opt.regularizer == 'l2':
                opt.regularizer = lambda x: tf.square(
                    tf.reduce_mean(tf.square(x)))
            else:
                opt.regularizer = None

            assert not (
                opt.bn and opt.ln
            ), 'batch normalization and layer normalization cannot both be enabled.'

            # disable bias when normalization on
            opt += tf.sg_opt(bias=not (opt.bn or opt.ln))
        finally:
            pass

        # automatic layer naming
        if opt.name is None:

            # layer function name will be used as layer name
            opt.name = func.__name__.replace('sg_', '')

            # find existing layer names
            exist_layers = []
            for t in tf.global_variables():
                scope_name = tf.get_variable_scope().name
                prefix = scope_name + '/' if len(scope_name) > 0 else ''
                i = t.name.rfind(prefix + opt.name)
                if i >= 0:
                    exist_layers.append(t.name[i:].split('/')[-2])
            exist_layers = list(set(exist_layers))

            # layer name numbering
            if len(exist_layers) == 0:
                opt.name += '_1'
            else:
                opt.name += '_%d' % (
                    max([int(n.split('_')[-1]) for n in exist_layers]) + 1)

        with tf.variable_scope(opt.name, reuse=opt.reuse) as scope:

            # call layer function
            out = func(tensor, opt)
            out_shape = out.get_shape()

            # apply batch normalization
            if opt.bn:
                beta = init.constant('beta', opt.dim, summary=opt.summary)
                gamma = init.constant('gamma',
                                      opt.dim,
                                      value=1,
                                      summary=opt.summary,
                                      trainable=opt.scale)

                # offset, scale parameter ( for inference )
                mean_running = init.constant('mean',
                                             opt.dim,
                                             trainable=False,
                                             summary=opt.summary)
                variance_running = init.constant('variance',
                                                 opt.dim,
                                                 value=1,
                                                 trainable=False,
                                                 summary=opt.summary)

                # use fused batch norm if ndims in [2, 3, 4]
                if out_shape.ndims in [2, 3, 4]:
                    # add HW dims if necessary, fused_batch_norm requires shape to be NHWC
                    if out_shape.ndims == 2:
                        out = tf.expand_dims(out, axis=1)
                        out = tf.expand_dims(out, axis=2)
                    elif out_shape.ndims == 3:
                        out = tf.expand_dims(out, axis=2)

                    fused_eps = tf.sg_eps if tf.sg_eps > 1e-5 else 1e-5
                    out, mean, variance = tf.cond(
                        _phase,
                        lambda: tf.nn.fused_batch_norm(
                            out, gamma, beta, epsilon=fused_eps),
                        lambda: tf.nn.fused_batch_norm(out,
                                                       gamma,
                                                       beta,
                                                       mean=mean_running,
                                                       variance=
                                                       variance_running,
                                                       epsilon=fused_eps,
                                                       is_training=False),
                    )

                    # restore original shape if HW dims was added
                    if out_shape.ndims == 2:
                        out = tf.squeeze(out, axis=[1, 2])
                    elif out_shape.ndims == 3:
                        out = tf.squeeze(out, axis=2)

                # fallback to naive batch norm
                else:
                    mean, variance = tf.nn.moments(
                        out, axes=list(range(len(out.get_shape()) - 1)))
                    out = tf.cond(
                        _phase, lambda: tf.nn.batch_normalization(
                            out, mean, variance, beta, gamma, tf.sg_eps),
                        lambda: tf.nn.batch_normalization(
                            out, mean_running, variance_running, beta, gamma,
                            tf.sg_eps))

                decay = 0.99
                tf.add_to_collection(
                    tf.GraphKeys.UPDATE_OPS,
                    mean_running.assign(mean_running * decay + mean *
                                        (1 - decay)))
                tf.add_to_collection(
                    tf.GraphKeys.UPDATE_OPS,
                    variance_running.assign(variance_running * decay +
                                            variance * (1 - decay)))

            # apply layer normalization
            if opt.ln:
                # offset, scale parameter
                beta = init.constant('beta', opt.dim, summary=opt.summary)
                if opt.scale:
                    gamma = init.constant('gamma',
                                          opt.dim,
                                          value=1,
                                          summary=opt.summary)

                # calc layer mean, variance for final axis
                mean, variance = tf.nn.moments(out,
                                               axes=[len(out.get_shape()) - 1],
                                               keep_dims=True)

                # apply normalization
                out = (out - mean) / tf.sqrt(variance + tf.sg_eps)
                # apply parameter
                if opt.scale:
                    out = gamma * out + beta
                else:
                    out = out + beta

            # apply activation
            if opt.act:
                out = getattr(sg_activation, 'sg_' + opt.act.lower())(out)

            # apply dropout
            if opt.dout:
                out = tf.cond(_phase, lambda: tf.nn.dropout(out, 1 - opt.dout),
                              lambda: out)

            # rename tensor
            out = tf.identity(out, 'out')

            # add final output summary
            if opt.summary:
                tf.sg_summary_activation(out)

            # save node info for reuse
            out._sugar = tf.sg_opt(func=func,
                                   arg=tf.sg_opt(kwargs) + sg_get_context(),
                                   prev=tensor,
                                   is_layer=True,
                                   name=opt.name)
            # inject reuse function
            out.sg_reuse = types.MethodType(sg_reuse, out)

        return out
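
Once a layer function is decorated with this wrapper, calls chain directly on tensors and scopes auto-number themselves. A minimal sketch, assuming sugartensor's sg_dense and its usual import convention:

import sugartensor as tf  # sugartensor injects sg_* methods onto tensors

x = tf.placeholder(tf.float32, shape=(None, 784))
# each call runs through wrapper(): scopes default to dense_1, dense_2, ...
h = x.sg_dense(dim=256, act='relu', bn=True)
logits = h.sg_dense(dim=10, act='linear')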
Code Example #17
File: train.py  Project: zikai1/CPD-Net
def train():
    tf.Graph()  # note: creates a new Graph object but does not install it as default
    seed = 888
    tf.set_random_seed(seed)
    print("*****************************************")
    print("Training started with random seed: {}".format(seed))
    print("Batch started with random seed: {}".format(seed))
    
    # read data
    x, y = read_from_tfrecords(tfname,
                               ["source", "target"], batSize, [[s1, 2], [s2, 2]])
    global_step = tf.Variable(1, trainable=False, name='global_step')
    yp = Net(x, x, y) + x
    Loss = chamfer_loss(yp, y)

    # learning rate schedule
    lr = tf.train.exponential_decay(learningRate, global_step,
                                    batSize, learningRateDecay, staircase=False)
    # optimization algorithm
    train_step = tf.train.AdamOptimizer(learning_rate=lr,
                                        beta1=adam_beta1,
                                        beta2=adam_beta2).minimize(Loss, global_step=global_step)

    saver = tf.train.Saver(max_to_keep=int(maxKeepWeights))
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    
    # continue training from an existing checkpoint
    if len(conWeightPath) > 0:
        print("Continue Training...")
        tmp_var_list = {}
        if len(conWeightVar) == 0:
            print("For all variables")
            globals()['conWeightVar'] = {''}  # an empty prefix matches every variable
        else:
            print("Training variables: {}".format(conWeightVar))

        for j in conWeightVar:
            for i in tf.global_variables():
                if i.name.startswith(j):
                    tmp_var_list[i.name[:-2]] = i  # strip the ':0' suffix
        saver1 = tf.train.Saver(tmp_var_list)
    
    # training loop
    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # read weights when continuing from a checkpoint
        if len(conWeightPath) > 0:
            print(conWeightPath)
            if stepsContinue == -1:
                # pick the latest checkpoint step found in the directory
                STEPS = sorted([int(i.split("/")[-1].split(".")[1].split("-")[-1])
                                for i in glob.glob(conWeightPath + "/*meta")])
                print("Available checkpoint steps:", STEPS)
                globals()['stepsContinue'] = STEPS[-1]

            wtt = glob.glob(conWeightPath + "/*{}*meta".format(stepsContinue))[0][:-5]
            print("Reading Weight: {}".format(wtt))
            saver1.restore(sess, wtt)
            print('Weight was successfully restored from: {}'.format(wtt))

        stepst = sess.run(global_step)
        for t in tqdm.tqdm(range(stepst, int(maxStep) + 1)):
            _ = sess.run([train_step])
            if t % saveStep == 0:
                if not os.path.exists(dirSave):
                    os.makedirs(dirSave)
                saver.save(sess, dirSave + '/model.ckpt', global_step=t)
        coord.request_stop()
        coord.join(threads)