Example #1
def run_generator(fig_name, cval):

    with tf.Session() as sess:

        # init session
        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train'))

        feed_dic = {}
        for t, c in zip(target_cval, cval):
            feed_dic[t] = c

        # run generator
        imgs = sess.run(gen, feed_dic)

        # plot result
        _, ax = plt.subplots(10, 10, sharex=True, sharey=True)
        for i in range(10):
            for j in range(10):
                ax[i][j].plot(imgs[i * 10 + j])
                ax[i][j].set_axis_off()
        plt.savefig('asset/train/' + fig_name, dpi=600)
        tf.sg_info('Sample image saved to "asset/train/%s"' % fig_name)
        plt.close()
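This example relies on `gen` and `target_cval` being defined at module scope. A minimal sketch of what those definitions might look like (the shapes and the placeholder count are assumptions, not the original model):

import sugartensor as tf   # sugartensor re-exports TensorFlow under `tf`

batch_size = 100
# hypothetical continuous condition placeholders, one per latent code
target_cval = [tf.placeholder(dtype=tf.float32, shape=(batch_size,))
               for _ in range(2)]
# `gen` would be the generator network output conditioned on these values;
# the generator architecture itself lives elsewhere in the original project.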
Example #2
    def __init__(self,
                 batch_size=16,
                 data_path='/home/murugesan/VCTK-Corpus/',
                 vocabulary_loading=False):
        if vocabulary_loading:
            vocabulary_file = 'muru_works' + self.__class__.__name__ + 'vocabulary.npy'
            if os.path.exists(vocabulary_file):
                self.index2byte = np.load(vocabulary_file)
                self.byte2index = {}
                for i, b in enumerate(self.index2byte):
                    self.byte2index[b] = i
                self.voca_size = len(self.index2byte)
                tf.sg_info('Vocabulary loaded')
                return
        labels, wave_files = self.load_corpus(data_path)

        # to constant tensor
        label = tf.convert_to_tensor(labels)
        wave_file = tf.convert_to_tensor(wave_files)

        # create queue from constant tensor
        label, wave_file = tf.train.slice_input_producer([label, wave_file],
                                                         shuffle=True)

        def load_mfcc(source):
            label, wav = source
            # decode label string to integer array
            lab = np.fromstring(label, np.int)
            # compute MFCC features (avoid shadowing the mfcc module)
            feat = mfcc.mfcc_generate(wav)
            return lab, feat
Example #3
    def __init__(self, batch_size=32, name='train'):

        # load train corpus
        sources, targets = self._load_corpus(mode='train')

        # to constant tensor
        source = tf.convert_to_tensor(sources)
        target = tf.convert_to_tensor(targets)

        # create queue from constant tensor
        source, target = tf.train.slice_input_producer([source, target])

        # create batch queue
        batch_queue = tf.train.shuffle_batch([source, target],
                                             batch_size,
                                             num_threads=32,
                                             capacity=batch_size * 64,
                                             min_after_dequeue=batch_size * 32,
                                             name=name)

        # split data
        self.source, self.target = batch_queue

        # calc total batch count
        self.num_batch = len(sources) // batch_size

        # print info
        tf.sg_info('Train data loaded.(total data=%d, total batch=%d)' %
                   (len(sources), self.num_batch))
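Since the `__init__` above only builds graph nodes, a queue runner must be active before `source`/`target` yield values. A hedged usage sketch (the wrapping class name is an assumption):

data = TrainData(batch_size=32)   # hypothetical name of the class defined above
with tf.Session() as sess:
    tf.sg_init(sess)
    # start the queue runners created by slice_input_producer/shuffle_batch
    with tf.sg_queue_context(sess):
        src, tgt = sess.run([data.source, data.target])
        print(src.shape, tgt.shape)   # each has batch_size rows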
Example #4
def run_generator(num, x1, x2, fig_name='sample.png'):
    with tf.Session() as sess:

        tf.sg_init(sess)

        # restore parameters
        tf.sg_restore(sess,
                      tf.train.latest_checkpoint('asset/train/infogan'),
                      category='generator')

        # run generator
        imgs = sess.run(gen, {
            target_num: num,
            target_cval_1: x1,
            target_cval_2: x2
        })

        # plot result
        _, ax = plt.subplots(10, 10, sharex=True, sharey=True)
        for i in range(10):
            for j in range(10):
                ax[i][j].imshow(imgs[i * 10 + j], 'gray')
                ax[i][j].set_axis_off()
        plt.savefig('asset/train/infogan/' + fig_name, dpi=600)
        tf.sg_info('Sample image saved to "asset/train/infogan/%s"' % fig_name)
        plt.close()
Example #5
def run_generator(num, x1, x2, fig_name='sample.png'):
    with tf.Session() as sess:
        tf.sg_init(sess)
        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        # run generator
        imgs = sess.run(gen, {target_num: num,
                              target_cval_1: x1,
                              target_cval_2: x2})

        # plot result
        _, ax = plt.subplots(10, 10, sharex=True, sharey=True)
        for i in range(10):
            for j in range(10):
                ax[i][j].plot(imgs[i * 10 + j, :, 0], color='b', linewidth=0.25)
                # Turn off tick labels only
                # ax[i][j].set_axis_off()
                ax[i][j].set_xticks([])
                ax[i][j].set_yticks([])

        plt.savefig('asset/train/' + fig_name, dpi=600)
        tf.sg_info('Sample image saved to "asset/train/%s"' % fig_name)
        plt.close()
Example #6
def build_input2(batch_size=16, set_name='train_sort', mode='train'):

    # load meta file
    label, mfcc_file, len1 = [], [], []

    with open(_data_path + 'preprocess/meta/%s.csv' % set_name) as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        for i, row in enumerate(reader):
            # mfcc file
            mfcc_file.append(_data_path + 'preprocess/mfcc/' + row[0] + '.npy')
            # label info (convert to string object for variable-length support)
            str1 = np.asarray(row[1:], dtype=np.int32)
            len_buf = np.asarray([len(str1)], dtype=np.int32)
            label.append(str1.tostring())
            len1.append(len_buf.tostring())
            if i >= 1999:
                break
    print(np.shape(len1))
    # to constant tensor
    label_t = tf.convert_to_tensor(label)
    mfcc_file_t = tf.convert_to_tensor(mfcc_file)
    len_t = tf.convert_to_tensor(len1)

    # create queue from constant tensor
    label_q, mfcc_file_q, len_q \
        = tf.train.slice_input_producer([label_t, mfcc_file_t, len_t], shuffle=False)

    # create label, mfcc queue
    label_q, mfcc_q, len_q = _load_mfcc(
        source=[label_q, mfcc_file_q, len_q],
        dtypes=[tf.int32, tf.float32, tf.int32],
        capacity=batch_size * 16,
        num_threads=16)

    # create batch queue with dynamic pad
    label_, mfcc_, len_ = tf.train.batch([label_q, mfcc_q, len_q],
                                         batch_size,
                                         shapes=[(None, ), (20, None), (1, )],
                                         num_threads=16,
                                         capacity=batch_size * 16,
                                         dynamic_pad=True,
                                         allow_smaller_final_batch=False)

    #label_ = tf.deserialize_many_sparse(label_, tf.int32)
    # batch * time * dim
    mfcc_ = tf.transpose(mfcc_, perm=[0, 2, 1])
    # calc total batch count
    num_batch = len(label) // batch_size
    len_ = tf.reshape(len_, [-1])
    assert label_.get_shape()[0] == batch_size
    #convert to sparse tensor

    # print info
    tf.sg_info('%s set loaded.(total data=%d, total batch=%d)' %
               (set_name.upper(), len(label), num_batch))
    return mfcc_, label_, len_
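As with the other pipelines here, `build_input2` only assembles graph nodes; its tensors produce data once queue runners are started. A sketch of consuming the output (usage assumed, following the queue-context pattern in the later examples):

mfcc_, label_, len_ = build_input2(batch_size=16, set_name='train_sort')
with tf.Session() as sess:
    tf.sg_init(sess)
    with tf.sg_queue_context(sess):
        m, l, n = sess.run([mfcc_, label_, len_])
        print(m.shape)   # (16, time, 20) after the transpose above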
Example #7
    def __init__(self, batch_size=16, data_path='asset/data/', vocabulary_loading=False):

        @tf.sg_producer_func
        def _load_mfcc(src_list):
            lab, wav = src_list  # label, wave_file
            # decode string to integer
            lab = np.fromstring(lab, np.int)
            # load wave file
            wav, sr = librosa.load(wav, mono=True)
            # mfcc
            mfcc = librosa.feature.mfcc(wav, sr)
            # return result
            return lab, mfcc

        # path for loading just vocabulary
        if vocabulary_loading:
            vocabulary_file = __vocabulary_save_dir__ + self.__class__.__name__ + '_vocabulary.npy'
            if os.path.exists(vocabulary_file):
                self.index2byte = np.load(vocabulary_file)
                self.byte2index = {}
                for i, b in enumerate(self.index2byte):
                    self.byte2index[b] = i
                self.voca_size = len(self.index2byte)
                tf.sg_info('VCTK vocabulary loaded.')
                return

        # load corpus
        labels, wave_files = self._load_corpus(data_path)

        # to constant tensor
        label = tf.convert_to_tensor(labels)
        wave_file = tf.convert_to_tensor(wave_files)

        # create queue from constant tensor
        label, wave_file = tf.train.slice_input_producer([label, wave_file], shuffle=True)

        # decode wave file
        label, mfcc = _load_mfcc(source=[label, wave_file], dtypes=[tf.sg_intx, tf.sg_floatx],
                                 capacity=128, num_threads=32)

        # create batch queue with dynamic pad
        batch_queue = tf.train.batch([label, mfcc], batch_size,
                                     shapes=[(None,), (20, None)],
                                     num_threads=32, capacity=batch_size*48,
                                     dynamic_pad=True)

        # split data
        self.label, self.mfcc = batch_queue
        # batch * time * dim
        self.mfcc = self.mfcc.sg_transpose(perm=[0, 2, 1])

        # calc total batch count
        self.num_batch = len(labels) // batch_size

        # print info
        tf.sg_info('VCTK corpus loaded.(total data=%d, total batch=%d)' % (len(labels), self.num_batch))
Example #8
def plot_images(imgs):
    # plot result
    _, ax = plt.subplots(10, 10, sharex=True, sharey=True)
    for i in range(10):
        for j in range(10):
            ax[i][j].imshow(imgs[i * 10 + j], 'gray')
            ax[i][j].set_axis_off()
    fname = get_next_filename()
    plt.savefig(fname, dpi=600)
    tf.sg_info('Sample image saved to "%s"' % fname)
    plt.close()
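`get_next_filename` is not shown in this example. One plausible stand-in that numbers samples so earlier plots are not overwritten (an assumption, not the original helper):

import os

def get_next_filename(save_dir='asset/train'):
    # return the first sample_NNN.png path that does not exist yet
    i = 0
    while os.path.exists(os.path.join(save_dir, 'sample_%03d.png' % i)):
        i += 1
    return os.path.join(save_dir, 'sample_%03d.png' % i)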
Example #9
    def __init__(self, batch_size=16, data_path='asset/data/', mode='train'):

        @tf.sg_producer_func
        def _load_mfcc(src_list):
            lab, wav = src_list  # label, wave_file
            # decode string to integer
            lab = np.fromstring(lab, np.int)
            # load wave file
            wav, sr = librosa.load(wav, mono=True)
            # mfcc
            hl = 512
            mfcc = librosa.feature.mfcc(wav, sr, n_mfcc=40, hop_length=hl)
            # keep the first 100 frames only
            mfcc = mfcc[:, :100]
            # return result
            return lab, mfcc

        print("Mode: %s" % mode)

        # load corpus
        labels, wave_files, accent_labels = self._load_corpus(data_path, mode=='train')
        labels = np.array(accent_labels)

        self.labels = labels
        self.wave_files = wave_files

        # to constant tensor
        label = tf.convert_to_tensor(labels)
        #label = tf.convert_to_tensor(accent_labels)

        wave_file = tf.convert_to_tensor(wave_files)

        # create queue from constant tensor
        label, wave_file = tf.train.slice_input_producer([label, wave_file], shuffle=True)

        # decode wave file
        label, mfcc = _load_mfcc(source=[label, wave_file], dtypes=[tf.sg_intx, tf.sg_floatx],
                                 capacity=128, num_threads=32)

        # create batch queue with dynamic pad
        batch_queue = tf.train.batch([label, mfcc], batch_size,
                                     shapes=[(None,), (40, None)],
                                     num_threads=32, capacity=batch_size*48,
                                     dynamic_pad=True)

        # split data
        self.label, self.mfcc = batch_queue
        # batch * time * dim
        self.mfcc = self.mfcc.sg_transpose(perm=[0, 2, 1])

        # calc total batch count
        self.num_batch = len(labels) // batch_size

        # print info
        tf.sg_info('VCTK corpus loaded.(total data=%d, total batch=%d)' % (len(labels), self.num_batch))
Example #10
    def __init__(self, batch_size=16, set_name='train'):

        # load meta file
        label, mfcc_file, filenames = [], [], []

        with open(_data_path +
                  'preprocess/meta/%s.csv' % set_name) as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            for row in reader:
                # mfcc file
                filenames.append(row[0])

                mfcc_file.append(_data_path + 'preprocess/mfcc/' + row[0] +
                                 '.npy')
                #mfcc_file.append(_data_path + 'preprocess/mfcc-one/' + row[0] + '.npy')
                # label info ( convert to string object for variable-length support )
                label.append(np.asarray(row[1:], dtype=np.int).tostring())

        # to constant tensor
        label_t = tf.convert_to_tensor(label)
        mfcc_file_t = tf.convert_to_tensor(mfcc_file)
        filenames_t = tf.convert_to_tensor(filenames)

        # create queue from constant tensor
        label_q, mfcc_file_q, filenames_q \
            = tf.train.slice_input_producer([label_t, mfcc_file_t, filenames_t], shuffle=False)
        #= tf.train.slice_input_producer([label_t, mfcc_file_t, filenames_t], shuffle=True)

        # create label, mfcc queue
        label_q, mfcc_q, filenames_q = _load_mfcc(
            source=[label_q, mfcc_file_q, filenames_q],
            dtypes=[tf.sg_intx, tf.sg_floatx, tf.string],
            capacity=256,
            num_threads=64)

        # create batch queue with dynamic pad
        batch_queue = tf.train.batch([label_q, mfcc_q, filenames_q],
                                     batch_size,
                                     shapes=[(None, ), (20, None), ()],
                                     num_threads=64,
                                     capacity=batch_size * 32,
                                     dynamic_pad=True)

        # split data
        self.label, self.mfcc, self.filenames = batch_queue
        # batch * time * dim
        self.mfcc = self.mfcc.sg_transpose(perm=[0, 2, 1])
        # calc total batch count
        self.num_batch = len(label) // batch_size

        # print info
        tf.sg_info('%s set loaded.(total data=%d, total batch=%d)' %
                   (set_name.upper(), len(label), self.num_batch))
Example #11
    def __init__(self,
                 batch_size=16,
                 set_name='learning_batch.csv',
                 data_path="MFCC/"):

        self._data_path = data_path

        # load meta file
        label, mfcc_file = [], []
        csv_file = pd.read_csv(set_name, header=None)
        for row in csv_file.iterrows():
            file_name, label_temp = row[1]
            label_temp = list(map(int, label_temp.split()))
            mfcc_file.append(self._data_path + file_name)
            # label info ( convert to string object for variable-length support )
            label.append(np.asarray(label_temp, dtype=np.int).tostring())

        # to constant tensor
        label_t = tf.convert_to_tensor(label)
        mfcc_file_t = tf.convert_to_tensor(mfcc_file)

        # create queue from constant tensor
        label_q, mfcc_file_q = tf.train.slice_input_producer(
            [label_t, mfcc_file_t], shuffle=True)

        # create label, mfcc queue
        label_q, mfcc_q = _load_mfcc(source=[label_q, mfcc_file_q],
                                     dtypes=[tf.sg_intx, tf.sg_floatx],
                                     capacity=256,
                                     num_threads=64)

        # create batch queue with dynamic pad
        batch_queue = tf.train.batch([label_q, mfcc_q],
                                     batch_size,
                                     shapes=[(None, ), (20, None)],
                                     num_threads=64,
                                     capacity=batch_size * 32,
                                     dynamic_pad=True)

        # split data
        self.label, self.mfcc = batch_queue
        # batch * time * dim
        self.mfcc = self.mfcc.sg_transpose(perm=[0, 2, 1])
        # calc total batch count
        self.num_batch = len(label) // batch_size

        # print info
        tf.sg_info('%s set loaded.(total data=%d, total batch=%d)' %
                   (set_name.upper(), len(label), self.num_batch))
Example #12
    def __init__(self):
        data, labels = [], []
        with open('data/Housing.csv') as csv_file:
            reader = csv.reader(csv_file)

            i = 0
            for row in reader:
                if i != 0:
                    labels.append(row[1])
                    cols = []
                    for col in row[2:]:
                        if col == 'yes':
                            col = '1'
                        elif col == 'no':
                            col = '0'
                        cols.append(col)
                    data.append(cols)
                i += 1

        label_t = tf.convert_to_tensor(labels)
        data_t = tf.convert_to_tensor(data)

        label_q, data_q\
            = tf.train.slice_input_producer([label_t, data_t], shuffle=True)

        label_q, data_q = _load_data(source=[label_q, data_q],
                                     dtypes=[sg.sg_intx, sg.sg_intx],
                                     capacity=256,
                                     num_threads=64)

        batch_queue = tf.train.batch([label_q, data_q],
                                     batch_size=32,
                                     shapes=[(), (11,)],
                                     num_threads=64,
                                     capacity=32 * 32,
                                     dynamic_pad=False)

        self.label, self.data = batch_queue
        self.num_batch = len(labels) // 32

        sg.sg_info('%s set loaded.(total data=%d, total batch=%d)' %
                   ('train', len(labels), self.num_batch))
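`_load_data` is defined elsewhere in the project. Judging by the `_load_mfcc` helpers in the other examples, it is probably a producer-function-decorated converter; a hedged sketch (decoding details assumed):

@sg.sg_producer_func
def _load_data(src_list):
    label, cols = src_list                   # byte strings queued above
    lab = np.int32(int(label.decode()))      # house price label
    dat = np.asarray(cols, dtype=np.int32)   # the 11 numeric feature columns
    return lab, dat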
Example #13
    def __init__(self, batch_size=16, set_name='train'):

        # load meta file
        label, mfcc_file = [], []
        with open(_data_path + 'preprocess/meta/%s.csv' % set_name) as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            for row in reader:
                # mfcc file
                mfcc_file.append(_data_path + 'preprocess/mfcc/' + row[0] + '.npy')
                # label info ( convert to string object for variable-length support )
                label.append(np.asarray(row[1:], dtype=np.int).tostring())

        # to constant tensor
        label_t = tf.convert_to_tensor(label)
        mfcc_file_t = tf.convert_to_tensor(mfcc_file)

        # create queue from constant tensor
        label_q, mfcc_file_q \
            = tf.train.slice_input_producer([label_t, mfcc_file_t], shuffle=True)

        # create label, mfcc queue
        label_q, mfcc_q = _load_mfcc(source=[label_q, mfcc_file_q],
                                     dtypes=[tf.sg_intx, tf.sg_floatx],
                                     capacity=256, num_threads=64)

        # create batch queue with dynamic pad
        batch_queue = tf.train.batch([label_q, mfcc_q], batch_size,
                                     shapes=[(None,), (20, None)],
                                     num_threads=64, capacity=batch_size*32,
                                     dynamic_pad=True)

        # split data
        self.label, self.mfcc = batch_queue
        # batch * time * dim
        self.mfcc = self.mfcc.sg_transpose(perm=[0, 2, 1])
        # calc total batch count
        self.num_batch = len(label) // batch_size

        # print info
        tf.sg_info('%s set loaded.(total data=%d, total batch=%d)'
                   % (set_name.upper(), len(label), self.num_batch))
Example #14
def run_generator(num, x1, x2, fig_name='sample.png'):
    with tf.Session() as sess:
        tf.sg_init(sess)
        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        # run generator
        imgs = sess.run(gen, {target_num: num,
                              target_cval_1: x1,
                              target_cval_2: x2})

        # plot result
        _, ax = plt.subplots(10, 10, sharex=True, sharey=True)
        for i in range(10):
            for j in range(10):
                ax[i][j].plot(imgs[i * 10 + j, :, 0])
                ax[i][j].plot(imgs[i * 10 + j, :, 1])
                ax[i][j].set_axis_off()
        plt.savefig('asset/train/' + fig_name, dpi=600)
        tf.sg_info('Sample image saved to "asset/train/%s"' % fig_name)
        plt.close()
Example #15
#
# run network
#

with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

    # init variables
    tf.sg_init(sess)

    # restore parameters
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint('asset/train'))

    # logging
    tf.sg_info('Testing started on %s set at global step[%08d].' %
               (tf.sg_arg().set.upper(), sess.run(tf.sg_global_step())))

    with tf.sg_queue_context():

        # create progress bar
        iterator = tqdm(range(0, int(data.num_batch * tf.sg_arg().frac)), total=int(data.num_batch * tf.sg_arg().frac),
                        initial=0, desc='test', ncols=70, unit='b', leave=False)

        # batch loop
        loss_avg = 0.
        for _ in iterator:

            # run session
            batch_loss = sess.run(loss)

            # loss history update
Example #16
def console_log(sess_):
    if epoch >= 0:
        tf.sg_info('\tEpoch[%03d:gs=%d] - loss = %s' %
                   (epoch, sess_.run(tf.sg_global_step()),
                    ('NA' if loss is None else '%8.6f' % loss)))
Example #17
        plt.figure(figsize=(4, 3))
        hr = plt.imshow(sr[0], 'gray')
        plt.axis('off')

        plt.savefig(fig_name, dpi=600)
        tf.sg_info('Sample image saved to "%s"' % fig_name)
        plt.close()

        plt.figure(figsize=(4, 3))
        lr = plt.imshow(gt[0], 'gray')
        plt.axis('off')
        plt.savefig(fig_name2, dpi=600)
        tf.sg_info('Sample image saved to "%s"' % fig_name2)
        plt.close()
Example #18
    def wrapper(**kwargs):
        r""" Manages arguments of `tf.sg_opt`.

        Args:
          **kwargs:
            lr: A Python Scalar (optional). Learning rate. Default is .001.

            save_dir: A string. The root path to which checkpoint and log files are saved.
              Default is `asset/train`.
            max_ep: A positive integer. Maximum number of epochs. Default is 1000.
            ep_size: A positive integer. Number of Total batches in an epoch.
              For proper display of log. Default is 1e5.

            save_interval: A Python scalar. The interval of saving checkpoint files.
              By default, for every 600 seconds, a checkpoint file is written.
            log_interval: A Python scalar. The interval of recoding logs.
              By default, for every 60 seconds, logging is executed.
            max_keep: A positive integer. Maximum number of recent checkpoints to keep. Default is 5.
            keep_interval: A Python scalar. How often to keep checkpoints. Default is 1 hour.

            eval_metric: A list of tensors containing the value to evaluate. Default is [].

            tqdm: Boolean. If True (Default), progress bars are shown. If False, a series of loss
                will be shown on the console.
        """
        opt = tf.sg_opt(kwargs)

        # default training options
        opt += tf.sg_opt(lr=0.001,
                         save_dir='asset/train',
                         max_ep=1000, ep_size=100000,
                         save_interval=600, log_interval=60,
                         eval_metric=[],
                         max_keep=5, keep_interval=1,
                         tqdm=True)

        # training epoch and loss
        epoch, loss = -1, None

        # checkpoint saver
        saver = tf.train.Saver(max_to_keep=opt.max_keep,
                               keep_checkpoint_every_n_hours=opt.keep_interval)

        # add evaluation summary
        for m in opt.eval_metric:
            tf.sg_summary_metric(m)

        # summary writer
        log_dir = opt.save_dir + '/run-%02d%02d-%02d%02d' % tuple(time.localtime(time.time()))[1:5]
        summary_writer = tf.summary.FileWriter(log_dir)

        # console logging function
        def console_log(sess_):
            if epoch >= 0:
                tf.sg_info('\tEpoch[%03d:gs=%d] - loss = %s' %
                           (epoch, sess_.run(tf.sg_global_step()),
                            ('NA' if loss is None else '%8.6f' % loss)))

        local_init_op = tf.group(tf.sg_phase().assign(True), tf.tables_initializer(), tf.local_variables_initializer())

        # create supervisor
        sv = tf.train.Supervisor(logdir=opt.save_dir,
                                 saver=saver,
                                 save_model_secs=opt.save_interval,
                                 summary_writer=summary_writer,
                                 save_summaries_secs=opt.log_interval,
                                 global_step=tf.sg_global_step(),
                                 local_init_op=local_init_op)

        # create session
        with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

            # console logging loop
            if not opt.tqdm:
                sv.loop(opt.log_interval, console_log, args=(sess,))

            # get start epoch
            _step = sess.run(tf.sg_global_step())
            ep = _step // opt.ep_size

            best_f1 = 0
            # check if already finished
            if ep <= opt.max_ep:

                # logging
                tf.sg_info('Training started from epoch[%03d]-step[%d].' % (ep, _step))

                # epoch loop
                for ep in range(ep, opt.max_ep + 1):

                    # update epoch info
                    start_step = sess.run(tf.sg_global_step()) % opt.ep_size
                    epoch = ep

                    # create progressbar iterator
                    if opt.tqdm:
                        iterator = tqdm(range(start_step, opt.ep_size), total=opt.ep_size, initial=start_step,
                                        desc='train', ncols=70, unit='b', leave=False)
                    else:
                        iterator = range(start_step, opt.ep_size)

                    # batch loop
                    for _ in iterator:

                        # exit loop
                        if sv.should_stop():
                            break

                        # call train function
                        batch_loss = func(sess, opt)

                        # loss history update
                        if batch_loss is not None and \
                                not np.any(np.isnan(batch_loss)) and not np.any(np.isinf(batch_loss)):
                            if loss is None:
                                loss = np.mean(batch_loss)
                            else:
                                loss = loss * 0.9 + np.mean(batch_loss) * 0.1

                    # log epoch information
                    console_log(sess)

                    f1_stat = show_metrics(sv, sess, opt.eval_metric[2], opt.eval_metric[3], ep, opt.val_ep_size,
                                           'val', use_tqdm=True)

                    if f1_stat > best_f1:
                        best_f1 = f1_stat

                        max_model_file = opt.save_dir + max_model_name

                        # save last version
                        saver.save(sess, max_model_file)
                        print("Improved F1 score, max model saved in file: %s" % max_model_file)

                        print('Test metrics:')
                        show_metrics(sv, sess, opt.test_metric[0], opt.test_metric[1], ep, opt.test_ep_size,
                                     'test', use_tqdm=True)

                # save last version
                saver.save(sess, opt.save_dir + '/model.ckpt', global_step=sess.run(tf.sg_global_step()))

                # logging
                tf.sg_info('Training finished at epoch[%d]-step[%d].' % (ep, sess.run(tf.sg_global_step())))
            else:
                tf.sg_info('Training already finished at epoch[%d]-step[%d].' %
                           (ep - 1, sess.run(tf.sg_global_step())))
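The `wrapper` above is the body of a decorator in the style of sugartensor's `sg_train_func`: it wraps a function that performs one optimization step and returns the batch loss, and the decorated function is then called with the training options documented in the docstring. A hedged usage sketch (the decorator name and graph objects are assumptions):

@sg_train_func                    # assumed name of the decorator returning `wrapper`
def train(sess, opt):
    # one optimization step; `loss` and `train_op` are built elsewhere
    return sess.run([loss, train_op])[0]

# kick off training with the options handled by the wrapper
train(lr=0.001, max_ep=20, ep_size=data.num_batch, save_dir='asset/train')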
Example #19
z = tf.random_normal((batch_size, rand_dim))

# generator
gen = generator(z).sg_squeeze()

#
# draw samples
#

with tf.Session() as sess:

    tf.sg_init(sess)

    # restore parameters
    tf.sg_restore(sess,
                  tf.train.latest_checkpoint('asset/train/gan'),
                  category='generator')

    # run generator
    imgs = sess.run(gen)

    # plot result
    _, ax = plt.subplots(10, 10, sharex=True, sharey=True)
    for i in range(10):
        for j in range(10):
            ax[i][j].imshow(imgs[i * 10 + j], 'gray')
            ax[i][j].set_axis_off()
    plt.savefig('asset/train/gan/sample.png', dpi=600)
    tf.sg_info('Sample image saved to "asset/train/gan/sample.png"')
    plt.close()
Example #20
def sg_tsne(tensor, meta_file='metadata.tsv', save_dir='asset/tsne'):
    r""" Manages arguments of `tf.sg_opt`.

    Args:
      **kwargs:
        lr: A Python Scalar (optional). Learning rate. Default is .001.

        eval_metric: A list of tensors containing the value to evaluate. Default is [].
        early_stop: Boolean. If True (default), the training should stop when the following two conditions are met.
          i. Current loss is less than .95 * previous loss.
          ii. Current learning rate is less than 5e-6.
        lr_reset: Boolean. If True, learning rate is set to opt.lr. when training restarts.
          Otherwise (Default), the value of the stored `_learning_rate` is taken.
        save_dir: A string. The root path to which checkpoint and log files are saved.
          Default is `asset/train`.
        max_ep: A positive integer. Maximum number of epochs. Default is 1000.
        ep_size: A positive integer. Number of Total batches in an epoch.
          For proper display of log. Default is 1e5.

        save_interval: A Python scalar. The interval of saving checkpoint files.
          By default, for every 600 seconds, a checkpoint file is written.
        log_interval: A Python scalar. The interval of recoding logs.
          By default, for every 60 seconds, logging is executed.
        max_keep: A positive integer. Maximum number of recent checkpoints to keep. Default is 5.
        keep_interval: A Python scalar. How often to keep checkpoints. Default is 1 hour.

        tqdm: Boolean. If True (Default), progress bars are shown.
        console_log: Boolean. If True, a series of loss will be shown
          on the console instead of tensorboard. Default is False.
        embeds: List of Tuple. [(Tensor to add, metadata path)]
    """

    # make directory if not exist
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # checkpoint saver
    saver = tf.train.Saver()

    # summary writer
    summary_writer = tf.summary.FileWriter(save_dir,
                                           graph=tf.get_default_graph())

    # embedding visualizer
    config = projector.ProjectorConfig()
    emb = config.embeddings.add()
    emb.tensor_name = tensor.name  # tensor
    # emb.metadata_path = os.path.join(save_dir, meta_file)   # metadata file
    projector.visualize_embeddings(summary_writer, config)

    # create session
    sess = tf.Session()
    # initialize variables
    sg_init(sess)

    # save tsne
    saver.save(sess, save_dir + '/model-tsne')

    # logging
    tf.sg_info('Tsne saved at %s' % (save_dir + '/model-tsne'))

    # close session
    sess.close()
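A minimal sketch of calling `sg_tsne`, assuming an embedding variable already exists in the default graph (the variable name and shape here are hypothetical):

# hypothetical 10k x 128 embedding matrix
emb = tf.get_variable('emb', shape=(10000, 128))
sg_tsne(emb, meta_file='metadata.tsv', save_dir='asset/tsne')
# inspect with: tensorboard --logdir asset/tsne  (Projector tab)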
Example #21
#
# run network
#

with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

    # init variables
    tf.sg_init(sess)

    # restore parameters
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint('asset/train'))

    # logging
    tf.sg_info('Testing started on %s set at global step[%08d].' %
               (tf.sg_arg().set.upper(), sess.run(tf.sg_global_step())))

    with tf.sg_queue_context():

        # create progress bar
        iterator = tqdm(range(0, int(data.num_batch * tf.sg_arg().frac)), total=int(data.num_batch * tf.sg_arg().frac),
                        initial=0, desc='test', ncols=70, unit='b', leave=False)

        # batch loop
        loss_avg = 0.
        for _ in iterator:

            # run session
            batch_loss = sess.run(loss)

            # loss history update
Example #22
    def __init__(self, batch_size=16, set_name='train'):

        # load meta file
        label, mfcc_file = [], []
        self.daniter_label = []
        count = 0
        with open(_data_path +
                  'preprocess/meta/%s.csv' % set_name) as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            for row in reader:
                # DANITER magic small dataset
                if count not in [0, 24, 25, 26, 27]:
                    count += 1
                    if count == max_count:
                        break
                    continue

                # mfcc file
                mfcc_file.append(_data_path + 'preprocess/mfcc/' + row[0] +
                                 '.npy')
                # label info ( convert to string object for variable-length support )
                label.append(np.asarray(row[1:], dtype=np.int).tostring())
                self.daniter_label.append(np.asarray(row[1:], dtype=np.int))

                # DANITER
                print "\n\n\n##### Label ######", count
                print "".join([
                    index2byte[letter]
                    for letter in np.asarray(row[1:], dtype=np.int)
                ])
                # DANITER

                count += 1
                if count == max_count:
                    break

        self.mfcc_file = mfcc_file

        # to constant tensor
        label_t = tf.convert_to_tensor(label)
        mfcc_file_t = tf.convert_to_tensor(mfcc_file)

        # create queue from constant tensor
        label_q, mfcc_file_q \
            = tf.train.slice_input_producer([label_t, mfcc_file_t], shuffle=True)

        # create label, mfcc queue
        label_q, mfcc_q = _load_mfcc(source=[label_q, mfcc_file_q],
                                     dtypes=[tf.sg_intx, tf.sg_floatx],
                                     capacity=256,
                                     num_threads=64)

        # create batch queue with dynamic pad
        batch_queue = tf.train.batch([label_q, mfcc_q],
                                     batch_size,
                                     shapes=[(None, ), (20, None)],
                                     num_threads=64,
                                     capacity=batch_size * 32,
                                     dynamic_pad=True)

        # split data
        self.label, self.mfcc = batch_queue
        # batch * time * dim
        self.mfcc = self.mfcc.sg_transpose(perm=[0, 2, 1])
        # calc total batch count
        self.num_batch = len(label) // batch_size

        # print info
        tf.sg_info('%s set loaded.(total data=%d, total batch=%d)' %
                   (set_name.upper(), len(label), self.num_batch))
Example #23
    def wrapper(**kwargs):
        opt = tf.sg_opt(kwargs)

        # default training options
        opt += tf.sg_opt(lr=0.001,
                         save_dir='asset/train',
                         max_ep=1000,
                         ep_size=100000,
                         save_interval=600,
                         log_interval=60,
                         early_stop=True,
                         lr_reset=False,
                         eval_metric=[],
                         max_keep=5,
                         keep_interval=1,
                         tqdm=True,
                         console_log=False)

        # make directory if not exist
        if not os.path.exists(opt.save_dir + '/log'):
            os.makedirs(opt.save_dir + '/log')
        if not os.path.exists(opt.save_dir + '/ckpt'):
            os.makedirs(opt.save_dir + '/ckpt')

        # find last checkpoint
        last_file = tf.train.latest_checkpoint(opt.save_dir + '/ckpt')
        if last_file:
            ep = start_ep = int(last_file.split('-')[1]) + 1
            start_step = int(last_file.split('-')[2])
        else:
            ep = start_ep = 1
            start_step = 0

        # checkpoint saver
        saver = tf.train.Saver(max_to_keep=opt.max_keep,
                               keep_checkpoint_every_n_hours=opt.keep_interval)

        # summary writer
        summary_writer = tf.summary.FileWriter(opt.save_dir + '/log',
                                               graph=tf.get_default_graph())

        # add learning rate summary
        with tf.name_scope('summary'):
            tf.summary.scalar('60. learning_rate/learning_rate',
                              _learning_rate)

        # add evaluation metric summary
        for m in opt.eval_metric:
            tf.sg_summary_metric(m)

        # summary op
        summary_op = tf.summary.merge_all()

        # create session
        if opt.sess:
            sess = opt.sess
        else:
            # session with multiple GPU support
            sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
            # initialize variables
            sg_init(sess)

        # restore last checkpoint
        if last_file:
            saver.restore(sess, last_file)

        # set learning rate
        if start_ep == 1 or opt.lr_reset:
            sess.run(_learning_rate.assign(opt.lr))

        # logging
        tf.sg_info('Training started from epoch[%03d]-step[%d].' %
                   (start_ep, start_step))

        try:
            # start data queue runner
            with tf.sg_queue_context(sess):

                # set session mode to train
                tf.sg_set_train(sess)

                # loss history for learning rate decay
                loss, loss_prev, early_stopped = None, None, False

                # time stamp for saving and logging
                last_saved = last_logged = time.time()

                # epoch loop
                for ep in range(start_ep, opt.max_ep + 1):

                    # show progressbar
                    if opt.tqdm:
                        iterator = tqdm(range(opt.ep_size),
                                        desc='train',
                                        ncols=70,
                                        unit='b',
                                        leave=False)
                    else:
                        iterator = range(opt.ep_size)

                    # batch loop
                    for _ in iterator:

                        # call train function
                        batch_loss = func(sess, opt)

                        # loss history update
                        if batch_loss is not None:
                            if loss is None:
                                loss = np.mean(batch_loss)
                            else:
                                loss = loss * 0.9 + np.mean(batch_loss) * 0.1

                        # saving
                        if time.time() - last_saved > opt.save_interval:
                            last_saved = time.time()
                            saver.save(sess,
                                       opt.save_dir + '/ckpt/model-%03d' % ep,
                                       write_meta_graph=False,
                                       global_step=sess.run(
                                           tf.sg_global_step()))

                        # logging
                        if time.time() - last_logged > opt.log_interval:
                            last_logged = time.time()

                            # set session mode to infer
                            tf.sg_set_infer(sess)

                            # run evaluation op
                            if len(opt.eval_metric) > 0:
                                sess.run(opt.eval_metric)

                            if opt.console_log:  # console logging
                                # log epoch information
                                tf.sg_info(
                                    '\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s'
                                    % (ep, sess.run(_learning_rate),
                                       sess.run(tf.sg_global_step()),
                                       ('NA' if loss is None else '%8.6f' %
                                        loss)))
                            else:  # tensorboard logging
                                # run logging op
                                summary_writer.add_summary(
                                    sess.run(summary_op),
                                    global_step=sess.run(tf.sg_global_step()))

                            # learning rate decay
                            if opt.early_stop and loss_prev:
                                # if loss stalling
                                if loss >= 0.95 * loss_prev:
                                    # early stopping
                                    current_lr = sess.run(_learning_rate)
                                    if current_lr < 5e-6:
                                        early_stopped = True
                                        break
                                    else:
                                        # decrease learning rate by half
                                        sess.run(
                                            _learning_rate.assign(current_lr /
                                                                  2.))

                            # update loss history
                            loss_prev = loss

                            # revert session mode to train
                            tf.sg_set_train(sess)

                    # log epoch information
                    if not opt.console_log:
                        tf.sg_info(
                            '\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s' %
                            (ep, sess.run(_learning_rate),
                             sess.run(tf.sg_global_step()),
                             ('NA' if loss is None else '%8.6f' % loss)))

                    if early_stopped:
                        tf.sg_info('\tEarly stopped ( no loss progress ).')
                        break
        finally:
            # save last epoch
            saver.save(sess,
                       opt.save_dir + '/ckpt/model-%03d' % ep,
                       write_meta_graph=False,
                       global_step=sess.run(tf.sg_global_step()))

            # set session mode to infer
            tf.sg_set_infer(sess)

            # logging
            tf.sg_info('Training finished at epoch[%d]-step[%d].' %
                       (ep, sess.run(tf.sg_global_step())))

            # close session
            if opt.sess is None:
                sess.close()
Example #24
    def wrapper(**kwargs):
        r""" Manages arguments of `tf.sg_opt`.

        Args:
          **kwargs:
            lr: A Python Scalar (optional). Learning rate. Default is .001.

            eval_metric: A list of tensors containing the value to evaluate. Default is [].
            early_stop: Boolean. If True (default), the training should stop when the following two conditions are met.
              i. Current loss is less than .95 * previous loss.
              ii. Current learning rate is less than 5e-6.
            lr_reset: Boolean. If True, learning rate is set to opt.lr. when training restarts.
              Otherwise (Default), the value of the stored `_learning_rate` is taken.
            save_dir: A string. The root path to which checkpoint and log files are saved.
              Default is `asset/train`.
            max_ep: A positive integer. Maximum number of epochs. Default is 1000.
            ep_size: A positive integer. Number of Total batches in an epoch.
              For proper display of log. Default is 1e5.

            save_interval: A Python scalar. The interval of saving checkpoint files.
              By default, for every 600 seconds, a checkpoint file is written.
            log_interval: A Python scalar. The interval of recoding logs.
              By default, for every 60 seconds, logging is executed.
            max_keep: A positive integer. Maximum number of recent checkpoints to keep. Default is 5.
            keep_interval: A Python scalar. How often to keep checkpoints. Default is 1 hour.

            tqdm: Boolean. If True (Default), progress bars are shown.
            console_log: Boolean. If True, a series of loss will be shown
              on the console instead of tensorboard. Default is False.
        """
        opt = tf.sg_opt(kwargs)

        # default training options
        opt += tf.sg_opt(lr=0.001,
                         save_dir='asset/train',
                         max_ep=1000, ep_size=100000,
                         save_interval=600, log_interval=60,
                         early_stop=True, lr_reset=False,
                         eval_metric=[],
                         max_keep=5, keep_interval=1,
                         tqdm=True, console_log=False)

        # make directory if not exist
        if not os.path.exists(opt.save_dir):
            os.makedirs(opt.save_dir)

        # find last checkpoint
        last_file = tf.train.latest_checkpoint(opt.save_dir)
        if last_file:
            ep = start_ep = int(last_file.split('-')[1]) + 1
            start_step = int(last_file.split('-')[2])
        else:
            ep = start_ep = 1
            start_step = 0

        # checkpoint saver
        saver = tf.train.Saver(max_to_keep=opt.max_keep,
                               keep_checkpoint_every_n_hours=opt.keep_interval)

        # summary writer
        summary_writer = tf.summary.FileWriter(opt.save_dir, graph=tf.get_default_graph())

        # add learning rate summary
        tf.summary.scalar('learning_r', _learning_rate)

        # add evaluation metric summary
        for m in opt.eval_metric:
            tf.sg_summary_metric(m)

        # summary op
        summary_op = tf.summary.merge_all()

        # create session
        if opt.sess:
            sess = opt.sess
        else:
            # session with multiple GPU support
            sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        # initialize variables
        sg_init(sess)

        # restore last checkpoint
        if last_file:
            saver.restore(sess, last_file)

        # set learning rate
        if start_ep == 1 or opt.lr_reset:
            sess.run(_learning_rate.assign(opt.lr))

        # logging
        tf.sg_info('Training started from epoch[%03d]-step[%d].' % (start_ep, start_step))

        try:
            # start data queue runner
            with tf.sg_queue_context(sess):

                # set session mode to train
                tf.sg_set_train(sess)

                # loss history for learning rate decay
                loss, loss_prev, early_stopped = None, None, False

                # time stamp for saving and logging
                last_saved = last_logged = time.time()

                # epoch loop
                for ep in range(start_ep, opt.max_ep + 1):

                    # show progressbar
                    if opt.tqdm:
                        iterator = tqdm(range(opt.ep_size), desc='train', ncols=70, unit='b', leave=False)
                    else:
                        iterator = range(opt.ep_size)

                    # batch loop
                    for _ in iterator:

                        # call train function
                        batch_loss = func(sess, opt)

                        # loss history update
                        if batch_loss is not None:
                            if loss is None:
                                loss = np.mean(batch_loss)
                            else:
                                loss = loss * 0.9 + np.mean(batch_loss) * 0.1

                        # saving
                        if time.time() - last_saved > opt.save_interval:
                            last_saved = time.time()
                            saver.save(sess, opt.save_dir + '/model-%03d' % ep,
                                       write_meta_graph=False,
                                       global_step=sess.run(tf.sg_global_step()))

                        # logging
                        if time.time() - last_logged > opt.log_interval:
                            last_logged = time.time()

                            # set session mode to infer
                            tf.sg_set_infer(sess)

                            # run evaluation op
                            if len(opt.eval_metric) > 0:
                                sess.run(opt.eval_metric)

                            if opt.console_log:   # console logging
                                # log epoch information
                                tf.sg_info('\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s' %
                                           (ep, sess.run(_learning_rate), sess.run(tf.sg_global_step()),
                                            ('NA' if loss is None else '%8.6f' % loss)))
                            else:   # tensorboard logging
                                # run logging op
                                summary_writer.add_summary(sess.run(summary_op),
                                                           global_step=sess.run(tf.sg_global_step()))

                            # learning rate decay
                            if opt.early_stop and loss_prev:
                                # if loss stalling
                                if loss >= 0.95 * loss_prev:
                                    # early stopping
                                    current_lr = sess.run(_learning_rate)
                                    if current_lr < 5e-6:
                                        early_stopped = True
                                        break
                                    else:
                                        # decrease learning rate by half
                                        sess.run(_learning_rate.assign(current_lr / 2.))

                            # update loss history
                            loss_prev = loss

                            # revert session mode to train
                            tf.sg_set_train(sess)

                    # log epoch information
                    if not opt.console_log:
                        tf.sg_info('\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s' %
                                   (ep, sess.run(_learning_rate), sess.run(tf.sg_global_step()),
                                    ('NA' if loss is None else '%8.6f' % loss)))

                    if early_stopped:
                        tf.sg_info('\tEarly stopped ( no loss progress ).')
                        break
        finally:
            # save last epoch
            saver.save(sess, opt.save_dir + '/model-%03d' % ep,
                       write_meta_graph=False,
                       global_step=sess.run(tf.sg_global_step()))

            # set session mode to infer
            tf.sg_set_infer(sess)

            # logging
            tf.sg_info('Training finished at epoch[%d]-step[%d].' % (ep, sess.run(tf.sg_global_step())))

            # close session
            if opt.sess is None:
                sess.close()
Example #25
# create session
print("Starting session...")
with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    # console logging loop
    if not opt.tqdm:
        sv.loop(opt.log_interval, console_log, args=(sess, ))

    # get start epoch
    _step = sess.run(tf.sg_global_step())
    ep = 1

    # check if already finished
    if ep <= opt.max_ep:

        # logging
        tf.sg_info('Training started from epoch[%03d]-step[%d].' % (ep, _step))

        # epoch loop
        for ep in range(ep, opt.max_ep + 1):

            # update epoch info
            start_step = 1
            epoch = ep

            # create progressbar iterator
            if opt.tqdm:
                iterator = tqdm(range(start_step, opt.ep_size), total=opt.ep_size, initial=start_step,
                                desc='train', ncols=70, unit='b', leave=False)
            else:
                iterator = range(start_step, opt.ep_size)
Example #26
fig_name = 'asset/train/sample.png'
with tf.Session() as sess:
    with tf.sg_queue_context(sess):

        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        # run generator
        gt, low, bicubic, sr = sess.run([x.sg_squeeze(), x_nearest, x_bicubic, gen])

        # plot result
        _, ax = plt.subplots(10, 12, sharex=True, sharey=True)
        for i in range(10):
            for j in range(3):
                ax[i][j*4].imshow(low[i*3+j], 'gray')
                ax[i][j*4].set_axis_off()
                ax[i][j*4+1].imshow(bicubic[i*3+j], 'gray')
                ax[i][j*4+1].set_axis_off()
                ax[i][j*4+2].imshow(sr[i*3+j], 'gray')
                ax[i][j*4+2].set_axis_off()
                ax[i][j*4+3].imshow(gt[i*3+j], 'gray')
                ax[i][j*4+3].set_axis_off()

        plt.savefig(fig_name, dpi=600)
        tf.sg_info('Sample image saved to "%s"' % fig_name)
        plt.close()
Example #27
    def __init__(self,
                 batch_size=8,
                 name='train',
                 path=join(DATA_DIR, 'out_data.data'),
                 b2i={}):
        if name == "train":

            print("Loading corpus...")
            # load train corpus
            if not b2i:
                sources, targets = self._load_corpus(mode='train', path=path)
            else:
                sources, targets = load_corpus_dict(path, b2i, 0, 150)
            print("Converting source to tensors...")
            # to constant tensor
            source = tf.convert_to_tensor(sources)
            print("Converting target to tensors...")
            target = tf.convert_to_tensor(targets)

            # create queue from constant tensor
            source, target = tf.train.slice_input_producer([source, target])

            # create batch queue
            batch_queue = tf.train.shuffle_batch([source, target],
                                                 batch_size,
                                                 num_threads=4,
                                                 capacity=batch_size * 8,
                                                 min_after_dequeue=batch_size * 4,
                                                 name=name)

            # split data
            self.source, self.target = batch_queue

            # calc total batch count
            self.num_batch = len(sources) // batch_size

            # print info
            tf.sg_info('Train data loaded.(total data=%d, total batch=%d)' %
                       (len(sources), self.num_batch))

        if name == "test":
            print("Loading test corpus...")

            sources = self._load_corpus(mode='test', path=path)
            targets = np.zeros_like(sources)
            print("Converting source to tensors...")
            # to constant tensor
            source = tf.convert_to_tensor(sources)
            print("Converting target to tensors...")
            target = tf.convert_to_tensor(targets)

            # create queue from constant tensor
            source, target = tf.train.slice_input_producer([source, target])

            # create batch queue
            batch_queue = tf.train.shuffle_batch([source, target],
                                                 batch_size,
                                                 num_threads=4,
                                                 capacity=batch_size * 8,
                                                 min_after_dequeue=batch_size * 4,
                                                 name=name)

            # split data
            self.source, self.target = batch_queue

            # calc total batch count
            self.num_batch = len(sources) // batch_size

            # print info
            tf.sg_info('Test data loaded.(total data=%d, total batch=%d)' %
                       (len(sources), self.num_batch))