Code Example #1
def create_architecture_adversarial(cfg, batch_size, multiplier_lst,
                                    logits_budget_lst_dct, loss_budget_lst_dct,
                                    scope, videos, utility_labels,
                                    budget_labels, dropout):
    '''
    Create the architecture of the adversarial model in the graph: the
    degradation model fd, the utility model fT, and an ensemble of budget
    models fb. Used in the adversarial training stage (this includes testing,
    but not the two-fold evaluation).
    '''
    # fd part:
    degrad_videos = residualNet(videos, is_video=True)
    degrad_videos = _avg_replicate(
        degrad_videos) if FLAGS.use_avg_replicate else degrad_videos
    # fd part ends
    # fT part:
    logits_utility = utilityNet(degrad_videos, dropout, wd=0.001)
    loss_utility = tower_loss_xentropy_sparse(scope,
                                              logits_utility,
                                              utility_labels,
                                              use_weight_decay=True)
    # fT part ends
    # fb part:
    logits_budget = tf.zeros([batch_size, cfg['DATA']['NUM_CLASSES_BUDGET']])
    loss_budget = 0.0
    budget_logits_lst = []
    for multiplier in multiplier_lst:
        print(multiplier)
        logits = budgetNet(degrad_videos, depth_multiplier=multiplier)
        budget_logits_lst.append(logits)
        loss = tower_loss_xentropy_sparse(scope,
                                          logits,
                                          budget_labels,
                                          use_weight_decay=False)
        logits_budget_lst_dct[str(multiplier)].append(logits)
        loss_budget_lst_dct[str(multiplier)].append(loss)
        logits_budget += logits / FLAGS.NBudget
        loss_budget += loss / FLAGS.NBudget
    # fb part ends.
    # Finding the largest budget loss (and its index) among the M ensembled
    # budget models is disabled here; a None placeholder is returned for
    # argmax_adverse_budget_loss instead.
    argmax_adverse_budget_loss = None

    # Define loss_degrad as the negative budget loss (the adversarial objective for fd):
    loss_degrad = -loss_budget

    return loss_degrad, loss_budget, loss_utility, logits_budget, logits_utility, argmax_adverse_budget_loss
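
A minimal sketch of how the returned losses might drive alternating adversarial updates. The two optimizers and the variable lists varlist_degrad and varlist_budget are assumptions, not part of the snippet above:

loss_degrad, loss_budget, loss_utility, logits_budget, logits_utility, _ = \
    create_architecture_adversarial(cfg, batch_size, multiplier_lst,
                                    logits_budget_lst_dct, loss_budget_lst_dct,
                                    scope, videos, utility_labels,
                                    budget_labels, dropout)
# Hypothetical optimizers: the degradation model fd descends on
# loss_degrad = -loss_budget (i.e. it maximizes the budget loss), while the
# budget models minimize their own loss on the degraded videos:
opt_degrad = tf.train.AdamOptimizer(1e-4)
opt_budget = tf.train.AdamOptimizer(1e-4)
train_degrad_op = opt_degrad.minimize(loss_degrad, var_list=varlist_degrad)
train_budget_op = opt_budget.minimize(loss_budget, var_list=varlist_budget)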
Code Example #2
def create_architecture_adversarial(scope, videos, budget_labels, istraining_placeholder):
	'''
	Create the architecture of the adversarial model in the graph
	Args:
		istraining_placeholder: whether the graph is in the adversarial
			training phase (this includes testing, but not the two-fold evaluation).
	'''
	# fd part:
	degrad_videos = residualNet(videos, is_video=True)
	degrad_videos = _avg_replicate(degrad_videos) if FLAGS.use_avg_replicate else degrad_videos
	# fd part ends

	# fb part:
	loss_budget = 0.0
	logits_budget = tf.zeros([TRAIN_BATCH_SIZE, COMMON_FLAGS.NUM_CLASSES_BUDGET])
	logits = budgetNet(degrad_videos, depth_multiplier=0.6, is_training=istraining_placeholder)
	loss = tower_loss_xentropy_sparse(logits, budget_labels, use_weight_decay=False, name_scope=scope)
	logits_budget += logits
	loss_budget += loss
	# fb part ends.
	
	return loss_budget, logits_budget
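
tower_loss_xentropy_sparse is called throughout these snippets but never defined in them, and its signature varies slightly between examples. A plausible definition matching the keyword-style call above, offered as an assumption rather than the original implementation:

def tower_loss_xentropy_sparse(logits, labels, use_weight_decay=False,
                               name_scope=None):
    # Mean sparse softmax cross-entropy over the tower's batch:
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)
    loss = tf.reduce_mean(xentropy, name='xentropy_mean')
    if use_weight_decay:
        # Add any weight-decay terms collected under this tower's scope:
        wd_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                      name_scope)
        if wd_losses:
            loss += tf.add_n(wd_losses)
    return loss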
Code Example #3
def visualize_degradation(checkpoint_dir):
    cfg = yaml.safe_load(open('params.yml'))
    pp = pprint.PrettyPrinter()
    pp.pprint(cfg)
    if not os.path.exists(cfg['VIS_DIR']):
        os.makedirs(cfg['VIS_DIR'])

    videos_placeholder = tf.placeholder(
        tf.float32,
        shape=(cfg['BATCH_SIZE'] * cfg['GPU_NUM'], cfg['DEPTH'], 112, 112,
               cfg['NCHANNEL']))

    videos_degraded_lst = []
    with tf.variable_scope(tf.get_variable_scope()) as scope:
        for gpu_index in range(0, cfg['GPU_NUM']):
            with tf.device('/gpu:%d' % gpu_index):
                print('/gpu:%d' % gpu_index)
                with tf.name_scope('%s_%d' % ('gpu', gpu_index)) as scope:
                    videos_degraded = residualNet(
                        videos_placeholder[gpu_index *
                                           cfg['BATCH_SIZE']:(gpu_index + 1) *
                                           cfg['BATCH_SIZE']],
                        is_video=True)
                    if cfg['USE_AVG_REPLICATE']:
                        videos_degraded = _avg_replicate(videos_degraded)
                    videos_degraded_lst.append(videos_degraded)

                    # Reuse variables for the next tower.
                    tf.get_variable_scope().reuse_variables()
    videos_degraded_op = tf.concat(videos_degraded_lst, 0)
    train_files = [
        os.path.join(cfg['TRAIN_FILES_DIR'], f)
        for f in os.listdir(cfg['TRAIN_FILES_DIR']) if f.endswith('.tfrecords')
    ]
    val_files = [
        os.path.join(cfg['VAL_FILES_DIR'], f)
        for f in os.listdir(cfg['VAL_FILES_DIR']) if f.endswith('.tfrecords')
    ]

    print(train_files)
    print(val_files)
    videos_op, action_labels_op, actor_labels_op = inputs_videos(
        filenames=val_files,
        batch_size=cfg['GPU_NUM'] * cfg['BATCH_SIZE'],
        num_epochs=1,
        num_threads=cfg['NUM_THREADS'],
        num_examples_per_epoch=cfg['NUM_EXAMPLES_PER_EPOCH'],
        shuffle=False)

    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        saver = tf.train.Saver(tf.trainable_variables())
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir=checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Session restored from trained model at {}!'.format(
                ckpt.model_checkpoint_path))
        else:
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
                                    checkpoint_dir)
        try:
            #videos_degraded_lst = []
            #action_labels_lst = []
            #actor_labels_lst = []
            directory = cfg['VIS_DIR']
            while not coord.should_stop():
                videos, action_labels, actor_labels = sess.run(
                    [videos_op, action_labels_op, actor_labels_op])
                videos_degraded_value = sess.run(
                    videos_degraded_op, feed_dict={videos_placeholder: videos})

                #videos_degraded_lst.append(videos_degraded_value*255)
                #videos_degraded_lst.extend(videos_degraded_value)
                #action_labels_lst.extend(action_labels)
                #actor_labels_lst.extend(actor_labels)

                visualize(directory, videos_degraded_value, videos,
                          action_labels, actor_labels)
                #raise tf.errors.OutOfRangeError
        except tf.errors.OutOfRangeError:
            print('Done testing on all the examples')
        finally:
            coord.request_stop()
            coord.join(threads)
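
The visualize() helper used above is not defined in this snippet. A hypothetical sketch of what it might do, assuming NumPy and imageio are available, pixel values lie in [0, 1], and with a file-naming scheme invented for illustration:

import numpy as np
import imageio  # assumption: not imported in the original script

def visualize(directory, videos_degraded, videos, action_labels, actor_labels):
    for i, (orig, deg) in enumerate(zip(videos, videos_degraded)):
        for t in range(orig.shape[0]):
            # Place the original and degraded frames side by side:
            pair = np.concatenate([orig[t], deg[t]], axis=1)
            fname = os.path.join(
                directory,
                'actor{}_action{}_clip{}_frame{}.png'.format(
                    actor_labels[i], action_labels[i], i, t))
            imageio.imwrite(fname, np.clip(pair * 255, 0, 255).astype(np.uint8))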
Code Example #4
def run_training(cfg, degrad_ckpt_file, ckpt_dir, model_name, max_steps,
                 train_from_scratch, ckpt_path):
    # Create model directory
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    continue_from_trained_model = False

    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    config.gpu_options.allow_growth = True
    with tf.Graph().as_default():
        with tf.Session(config=config) as sess:
            global_step = tf.get_variable(
                'global_step', [],
                initializer=tf.constant_initializer(0),
                trainable=False)
            videos_placeholder, labels_placeholder, istraining_placeholder = placeholder_inputs(
                cfg['DATA']['BATCH_SIZE'] * cfg['TRAIN']['GPU_NUM'])
            # Build the budget network against the is_training placeholder so
            # that batch norm switches to inference mode during validation runs
            # (matching code example #6):
            network_fn = nets_factory.get_network_fn(
                model_name,
                num_classes=cfg['DATA']['NUM_CLASSES'],
                weight_decay=cfg['TRAIN']['WEIGHT_DECAY'],
                is_training=istraining_placeholder)
            tower_grads = []
            logits_lst = []
            losses_lst = []
            opt = tf.train.AdamOptimizer(1e-4)
            with tf.variable_scope(tf.get_variable_scope()) as scope:
                for gpu_index in range(0, cfg['TRAIN']['GPU_NUM']):
                    with tf.device('/gpu:%d' % gpu_index):
                        print('/gpu:%d' % gpu_index)
                        with tf.name_scope('%s_%d' %
                                           ('gpu', gpu_index)) as scope:
                            degrad_videos = residualNet(
                                videos_placeholder[gpu_index *
                                                   cfg['TRAIN']['BATCH_SIZE']:
                                                   (gpu_index + 1) *
                                                   cfg['TRAIN']['BATCH_SIZE']],
                                is_video=True)
                            degrad_videos = tf.reshape(degrad_videos, [
                                cfg['TRAIN']['BATCH_SIZE'] *
                                cfg['DATA']['DEPTH'], cfg['DATA']['HEIGHT'],
                                cfg['DATA']['WIDTH'], cfg['DATA']['NCHANNEL']
                            ])

                            logits, _ = network_fn(degrad_videos)
                            logits = tf.reshape(logits, [
                                -1, cfg['DATA']['DEPTH'],
                                cfg['DATA']['NUM_CLASSES']
                            ])
                            logits = tf.reduce_mean(logits,
                                                    axis=1,
                                                    keep_dims=False)
                            logits_lst.append(logits)
                            loss = tower_loss_xentropy_sparse(
                                scope,
                                logits,
                                labels=labels_placeholder[
                                    gpu_index *
                                    cfg['TRAIN']['BATCH_SIZE']:(gpu_index +
                                                                1) *
                                    cfg['TRAIN']['BATCH_SIZE'], :])

                            losses_lst.append(loss)
                            print([v.name for v in tf.trainable_variables()])
                            varlist_budget = [
                                v for v in tf.trainable_variables()
                                if any(x in v.name for x in [
                                    "InceptionV1", "InceptionV2",
                                    "resnet_v1_50", "resnet_v1_101",
                                    "resnet_v2_50", "resnet_v2_101",
                                    "MobilenetV1_1.0", "MobilenetV1_0.75",
                                    "MobilenetV1_0.5", 'MobilenetV1_0.25'
                                ])
                            ]

                            varlist_degrad = [
                                v for v in tf.trainable_variables()
                                if v not in varlist_budget
                            ]
                            tower_grads.append(
                                opt.compute_gradients(loss, varlist_budget))
                            tf.get_variable_scope().reuse_variables()
            loss_op = tf.reduce_mean(losses_lst)
            logits_op = tf.concat(logits_lst, 0)
            acc_op = accuracy(logits_op, labels_placeholder)
            grads = average_gradients(tower_grads)

            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            print(update_ops)
            with tf.control_dependencies([tf.group(*update_ops)]):
                train_op = opt.apply_gradients(grads, global_step=global_step)

            train_files = [
                os.path.join(cfg['DATA']['TRAIN_FILES_DIR'], f)
                for f in os.listdir(cfg['DATA']['TRAIN_FILES_DIR'])
                if f.endswith('.tfrecords')
            ]
            val_files = [
                os.path.join(cfg['DATA']['VAL_FILES_DIR'], f)
                for f in os.listdir(cfg['DATA']['VAL_FILES_DIR'])
                if f.endswith('.tfrecords')
            ]
            print(
                '#############################Reading from files###############################'
            )
            print(train_files)
            print(val_files)

            tr_videos_op, _, tr_labels_op = inputs_videos(
                filenames=train_files,
                batch_size=cfg['TRAIN']['BATCH_SIZE'] *
                cfg['TRAIN']['GPU_NUM'],
                num_epochs=None,
                num_threads=cfg['DATA']['NUM_THREADS'],
                num_examples_per_epoch=cfg['TRAIN']['NUM_EXAMPLES_PER_EPOCH'],
                shuffle=True)
            val_videos_op, _, val_labels_op = inputs_videos(
                filenames=val_files,
                batch_size=cfg['TRAIN']['BATCH_SIZE'] *
                cfg['TRAIN']['GPU_NUM'],
                num_epochs=None,
                num_threads=cfg['DATA']['NUM_THREADS'],
                num_examples_per_epoch=cfg['TRAIN']['NUM_EXAMPLES_PER_EPOCH'],
                shuffle=True)

            init_op = tf.group(tf.local_variables_initializer(),
                               tf.global_variables_initializer())
            sess.run(init_op)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            gvar_list = tf.global_variables()
            bn_moving_vars = [g for g in gvar_list if 'moving_mean' in g.name]
            bn_moving_vars += [
                g for g in gvar_list if 'moving_variance' in g.name
            ]
            print([var.name for var in bn_moving_vars])

            def restore_model(path, varlist, modulename):
                # `path` may be either a checkpoint file or a checkpoint directory.
                import re
                regex = re.compile(r'(MobilenetV1_?)(\d*\.?\d*)',
                                   re.IGNORECASE)
                if 'mobilenet' in modulename:
                    # Strip the depth-multiplier suffix so variable names match
                    # the pretrained MobilenetV1 checkpoint:
                    varlist = {
                        regex.sub('MobilenetV1', v.name[:-2]): v
                        for v in varlist
                    }
                if os.path.isfile(path):
                    print(varlist)
                    saver = tf.train.Saver(varlist)
                    saver.restore(sess, path)
                    print(
                        '#############################Session restored from pretrained model at {}!#############################'
                        .format(path))
                else:
                    ckpt = tf.train.get_checkpoint_state(checkpoint_dir=path)
                    if ckpt and ckpt.model_checkpoint_path:
                        saver = tf.train.Saver(varlist)
                        saver.restore(sess, ckpt.model_checkpoint_path)
                        print(
                            '#############################Session restored from pretrained model at {}!#############################'
                            .format(ckpt.model_checkpoint_path))

            if continue_from_trained_model:
                varlist = varlist_budget
                varlist += bn_moving_vars
                saver = tf.train.Saver(varlist)
                ckpt = tf.train.get_checkpoint_state(checkpoint_dir=ckpt_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    print(
                        '#############################Session restored from trained model at {}!###############################'
                        .format(ckpt.model_checkpoint_path))
                else:
                    raise FileNotFoundError(errno.ENOENT,
                                            os.strerror(errno.ENOENT),
                                            ckpt_dir)
            else:
                if not train_from_scratch:
                    saver = tf.train.Saver(varlist_degrad)
                    print(degrad_ckpt_file)
                    saver.restore(sess, degrad_ckpt_file)

                    varlist = [
                        v for v in varlist_budget + bn_moving_vars
                        if not any(x in v.name for x in ["logits"])
                    ]
                    restore_model(ckpt_path, varlist, model_name)

            saver = tf.train.Saver(tf.trainable_variables() + bn_moving_vars,
                                   max_to_keep=1)
            for step in range(max_steps):
                start_time = time.time()
                train_videos, train_labels = sess.run(
                    [tr_videos_op, tr_labels_op])
                _, loss_value = sess.run(
                    [train_op, loss_op],
                    feed_dict={
                        videos_placeholder: train_videos,
                        labels_placeholder: train_labels,
                        istraining_placeholder: True
                    })
                assert not np.isnan(
                    np.mean(loss_value)), 'Model diverged with loss = NaN'
                duration = time.time() - start_time
                print('Step: {:4d} time: {:.4f} loss: {:.8f}'.format(
                    step, duration, np.mean(loss_value)))
                if step % cfg['TRAIN']['VAL_STEP'] == 0:
                    start_time = time.time()
                    tr_videos, tr_labels = sess.run(
                        [tr_videos_op, tr_labels_op])
                    acc, loss_value = sess.run(
                        [acc_op, loss_op],
                        feed_dict={
                            videos_placeholder: tr_videos,
                            labels_placeholder: tr_labels,
                            istraining_placeholder: False
                        })
                    print(
                        "Step: {:4d} time: {:.4f}, training accuracy: {:.5f}, loss: {:.8f}"
                        .format(step,
                                time.time() - start_time, acc, loss_value))

                    # train_writer.add_summary(summary, step)

                    start_time = time.time()
                    val_videos, val_labels = sess.run(
                        [val_videos_op, val_labels_op])
                    acc, loss_value = sess.run(
                        [acc_op, loss_op],
                        feed_dict={
                            videos_placeholder: val_videos,
                            labels_placeholder: val_labels,
                            istraining_placeholder: False
                        })
                    print(
                        "Step: {:4d} time: {:.4f}, validation accuracy: {:.5f}, loss: {:.8f}"
                        .format(step,
                                time.time() - start_time, acc, loss_value))
                    # test_writer.add_summary(summary, step)

                    # Save a checkpoint and evaluate the model periodically.
                if step % cfg['TRAIN']['SAVE_STEP'] == 0 or (
                        step + 1) == max_steps:
                    checkpoint_path = os.path.join(ckpt_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

            coord.request_stop()
            coord.join(threads)

    print("done")
Code Example #5
def run_testing(cfg, degrad_ckpt_file, ckpt_dir, model_name, is_training):
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    config.gpu_options.allow_growth = True
    with tf.Graph().as_default():
        with tf.Session(config=config) as sess:
            global_step = tf.get_variable(
                'global_step', [],
                initializer=tf.constant_initializer(0),
                trainable=False)
            videos_placeholder, _, labels_placeholder, _, _ = placeholder_inputs(
                cfg['DATA']['BATCH_SIZE'] * cfg['TRAIN']['GPU_NUM'])
            istraining_placeholder = tf.placeholder(tf.bool)
            # Build the budget network against the is_training placeholder so
            # that batch norm runs in inference mode when testing:
            network_fn = nets_factory.get_network_fn(
                model_name,
                num_classes=cfg['DATA']['NUM_CLASSES'],
                weight_decay=cfg['TRAIN']['WEIGHT_DECAY'],
                is_training=istraining_placeholder)
            logits_lst = []
            with tf.variable_scope(tf.get_variable_scope()) as scope:
                for gpu_index in range(0, cfg['TRAIN']['GPU_NUM']):
                    with tf.device('/gpu:%d' % gpu_index):
                        print('/gpu:%d' % gpu_index)
                        with tf.name_scope('%s_%d' %
                                           ('gpu', gpu_index)) as scope:
                            degrad_videos = residualNet(
                                videos_placeholder[gpu_index *
                                                   cfg['TRAIN']['BATCH_SIZE']:
                                                   (gpu_index + 1) *
                                                   cfg['TRAIN']['BATCH_SIZE']],
                                is_video=True)
                            degrad_videos = tf.reshape(degrad_videos, [
                                cfg['TRAIN']['BATCH_SIZE'] *
                                cfg['DATA']['DEPTH'], cfg['DATA']['HEIGHT'],
                                cfg['DATA']['WIDTH'], cfg['DATA']['NCHANNEL']
                            ])
                            logits, _ = network_fn(degrad_videos)
                            logits = tf.reshape(logits, [
                                -1, cfg['DATA']['DEPTH'],
                                cfg['DATA']['NUM_CLASSES']
                            ])
                            logits = tf.reduce_mean(logits,
                                                    axis=1,
                                                    keep_dims=False)
                            logits_lst.append(logits)
                            tf.get_variable_scope().reuse_variables()
            logits_op = tf.concat(logits_lst, 0)

            right_count_op = tf.reduce_sum(
                tf.cast(
                    tf.equal(tf.argmax(tf.nn.softmax(logits_op), axis=1),
                             labels_placeholder), tf.int32))
            softmax_logits_op = tf.nn.softmax(logits_op)

            train_files = [
                os.path.join(cfg['DATA']['TRAIN_FILES_DIR'], f)
                for f in os.listdir(cfg['DATA']['TRAIN_FILES_DIR'])
                if f.endswith('.tfrecords')
            ]
            val_files = [
                os.path.join(cfg['DATA']['VAL_FILES_DIR'], f)
                for f in os.listdir(cfg['DATA']['VAL_FILES_DIR'])
                if f.endswith('.tfrecords')
            ]
            print(
                '#############################Reading from files###############################'
            )
            print(train_files)
            print(val_files)

            if is_training:
                videos_op, _, labels_op = inputs_videos(
                    filenames=train_files,
                    batch_size=cfg['TRAIN']['BATCH_SIZE'] *
                    cfg['TRAIN']['GPU_NUM'],
                    num_epochs=None,
                    num_threads=cfg['DATA']['NUM_THREADS'],
                    num_examples_per_epoch=cfg['TRAIN']
                    ['NUM_EXAMPLES_PER_EPOCH'],
                    shuffle=False)
            else:
                videos_op, _, labels_op = inputs_videos(
                    filenames=val_files,
                    batch_size=cfg['TRAIN']['BATCH_SIZE'] *
                    cfg['TRAIN']['GPU_NUM'],
                    num_epochs=None,
                    num_threads=cfg['DATA']['NUM_THREADS'],
                    num_examples_per_epoch=cfg['TRAIN']
                    ['NUM_EXAMPLES_PER_EPOCH'],
                    shuffle=False)

            init_op = tf.group(tf.local_variables_initializer(),
                               tf.global_variables_initializer())
            sess.run(init_op)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            varlist_budget = [
                v for v in tf.trainable_variables()
                if any(x in v.name for x in [
                    "InceptionV1", "InceptionV2", "resnet_v1_50",
                    "resnet_v1_101", "resnet_v2_50", "resnet_v2_101",
                    "MobilenetV1_1.0", "MobilenetV1_0.75", "MobilenetV1_0.5",
                    'MobilenetV1_0.25'
                ])
            ]

            varlist_degrad = [
                v for v in tf.trainable_variables() if v not in varlist_budget
            ]

            saver = tf.train.Saver(varlist_degrad)
            saver.restore(sess, degrad_ckpt_file)

            gvar_list = tf.global_variables()
            bn_moving_vars = [g for g in gvar_list if 'moving_mean' in g.name]
            bn_moving_vars += [
                g for g in gvar_list if 'moving_variance' in g.name
            ]
            saver = tf.train.Saver(tf.trainable_variables() + bn_moving_vars)
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir=ckpt_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Session restored from pretrained budget model at {}!'.
                      format(ckpt.model_checkpoint_path))
            else:
                raise FileNotFoundError(errno.ENOENT,
                                        os.strerror(errno.ENOENT), ckpt_dir)
            total_v = 0.0
            test_correct_num = 0.0
            try:
                while not coord.should_stop():
                    videos, labels = sess.run([videos_op, labels_op])
                    # write_video(videos, labels)
                    feed = {
                        videos_placeholder: videos,
                        labels_placeholder: labels,
                        istraining_placeholder: False
                    }
                    right, softmax_logits = sess.run(
                        [right_count_op, softmax_logits_op], feed_dict=feed)
                    test_correct_num += right
                    total_v += labels.shape[0]
                    print(softmax_logits.shape)
                    # print(tf.argmax(softmax_logits, 1).eval(session=sess))
                    # print(logits.eval(feed_dict=feed, session=sess))
                    # print(labels)
            except tf.errors.OutOfRangeError:
                print('Done testing on all the examples')
            finally:
                coord.request_stop()
            print('test acc:', test_correct_num / total_v, 'test_correct_num:',
                  test_correct_num, 'total_v:', total_v)
            with open('TwoFoldEvaluationResults_{}.txt'.format(model_name),
                      'w') as wf:
                wf.write('test acc: {}\ttest_correct_num: {}\ttotal_v: {}\n'.format(
                    test_correct_num / total_v, test_correct_num, total_v))
            coord.join(threads)

    print("done")
Code Example #6
def build_graph(model_name):
    '''
    Returns:
        graph, init_op, train_op,
        logits_op, acc_op, correct_count_op, loss_op,
        tr_videos_op, tr_actor_labels_op, val_videos_op, val_actor_labels_op, test_videos_op, test_actor_labels_op,
        videos_placeholder, labels_placeholder, istraining_placeholder,
        varlist_budget, varlist_degrad
    '''
    graph = tf.Graph()
    with graph.as_default():
        # global step:
        global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
        # placeholder inputs for the graph:
        videos_placeholder, labels_placeholder, istraining_placeholder = placeholder_inputs(cfg['TRAIN']['BATCH_SIZE'] * FLAGS.GPU_NUM, cfg)
        # budget model (the classifier applied to the degraded videos):
        network_fn = nets_factory.get_network_fn(model_name,
                                                num_classes=cfg['DATA']['NUM_CLASSES'],
                                                weight_decay=cfg['TRAIN']['WEIGHT_DECAY'],
                                                is_training=istraining_placeholder)
        # grads, logits, loss list:
        tower_grads = []
        logits_lst = []
        losses_lst = []
        # optimizer:
        opt = tf.train.AdamOptimizer(1e-4)

        with tf.variable_scope(tf.get_variable_scope()) as scope:
            for gpu_index in range(0, FLAGS.GPU_NUM):
                with tf.device('/gpu:%d' % gpu_index):
                    print('/gpu:%d' % gpu_index)
                    with tf.name_scope('%s_%d' % ('gpu', gpu_index)) as scope:

                        videos = videos_placeholder[gpu_index * cfg['TRAIN']['BATCH_SIZE']:(gpu_index + 1) * cfg['TRAIN']['BATCH_SIZE']]
                        budget_labels = labels_placeholder[gpu_index * cfg['TRAIN']['BATCH_SIZE']:(gpu_index + 1) * cfg['TRAIN']['BATCH_SIZE']]

                        degrad_videos = residualNet(videos, is_video=True)
                        degrad_videos = tf.reshape(degrad_videos, [cfg['TRAIN']['BATCH_SIZE'] * cfg['DATA']['DEPTH'], cfg['DATA']['CROP_HEIGHT'], cfg['DATA']['CROP_WIDTH'], cfg['DATA']['NCHANNEL']])
                        # logits:
                        logits, _ = network_fn(degrad_videos)
                        logits = tf.reshape(logits, [-1, cfg['DATA']['DEPTH'], cfg['DATA']['NUM_CLASSES']])
                        logits = tf.reduce_mean(logits, axis=1, keep_dims=False)
                        # loss:
                        loss = tower_loss_xentropy_sparse(scope, logits, budget_labels)
                        # append list:
                        logits_lst.append(logits)
                        losses_lst.append(loss)

                        # variable list of the budget model:
                        varlist_budget = [v for v in tf.trainable_variables() if
                                            any(x in v.name for x in ["InceptionV1", "InceptionV2",
                                            "resnet_v1_50", "resnet_v1_101", "resnet_v2_50", "resnet_v2_101",
                                            'MobilenetV1'])]
                        # variable list of the degradation model:
                        varlist_degrad = [v for v in tf.trainable_variables() if v not in varlist_budget]
                        # append grads:
                        tower_grads.append(opt.compute_gradients(loss, varlist_budget))

                        # reuse variables:
                        tf.get_variable_scope().reuse_variables()
        # loss tensor:
        loss_op = tf.reduce_mean(losses_lst)
        # acc tensor:
        logits_op = tf.concat(logits_lst, 0)
        acc_op = accuracy(logits_op, labels_placeholder)
        # how many is correctly classified:
        correct_count_op = tf.reduce_sum(
                tf.cast(tf.equal(tf.argmax(tf.nn.softmax(logits_op), axis=1), labels_placeholder), tf.int32))
        # grads tensor:
        grads = average_gradients(tower_grads) # average gradient over all GPUs

        # apply gradients operation:
        with tf.control_dependencies([tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))]):
            train_op = opt.apply_gradients(grads, global_step=global_step)

        # input operations:
        tr_videos_op, _, tr_actor_labels_op = create_videos_reading_ops(is_train=True, is_val=False, cfg=cfg)
        val_videos_op, _, val_actor_labels_op = create_videos_reading_ops(is_train=False, is_val=True, cfg=cfg)
        test_videos_op, _, test_actor_labels_op = create_videos_reading_ops(is_train=False, is_val=False, cfg=cfg)
        # initialize operations:
        init_op = tf.group(tf.local_variables_initializer(), tf.global_variables_initializer())

        return (graph, init_op, train_op,
                logits_op, acc_op, correct_count_op, loss_op,
                tr_videos_op, tr_actor_labels_op, val_videos_op, val_actor_labels_op, test_videos_op, test_actor_labels_op,
                videos_placeholder, labels_placeholder, istraining_placeholder,
                varlist_budget, varlist_degrad)
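
A minimal sketch of how build_graph's outputs might be consumed in a training session; the model name, the step count, and the absence of checkpoint restoring are assumptions:

(graph, init_op, train_op,
 logits_op, acc_op, correct_count_op, loss_op,
 tr_videos_op, tr_actor_labels_op, val_videos_op, val_actor_labels_op,
 test_videos_op, test_actor_labels_op,
 videos_placeholder, labels_placeholder, istraining_placeholder,
 varlist_budget, varlist_degrad) = build_graph(model_name='mobilenet_v1')

config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(graph=graph, config=config) as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for step in range(1000):  # assumed number of steps
        videos, labels = sess.run([tr_videos_op, tr_actor_labels_op])
        _, loss_value = sess.run(
            [train_op, loss_op],
            feed_dict={videos_placeholder: videos,
                       labels_placeholder: labels,
                       istraining_placeholder: True})
        print('Step {}: loss {}'.format(step, loss_value))
    coord.request_stop()
    coord.join(threads)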