        # Restore the teacher's trained weights from the .mat checkpoint,
        # matching keys with the model-scope prefix stripped
        model_name = teacher.variables[0].name.split('/')[0]
        trained = sio.loadmat(args.trained_param)
        n = 0
        for v in teacher.variables:
            if model_name in v.name:
                v.assign(trained[v.name[len(model_name) + 1:]])
                n += 1
        print(n, 'params loaded')

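        # Dynamically import the selected distillation method from the distiller package and attach it to the model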
        Knowledge = importlib.import_module('distiller.' + args.Knowledge)
        model.distiller = Knowledge.distill(args, model, teacher)
        model(np.zeros([1] + args.input_shape, dtype=np.float32),
              training=False)

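    # Build the train/test step functions, metrics, and optimizer for the model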
    train_step, train_loss, train_accuracy,\
    test_step,  test_loss,  test_accuracy, optimizer = op_util.Optimizer(model, args.weight_decay, args.learning_rate)

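    # Decay points given as fractions (< 1) are converted to absolute epoch indices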
    args.decay_points = [
        int(dp * args.train_epoch) if dp < 1 else int(dp)
        for dp in args.decay_points
    ]

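    # Step decay: the base LR is multiplied by decay_rate once for every decay point already passed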
    def scheduler(epoch):
        lr = args.learning_rate
        for dp in args.decay_points:
            if epoch >= dp:
                lr *= args.decay_rate
        return lr

    summary_writer = tf.summary.create_file_writer(args.train_path,
                                                   flush_millis=30000)
Example #2
    tf.config.experimental.set_memory_growth(gpus[0], True)
    tf.config.experimental.set_visible_devices(gpus[gpu_num], 'GPU')

    summary_writer = tf.summary.create_file_writer(args.train_dir)
 
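    # Build the tf.data pipelines; pre_processing(...) returns a map function for the (batched) dataset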
    train_images, train_labels, val_images, val_labels, pre_processing = Dataloader(args.dataset, '')
    train_ds = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(100).batch(batch_size)
    train_ds = train_ds.map(pre_processing(is_training=True))
    test_ds = tf.data.Dataset.from_tensor_slices((val_images, val_labels)).batch(val_batch_size)
    test_ds = test_ds.map(pre_processing(is_training=False))
 
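    # Student network: WideResNet-40-4; the class count is inferred from the labels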
    student_model = WResNet.Model(architecture=[40, 4],
                                  weight_decay=weight_decay,
                                  num_class=np.max(train_labels) + 1)
 
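    # Step-decay LR schedule and train/test step builders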
    LR_scheduler = op_util.learning_rate_scheduler(Learning_rate, train_epoch,
                                                   decay_points=[0.3, 0.6, 0.8],
                                                   decay_rate=2e-1)
    train_step, train_loss, train_accuracy,\
    test_step,  test_loss,  test_accuracy = op_util.Optimizer(student_model, Learning_rate)
 
    with summary_writer.as_default():
        step = 0

        for epoch in range(train_epoch):
            lr = LR_scheduler(epoch)
            train_time = time.time()
            for images, labels in train_ds:
                train_step(images, labels, lr)
                step += 1
                if step % should_log == 0:
                    template = 'Global step {0:5d}: loss = {1:0.4f} ({2:1.3f} sec/step)'
                    print(template.format(step, train_loss.result(), (time.time() - train_time) / should_log))
                    train_time = time.time()
            tf.summary.scalar('Categorical_loss/train', train_loss.result(), step=epoch + 1)
Example #3
        args.decay_points = [
            int(dp * args.train_epoch) if dp < 1 else int(dp)
            for dp in args.decay_points
        ]
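        # Convert epoch decay points to step boundaries and build a piecewise-constant LR schedule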
        LR = op_util.PiecewiseConstantDecay(
            [dp * cardinality for dp in args.decay_points], [
                args.learning_rate * args.decay_rate**i
                for i in range(len(args.decay_points) + 1)
            ])

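    # Slimmable models use a dedicated optimizer that handles the multiple sub-network widths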
    if args.slimmable:
        train_step, train_loss, train_accuracy,\
        test_step,  test_loss,  test_accuracy = op_util.Slimmable_optimizer(args, model, args.weight_decay, args.learning_rate)
    else:
        train_step, train_loss, train_accuracy,\
        test_step,  test_loss,  test_accuracy = op_util.Optimizer(model, args.weight_decay, LR)

    with summary_writer.as_default():
        step = 0
        logs = {'training_acc': [], 'validation_acc': []}

        model_name = model.variables[0].name.split('/')[0]
        train_time = time.time()
        init_epoch = 0
        if args.slimmable:
            ## Warm-up training
            if args.trained_slimmable is None or 'None' in args.trained_slimmable:
                print('Warm-up training starts')
                slim_util.Warm_up(args, model, train_step,
                                  ceil(args.train_epoch * .3),
                                  datasets['train_sub'], train_loss,
Example #4
    tf.debugging.set_log_device_placement(False)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    tf.config.experimental.set_memory_growth(gpus[0], True)

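    # Flatten the images and scale pixel values to [0, 1]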
    _, _, test_images, test_labels = Dataloader(args.dataset, '')
    test_images = test_images.reshape(test_images.shape[0],
                                      -1).astype(np.float32)
    test_images = test_images / 255

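    # Reuse a pre-built affinity matrix if available; otherwise compute the k-NN affinity graph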
    if os.path.isfile(home_path + '/pre_built/test_full.mat'):
        DAD = sio.loadmat(home_path + '/pre_built/test_full.mat')
    else:
        DAD = get_affinity(home_path, test_images, k)
    model = GALA.Model(DAD=DAD, name='GALA', trainable=True)

    init_step, init_loss, finetuning, validate, ACC, NMI, ARI = op_util.Optimizer(
        model, [train_lr, finetune_lr])

    summary_writer = tf.summary.create_file_writer(args.train_dir)
    with summary_writer.as_default():
        step = 0

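        # Early-stopping bookkeeping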
        best_loss = 1e12
        stopping_step = 0

        train_time = time.time()
        for epoch in range(maximum_epoch):
            # init_step(test_images, test_labels)
            init_step(test_images, test_labels, weight_decay, k)
            step += 1

            if epoch % do_log == 0 or epoch == maximum_epoch - 1:
Example #5
    args.input_size = list(train_images.shape[1:])

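    # Validation pipeline: deterministic preprocessing, batched, cached, and prefetched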
    test_ds = tf.data.Dataset.from_tensor_slices((val_images, val_labels))
    test_ds = test_ds.map(pre_processing(is_training=False),
                          num_parallel_calls=tf.data.experimental.AUTOTUNE)
    test_ds = test_ds.batch(args.val_batch_size)
    test_ds = test_ds.cache().prefetch(tf.data.experimental.AUTOTUNE)

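    # Parse depth and width from the architecture string (e.g. 'WResNet-40-4') and build the model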
    if 'WResNet' in args.arch:
        arch = [int(a) for a in args.arch.split('-')[1:]]
        model = WResNet.Model(architecture=arch,
                              num_class=np.max(train_labels) + 1,
                              name='WResNet',
                              trainable=True)

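    # Only the test step is used here, so weight decay and learning rate are zero;
    # the dummy forward pass builds the variables so the checkpoint can be assigned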
    _, _, _, test_step, test_loss, test_accuracy, _ = op_util.Optimizer(
        model, 0., 0.)
    model(np.zeros([1] + args.input_size, dtype=np.float32), training=False)

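    # Restore the trained parameters, stripping the model-name prefix from each variable name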
    trained = sio.loadmat(args.trained_param)
    n = 0
    model_name = model.variables[0].name.split('/')[0]
    for v in model.variables:
        v.assign(trained[v.name[len(model_name) + 1:]])
        n += 1
    print(n, 'params loaded')

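    # Baseline accuracy of the restored model; reset the metrics afterwards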
    for test_images, test_labels in test_ds:
        test_step(test_images, test_labels)
    ori_acc = test_accuracy.result().numpy()
    test_loss.reset_states()
    test_accuracy.reset_states()
Example #6
def main(_):
    ### define paths and hyper-parameters
    Learning_rate = 1e-1

    batch_size = 128
    val_batch_size = 200
    train_epoch = 100
    init_epoch = 20 if FLAGS.Distillation in {'IEP'} else 0

    total_epoch = init_epoch + train_epoch
    weight_decay = 5e-4

    should_log = 200
    save_summaries_secs = 20
    tf.logging.set_verbosity(tf.logging.INFO)
    gpu_num = '0'

    if FLAGS.Distillation == 'None':
        FLAGS.Distillation = None

    train_images, train_labels, val_images, val_labels, pre_processing, teacher = Dataloader(
        FLAGS.dataset, home_path, FLAGS.model_name)
    num_label = int(np.max(train_labels) + 1)

    dataset_len, *image_size = train_images.shape

    with tf.Graph().as_default() as graph:
        # make placeholder for inputs
        image_ph = tf.placeholder(tf.uint8, [None] + image_size)
        label_ph = tf.placeholder(tf.int32, [None])

        is_training_ph = tf.placeholder(tf.bool, [])

        # pre-processing
        image = pre_processing(image_ph, is_training_ph)
        label = tf.contrib.layers.one_hot_encoding(label_ph,
                                                   num_label,
                                                   on_value=1.0)

        # make global step
        global_step = tf.train.create_global_step()
        epoch = tf.floor_div(
            tf.cast(global_step, tf.float32) * batch_size, dataset_len)
        max_number_of_steps = int(dataset_len * total_epoch) // batch_size + 1

        # make learning rate scheduler
        LR = learning_rate_scheduler(Learning_rate,
                                     [epoch, init_epoch, train_epoch],
                                     [0.3, 0.6, 0.8], 0.2)

        ## load Net
        class_loss, accuracy = MODEL(FLAGS.model_name,
                                     FLAGS.main_scope,
                                     image,
                                     label,
                                     [is_training_ph, epoch < init_epoch],
                                     Distillation=FLAGS.Distillation)

        # make training operator
        if FLAGS.Distillation in {'IEP'}:
            train_op, train_op2 = op_util.Optimizer_w_IEP(
                class_loss, LR, weight_decay, epoch, init_epoch, global_step)
        else:
            train_op = op_util.Optimizer(class_loss, LR, weight_decay, epoch,
                                         init_epoch, global_step)

        ## collect summary ops for plotting in tensorboard
        summary_op = tf.summary.merge(tf.get_collection(
            tf.GraphKeys.SUMMARIES)[:1],
                                      name='summary_op')

        ## make placeholder and summary op for training and validation results
        train_acc_place = tf.placeholder(dtype=tf.float32)
        val_acc_place = tf.placeholder(dtype=tf.float32)
        val_summary = [
            tf.summary.scalar('accuracy/training_accuracy', train_acc_place),
            tf.summary.scalar('accuracy/validation_accuracy', val_acc_place)
        ]
        val_summary_op = tf.summary.merge(list(val_summary),
                                          name='val_summary_op')

        ## start training
        train_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                             graph,
                                             flush_secs=save_summaries_secs)
        config = ConfigProto()
        config.gpu_options.visible_device_list = gpu_num
        config.gpu_options.allow_growth = True

        val_itr = len(val_labels) // val_batch_size
        logs = {'training_acc': [], 'validation_acc': []}
        with tf.Session(config=config) as sess:
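            # Seed the graph with teacher weights by overriding each matching variable's initializer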
            if FLAGS.Distillation is not None:
                global_variables = tf.get_collection(
                    tf.GraphKeys.GLOBAL_VARIABLES)
                n = 0
                for v in global_variables:
                    if teacher.get(v.name[:-2]) is not None:
                        v._initial_value = tf.constant(
                            teacher[v.name[:-2]].reshape(
                                *v.get_shape().as_list()))
                        v._initializer_op = tf.assign(v._variable,
                                                      v._initial_value,
                                                      name=v.name[:-2] +
                                                      '/Assign').op
                        n += 1
                print('%d Teacher params assigned' % n)
            sess.run(tf.global_variables_initializer())

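            # Training loop; the current epoch is derived from step * batch_size // dataset_len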
            sum_train_accuracy = []
            time_elapsed = []
            total_loss = []
            idx = list(range(train_labels.shape[0]))
            shuffle(idx)
            epoch_ = 0

            for step in range(max_number_of_steps):
                start_time = time.time()

                ## feed data
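                # During the IEP warm-up phase (epoch < init_epoch), the auxiliary train_op2 is used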
                if (step * batch_size) // dataset_len < init_epoch:
                    tl, log, train_acc = sess.run(
                        [train_op2, summary_op, accuracy],
                        feed_dict={
                            image_ph: train_images[idx[:batch_size]],
                            label_ph:
                            np.squeeze(train_labels[idx[:batch_size]]),
                            is_training_ph: True
                        })
                else:
                    tl, log, train_acc = sess.run(
                        [train_op, summary_op, accuracy],
                        feed_dict={
                            image_ph: train_images[idx[:batch_size]],
                            label_ph:
                            np.squeeze(train_labels[idx[:batch_size]]),
                            is_training_ph: True
                        })

                time_elapsed.append(time.time() - start_time)
                total_loss.append(tl)
                sum_train_accuracy.append(train_acc)
                idx[:batch_size] = []
                if len(idx) < batch_size:
                    idx_ = list(range(train_labels.shape[0]))
                    shuffle(idx_)
                    idx += idx_

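                # Once past warm-up, validate at each epoch boundary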
                step += 1
                if (step * batch_size) // dataset_len >= init_epoch + epoch_:
                    ## do validation
                    sum_val_accuracy = []
                    for i in range(val_itr):
                        acc = sess.run(
                            accuracy,
                            feed_dict={
                                image_ph:
                                val_images[i * val_batch_size:(i + 1) *
                                           val_batch_size],
                                label_ph:
                                np.squeeze(
                                    val_labels[i * val_batch_size:(i + 1) *
                                               val_batch_size]),
                                is_training_ph:
                                False
                            })
                        sum_val_accuracy.append(acc)

                    sum_train_accuracy = np.mean(sum_train_accuracy) * 100 if (
                        step * batch_size) // dataset_len > init_epoch else 1.
                    sum_val_accuracy = np.mean(sum_val_accuracy) * 100
                    tf.logging.info(
                        'Epoch %s Step %s - train_Accuracy : %.2f%%  val_Accuracy : %.2f%%'
                        % (str(epoch_).rjust(3, '0'), str(step).rjust(
                            6, '0'), sum_train_accuracy, sum_val_accuracy))

                    result_log = sess.run(val_summary_op,
                                          feed_dict={
                                              train_acc_place:
                                              sum_train_accuracy,
                                              val_acc_place: sum_val_accuracy
                                          })
                    logs['training_acc'].append(sum_train_accuracy)
                    logs['validation_acc'].append(sum_val_accuracy)

                    if ((step * batch_size) // dataset_len == init_epoch
                            and FLAGS.Distillation in {'FitNet', 'FSP', 'AB'}):
                        # re-initialize Momentum slots for a fair comparison with
                        # initialization and multi-task learning methods
                        for v in global_variables:
                            if v.name.endswith('Momentum:0'):
                                sess.run(v.assign(np.zeros(v.get_shape().as_list())))

                    if step == max_number_of_steps:
                        train_writer.add_summary(result_log, train_epoch)
                    else:
                        train_writer.add_summary(result_log, epoch_)
                    sum_train_accuracy = []
                    epoch_ += 1

                if step % should_log == 0:
                    tf.logging.info(
                        'global step %s: loss = %.4f (%.3f sec/step)',
                        str(step).rjust(6, '0'), np.mean(total_loss),
                        np.mean(time_elapsed))
                    train_writer.add_summary(log, step)
                    time_elapsed = []
                    total_loss = []

                elif (step * batch_size) % dataset_len == 0:
                    train_writer.add_summary(log, step)

            ## save the trained variables (trainable weights + BN statistics) for later reuse
            var = {}
            variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES) + tf.get_collection(
                    'BN_collection')
            # variables = tf.get_collection('MHA')
            for v in variables:
                if v.name.split('/')[0] == FLAGS.main_scope:
                    var[v.name[:-2]] = sess.run(v)

            sio.savemat(FLAGS.train_dir + '/train_params.mat', var)
            sio.savemat(FLAGS.train_dir + '/log.mat', logs)

            ## close all
            tf.logging.info('Finished training! Saving model to disk.')
            train_writer.add_session_log(
                tf.SessionLog(status=tf.SessionLog.STOP))
            train_writer.close()