Example #1
0
def main(unused_argv):
  """Entry point: run the shared ResNet driver on the CIFAR-10 model."""
  # Delegate training/evaluation to the common ResNet main loop.
  resnet.resnet_main(FLAGS, cifar10_model_fn, input_fn)
Example #2
0
def main(unused_argv):
    """Incremental class-learning driver with exemplar (protoset) selection.

    Trains a ResNet over ``nb_groups`` successive batches of ``nb_cl``
    classes each.  After every increment it selects a bounded per-class set
    of exemplars via a herding-style nearest-to-mean criterion on ResNet
    features, so previously seen classes stay represented in later
    training sets (iCaRL-style — NOTE(review): presumed from structure,
    confirm against the paper/repo).

    NOTE(review): relies on module-level state defined elsewhere in the
    file (x_train/y_train, x_val/y_val, x_test/y_test, x_train_protoset,
    y_train_protoset, nb_groups, nb_cl, nb_proto, nb_val, FLAGS,
    imagenet_model_fn, input_fn) — verify before reuse.
    """
    # Per-increment learned beta/gamma parameters and test accuracy history.
    beta_all = np.zeros((nb_groups, 1))
    gamma_all = np.zeros((nb_groups, 1))
    test_accuracy_all = np.zeros((nb_groups, 1))
    for itera in range(nb_groups):
        print('Batch of classes number {0} arrives ...'.format(itera + 1))
        # Adding the stored exemplars to the training set
        if itera == 0:
            # First increment: no exemplars exist yet, so train/val/test
            # come straight from the first group of classes.
            # train data
            x_train_from_cl = x_train[itera][:]
            y_train_from_cl = y_train[itera][:]
            # val data: all classes seen so far (here just the first group)
            x_val_from_cl = []
            y_val_from_cl = []
            for i in range((itera + 1) * nb_cl):
                x_val_from_cl += x_val[i][:]
                y_val_from_cl += y_val[i][:]
            # test data
            x_test_from_cl = x_test[itera][:]
            y_test_from_cl = y_test[itera][:]
        else:
            # Memory budget nb_proto is split evenly over the nb_cl * itera
            # classes seen so far; nb_val_itera of each class's share is
            # reserved for validation, the rest for training exemplars.
            nb_val_itera = int(np.ceil(nb_val * nb_proto / (nb_cl * itera)))
            nb_protos_cl = int(
                math.ceil(nb_proto / (nb_cl * itera))
            ) - nb_val_itera  # Reducing number of exemplars of the previous classes

            # train data: current group's samples plus stored exemplars of
            # every previously seen class, truncated to the per-class budget.
            x_train_from_cl = x_train[itera][:]
            y_train_from_cl = y_train[itera][:]
            for i in range(itera * nb_cl):
                x_tmp_var = x_train_protoset[i]
                y_tmp_var = y_train_protoset[i]
                x_train_from_cl += x_tmp_var[0:min(len(x_tmp_var), nb_protos_cl
                                                   )]
                y_train_from_cl += y_tmp_var[0:min(len(y_tmp_var), nb_protos_cl
                                                   )]
            # val data: a fixed slice per class across all seen classes
            x_val_from_cl = []
            y_val_from_cl = []
            for i in range((itera + 1) * nb_cl):
                x_val_from_cl += x_val[i][0:nb_val_itera]
                y_val_from_cl += y_val[i][0:nb_val_itera]

            # test data: evaluate on every group seen so far
            x_test_from_cl = x_test[itera][:]
            y_test_from_cl = y_test[itera][:]
            for i in range(itera):
                x_test_from_cl += x_test[i]
                y_test_from_cl += y_test[i]

        if itera == 0:
            print("first itera, #training: {}, #val: {}, #test: {}".format(
                len(x_train_from_cl), len(x_val_from_cl), len(x_test_from_cl)))
            print(len(y_train_from_cl))
            # First increment: no previous beta/gamma, so pass neutral 1.0/0.0.
            x_train_resnet_features, beta, gamma, test_accuracy = resnet.resnet_main(
                FLAGS, imagenet_model_fn, input_fn, x_train_from_cl,
                y_train_from_cl, x_val_from_cl, y_val_from_cl, x_test_from_cl,
                y_test_from_cl, itera, nb_groups, nb_cl, 1.0, 0.0)
        else:
            print("incremental iteras, #training: {}, #val: {}, #test: {}".
                  format(len(x_train_from_cl), len(x_val_from_cl),
                         len(x_test_from_cl)))
            # Warm-start from the previous increment's beta/gamma.
            x_train_resnet_features, beta, gamma, test_accuracy = resnet.resnet_main(
                FLAGS, imagenet_model_fn, input_fn, x_train_from_cl,
                y_train_from_cl, x_val_from_cl, y_val_from_cl, x_test_from_cl,
                y_test_from_cl, itera, nb_groups, nb_cl, beta_all[itera - 1],
                gamma_all[itera - 1])

        # Record this increment's results and print the full history so far.
        beta_all[itera] = beta
        gamma_all[itera] = gamma
        test_accuracy_all[itera] = test_accuracy
        beta_result = ""
        gamma_result = ""
        accuracy_result = ""
        for i in range(itera + 1):
            beta_result = "{} {}".format(beta_result, beta_all[i])
            gamma_result = "{} {}".format(gamma_result, gamma_all[i])
            accuracy_result = "{} {}".format(accuracy_result,
                                             test_accuracy_all[i])
        print("beta    : {}".format(beta_result))
        print("gamma   : {}".format(gamma_result))
        print("accuracy: {}".format(accuracy_result))

        ## for last increment, do not need to select the exemplars
        if itera == nb_groups - 1:
            break
        ## Exemplars management part  ##
        # Recompute per-class budget for the NEXT increment (one more group
        # of classes will share the same total memory nb_proto).
        nb_val_next_itera = int(
            math.ceil(nb_val * nb_proto / (nb_cl * (itera + 1))))
        nb_protos_cl = int(math.ceil(nb_proto /
                                     (nb_cl *
                                      (itera + 1)))) - nb_val_next_itera

        print("val for next iter:", nb_val_next_itera)
        print("exemplars next time for train", nb_protos_cl)

        # Flatten the per-sample feature dicts returned by resnet_main into
        # a plain list of feature vectors aligned with x_train_from_cl.
        resnet_features_ = []
        for resnet_feature in x_train_resnet_features:
            resnet_features_.append(resnet_feature['features'])
        print('Exemplars selection starting ...')

        for iter_dico in range((itera + 1) * nb_cl):
            # Rebuild the protoset for class `iter_dico` from scratch.
            x_train_protoset[iter_dico] = []
            y_train_protoset[iter_dico] = []
            y_train_from_cl_ = np.asarray(y_train_from_cl, np.int16)
            ## np.int8 only works for less than 100 classes
            # y_train_from_cl_ = np.asarray(y_train_from_cl, np.int8)
            ind_cl = np.where(y_train_from_cl_ == iter_dico)[0]
            D = np.asarray(resnet_features_, np.float32)[ind_cl]
            mu = np.mean(D, axis=0)  # class mean in feature space

            selected = []          # sample indices picked so far
            selected_feat = []     # their feature vectors

            # select nb_protos_cl samples
            # Herding: greedily pick the sample whose inclusion keeps the
            # running mean of selected features closest to the class mean mu.
            for k in range(nb_protos_cl):
                # 512 is the dimension of features in resnet
                sum_others = np.zeros(512)
                for j in selected_feat:
                    sum_others += j / (k + 1)

                dist_min = np.inf
                assert (len(ind_cl) > 0)
                for item in ind_cl:
                    if item not in selected:
                        feat = resnet_features_[item]
                        dist = np.linalg.norm(mu - feat / (k + 1) - sum_others)
                        if dist < dist_min:
                            dist_min = dist
                            newone = item
                            newonefeat = feat
                selected_feat.append(newonefeat)
                selected.append(newone)

                x_train_protoset[iter_dico].append(x_train_from_cl[newone])
                error_message = str(
                    y_train_from_cl[newone]) + " " + str(iter_dico)
                # label should be the same with iter_dico
                assert (y_train_from_cl[newone] == iter_dico), error_message
                y_train_protoset[iter_dico].append(y_train_from_cl[newone])
def main(unused_argv):
    """Entry point: run the shared ResNet driver on the ImageNet model."""
    # Hand off to the common ResNet training/evaluation loop.
    resnet.resnet_main(FLAGS, imagenet_model_fn, input_fn)
Example #4
0
def main(unused_argv):
    """Entry point: run the shared ResNet driver on the CIFAR-10 model."""
    # Hand off to the common ResNet training/evaluation loop.
    resnet.resnet_main(FLAGS, cifar10_model_fn, input_fn)
Example #5
0
    :param params: the parameters to be passed to the model
    :return: a resnet model fn
    """
    features = tf.reshape(features, [-1, _WIDTH, _HEIGHT, _CHANNELS])

    # the function that decays learning rate overtime
    learning_rate_fn = resnet.learning_rate_with_decay(
        num_images=_NUM_IMAGES['train'],
        batch_size=params['batch_size'],
        boundary_epochs=[100, 150, 200],
        decay_rates=[1, 0.1, 0.01, 0.001])
    weight_decay = 2e-4
    return resnet_model.resnet_model_fn(features, labels, mode, Cifar10Model,
                                        params['resnet_size'], weight_decay,
                                        learning_rate_fn, 0.9,
                                        params['data_format'],
                                        params['multi_gpu'])


if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    # CLI parser with ResNet defaults overridden for CIFAR-10.
    argparser = resnet.ResnetArgParser()
    argparser.set_defaults(data_dir='/tmp/cifar10_data',
                           model_dir='/tmp/cifar10_model',
                           resnet_size=32,
                           train_epochs=250,
                           epochs_per_eval=10,
                           batch_size=128)
    # parse_known_args tolerates extra flags; `unparse` holds the leftovers.
    FLAGS, unparse = argparser.parse_known_args()
    # NOTE(review): resnet_main is invoked directly here AND tf.app.run()
    # below would invoke a module-level main() again — confirm the double
    # launch is intended.  Also verify `cifar_model_fn` is the correct
    # model-fn name (other snippets in this file use `cifar10_model_fn`).
    resnet.resnet_main(FLAGS, cifar_model_fn)
    tf.app.run()
Example #6
0
def main(unused_argv):
  """Entry point: run the shared ResNet driver on the ImageNet model."""
  # Hand off to the common ResNet training/evaluation loop.
  resnet.resnet_main(FLAGS, imagenet_model_fn, input_fn)
def main(unused_argv):
    """Run ResNet, then grid-search (optimizer, lr, weight decay) on CIFAR-10.

    First launches a single ResNet run (ImageNet model fn), then sweeps every
    combination of optimizer, learning rate and weight decay, training each
    into its own model directory and skipping directories that already exist.

    NOTE(review): relies on module-level FLAGS, resnet, input_fn,
    get_synth_input_fn, imagenet_model_fn and model_fn_factory defined
    elsewhere in the file.
    """
    # Bug fix: the original used `FLAGS.use_synthetic_data and
    # get_synth_input_fn() or input_fn`, which silently falls through to
    # input_fn whenever get_synth_input_fn() returns a falsy value.  A
    # conditional expression selects the branch unambiguously.
    input_function = (get_synth_input_fn()
                      if FLAGS.use_synthetic_data else input_fn)
    resnet.resnet_main(FLAGS, imagenet_model_fn, input_function)
    FLAGS.epochs_per_eval = 10
    FLAGS.batch_size = 128
    FLAGS.data_dir = '/home/jundp/Data/cifar10'
    FLAGS.base_model_dir = '/home/jundp/Data/adamw/param_grid_warm_restarts/cifar10_'
    FLAGS.num_parallel_calls = 5
    FLAGS.data_format = 'channels_first'
    # (name, optimizer class, whether weight decay is decoupled from the loss)
    optimizer_configs = (
        ('momentumw', tf.contrib.opt.MomentumWOptimizer, True),
        ('momentum', tf.train.MomentumOptimizer, False),
        ('adam', tf.train.AdamOptimizer, False),
        ('adamw', tf.contrib.opt.AdamWOptimizer, True),
    )
    for opt_name, optimizer, decoupled in optimizer_configs:
        FLAGS.decouple_weight_decay = decoupled
        for lr in [0.1, 0.01, 0.001, 1.0, 10.0]:
            for weight_decay in [0.00001, 0.0001, 0.001]:
                FLAGS.model_dir = (FLAGS.base_model_dir + opt_name + "lr_" +
                                   str(lr) + "_weight_decay_" +
                                   str(weight_decay))
                if os.path.isdir(FLAGS.model_dir):
                    continue  # don't train already trained models
                print(FLAGS.model_dir)
                model_fn = model_fn_factory(lr, weight_decay, optimizer)
                try:
                    resnet.resnet_main(FLAGS, model_fn, input_fn)
                except tf.train.NanLossDuringTrainingError:
                    # Divergent configurations are expected in a grid search;
                    # report and move on to the next combination.
                    print("Nan error.")
Example #9
0
def main(unused_argv):
  """Launch ResNet ImageNet training, optionally on synthetic data."""
  # Bug fix: the original `cond and a or b` idiom falls through to input_fn
  # whenever get_synth_input_fn() returns a falsy value; a conditional
  # expression selects the branch unambiguously.
  input_function = get_synth_input_fn() if FLAGS.use_synthetic_data else input_fn
  resnet.resnet_main(FLAGS, imagenet_model_fn, input_function)
Example #10
0
                                  resnet_size=params['resnet_size'],
                                  weight_decay=1e-4,
                                  learning_rate_fn=learning_rate_fn,
                                  momentum=0.9,
                                  data_format=params['data_format'],
                                  loss_filter_fn=None,
                                  multi_gpu=params['multi_gpu'])


if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.DEBUG)

    # Hosted-training environment wrapper — presumably exposes
    # hyperparameters, data-channel directories, and success/failure
    # reporting files; defined elsewhere — TODO confirm.
    env = TrainingEnvironment()

    try:
        print(env.hyperparameters)
        # Merge fixed channel/model settings with environment-supplied
        # hyperparameters into a single HParams object for resnet_main.
        hyperparameters = HParams(
            model_dir=env.model_dir,
            data_format=None,
            training_channel='training',
            evaluation_channel='validation',
            training_dir=env.channel_dirs['training'],
            evaluation_dir=env.channel_dirs['validation'],
            **env.hyperparameters)

        resnet.resnet_main(hyperparameters, imagenet_model_fn, input_fn)
    # Broad catch is deliberate at this top-level boundary: every failure
    # must be reported to the environment via the failure file.
    except Exception as e:
        env.write_failure_file(str(e))
    else:
        env.write_success_file()
Example #11
0
def main(unused_argv):
    """Entry point: train the FashionAI ResNet model on the new input pipeline."""
    # Synthetic-data path (get_synth_input_fn) is disabled; use real data.
    chosen_input_fn = input_fn_new
    resnet.resnet_main(FLAGS, fashionai_model_fn, chosen_input_fn)