def main(argv):
    """Trains a CNN classifier and saves split samples for influence analysis.

    Builds the experiment directory tree, trains/tests the classifier, then
    saves 1000-example samples of the train/valid/test splits to disk.

    Args:
      argv: Command-line arguments; only the program name is expected.

    Raises:
      app.UsageError: If extra command-line arguments are passed.
    """
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    params = FLAGS.flag_values_dict()
    tf.set_random_seed(params['seed'])

    # Lay out the per-experiment directory tree under the results root.
    params['results_dir'] = utils.make_subdir(params['training_results_dir'],
                                              params['expname'])
    params['figdir'] = utils.make_subdir(params['results_dir'], 'figs')
    params['ckptdir'] = utils.make_subdir(params['results_dir'], 'ckpts')
    params['logdir'] = utils.make_subdir(params['results_dir'], 'logs')
    params['tensordir'] = utils.make_subdir(params['results_dir'], 'tensors')

    # Guard empty flag strings so int('') cannot raise — consistent with the
    # other entry points in this file.
    conv_dims = ([int(x) for x in params['conv_dims'].split(',')]
                 if params['conv_dims'] else [])
    conv_sizes = ([int(x) for x in params['conv_sizes'].split(',')]
                  if params['conv_sizes'] else [])
    dense_sizes = ([int(x) for x in params['dense_sizes'].split(',')]
                   if params['dense_sizes'] else [])
    params['n_layers'] = len(conv_dims)
    clf = classifier.CNN(conv_dims,
                         conv_sizes,
                         dense_sizes,
                         params['n_classes'],
                         onehot=True)

    # Checkpoint the freshly initialized model, in case we want to
    # re-run training from the same initialization.
    utils.checkpoint_model(clf, params['ckptdir'], 'initmodel')

    itr_train, itr_valid, itr_test = dataset_utils.load_dset_supervised_onehot(
    )

    train_cnn.train_classifier(clf, itr_train, itr_valid, params)
    train_cnn.test_classifier(clf, itr_test, params, 'test')

    # Save to disk samples of size 1000 from the train, valid and test sets.
    train_data = utils.aggregate_batches(itr_train, 1000,
                                         ['train_x_infl', 'train_y_infl'])

    validation_data = utils.aggregate_batches(itr_valid, 1000,
                                              ['valid_x_infl', 'valid_y_infl'])

    test_data = utils.aggregate_batches(itr_test, 1000,
                                        ['test_x_infl', 'test_y_infl'])

    # dict views do not support `+` in Python 3 (TypeError at runtime);
    # concatenate explicit lists of (name, tensor) pairs instead.
    utils.save_tensors(
        list(train_data.items()) + list(validation_data.items()) +
        list(test_data.items()), params['tensordir'])
# Example #2
def main(argv):
    """Trains a CNN on in-distribution classes and evaluates it on OOD data.

    All classes not listed in the `ood_classes` flag are treated as
    in-distribution training classes. Model outputs on the test, train and
    OOD splits are written under separate tensor subdirectories.

    Args:
      argv: Command-line arguments; only the program name is expected.

    Raises:
      app.UsageError: If extra command-line arguments are passed.
    """
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    params = FLAGS.flag_values_dict()
    tf.set_random_seed(params['seed'])

    # Build the experiment directory tree under the results root.
    params['results_dir'] = utils.make_subdir(params['results_dir'],
                                              params['expname'])
    for key, subdir in (('figdir', 'figs'), ('ckptdir', 'ckpts'),
                        ('logdir', 'logs'), ('tensordir', 'tensors')):
        params[key] = utils.make_subdir(params['results_dir'], subdir)

    ood_classes = [int(c) for c in params['ood_classes'].split(',')]
    # We assume we train on all non-OOD classes.
    ind_classes = [
        c for c in range(params['n_classes']) if c not in ood_classes
    ]
    (itr_train, itr_valid, itr_test,
     itr_test_ood) = dataset_utils.load_dset_ood_supervised(
         ind_classes, ood_classes)

    conv_dims = [int(d) for d in params['conv_dims'].split(',')]
    conv_sizes = [int(s) for s in params['conv_sizes'].split(',')]
    clf = CNN(conv_dims, conv_sizes, params['n_classes'])
    params['n_layers'] = len(conv_dims)

    train_cnn.train_classifier(clf, itr_train, itr_valid, params)
    train_cnn.test_classifier(clf, itr_test, params, 'test')

    # Model outputs on the training split go to their own subdirectory.
    params['tensordir'] = utils.make_subdir(params['results_dir'],
                                            'train_tensors')
    train_cnn.test_classifier(clf, itr_train, params, 'train')

    # Likewise for outputs on the held-out OOD split.
    params['tensordir'] = utils.make_subdir(params['results_dir'],
                                            'ood_tensors')
    train_cnn.test_classifier(clf, itr_test_ood, params, 'ood')
def main(argv):
    """Trains a CNN with OOD holdout classes and saves split samples.

    Trains on all non-OOD classes, evaluates on test/train/OOD splits
    (each saved to its own tensor subdirectory), then saves 1000-example
    samples of the train, valid, test and OOD sets for influence analysis.

    Args:
      argv: Command-line arguments; only the program name is expected.

    Raises:
      app.UsageError: If extra command-line arguments are passed.
    """
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    params = FLAGS.flag_values_dict()
    tf.set_random_seed(params['seed'])

    # Lay out the per-experiment directory tree under the results root.
    params['results_dir'] = utils.make_subdir(params['training_results_dir'],
                                              params['expname'])
    params['figdir'] = utils.make_subdir(params['results_dir'], 'figs')
    params['ckptdir'] = utils.make_subdir(params['results_dir'], 'ckpts')
    params['logdir'] = utils.make_subdir(params['results_dir'], 'logs')
    params['tensordir'] = utils.make_subdir(params['results_dir'], 'tensors')

    # Load the classification model; empty flag strings mean "no such layers".
    conv_dims = [
        int(x) for x in (
            params['conv_dims'].split(',') if params['conv_dims'] else [])
    ]
    conv_sizes = [
        int(x) for x in (
            params['conv_sizes'].split(',') if params['conv_sizes'] else [])
    ]
    dense_sizes = [
        int(x) for x in (
            params['dense_sizes'].split(',') if params['dense_sizes'] else [])
    ]
    params['n_layers'] = len(conv_dims)
    clf = classifier.CNN(conv_dims,
                         conv_sizes,
                         dense_sizes,
                         params['n_classes'],
                         onehot=True)

    # Checkpoint the initialized model, in case we want to re-run it from there.
    utils.checkpoint_model(clf, params['ckptdir'], 'initmodel')

    # Load the "in-distribution" and "out-of-distribution" classes as
    # separate splits.
    ood_classes = [int(x) for x in params['ood_classes'].split(',')]
    # We assume we train on all non-OOD classes.
    all_classes = range(params['n_classes'])
    ind_classes = [x for x in all_classes if x not in ood_classes]
    (itr_train, itr_valid, itr_test,
     itr_test_ood) = dataset_utils.load_dset_ood_supervised_onehot(
         ind_classes,
         ood_classes,
         label_noise=(params['label_noise']),
         dset_name=params['dataset_name'])
    # Train and test the model in-distribution, and save test outputs.
    train_cnn.train_classifier(clf, itr_train, itr_valid, params)
    train_cnn.test_classifier(clf, itr_test, params, 'test')

    # Save model outputs on the training set.
    params['tensordir'] = utils.make_subdir(params['results_dir'],
                                            'train_tensors')
    train_cnn.test_classifier(clf, itr_train, params, 'train')

    # Save model outputs on the OOD set.
    params['tensordir'] = utils.make_subdir(params['results_dir'],
                                            'ood_tensors')
    train_cnn.test_classifier(clf, itr_test_ood, params, 'ood')

    params['tensordir'] = utils.make_subdir(params['results_dir'], 'tensors')

    # Save to disk samples of size 1000 from the train, valid, test and OOD sets.
    train_data = utils.aggregate_batches(itr_train, 1000,
                                         ['train_x_infl', 'train_y_infl'])

    validation_data = utils.aggregate_batches(itr_valid, 1000,
                                              ['valid_x_infl', 'valid_y_infl'])

    test_data = utils.aggregate_batches(itr_test, 1000,
                                        ['test_x_infl', 'test_y_infl'])

    ood_data = utils.aggregate_batches(itr_test_ood, 1000,
                                       ['ood_x_infl', 'ood_y_infl'])
    # dict views do not support `+` in Python 3 (TypeError at runtime);
    # concatenate explicit lists of (name, tensor) pairs instead.
    utils.save_tensors(
        list(train_data.items()) + list(validation_data.items()) +
        list(test_data.items()) + list(ood_data.items()), params['tensordir'])