def evaluate(Xs, ys, BATCH_SIZE, net_mask=dummy_mask):
    # One counter vector per layer: how often each unit produced a nonzero activation.
    nonzeros = []
    for a in activations:
        assert len(a.shape) == 2
        nonzeros.append(np.zeros(int(a.shape[1])))

    eval_gen = data.classifier_generator((Xs, ys), BATCH_SIZE, infinity=False)
    _total_losses = []
    _total_acc = []
    labels_to_return = [[], []]
    for X_batch, y_batch in eval_gen:
        value_list = session.run([total_loss, output, activations] +
                                 list(cov_ops),
                                 feed_dict={
                                     inputs: X_batch,
                                     labels: y_batch,
                                     mask: net_mask
                                 })
        # cov_ops are run for their side effects only; just the first three results are used.
        (_total_loss, predicted, _activations) = value_list[:3]
        labels_to_return[0].append(y_batch.tolist())
        labels_to_return[1].append(np.argmax(predicted, axis=1).tolist())
        _total_acc.append(accuracy(predicted, y_batch))
        _total_losses.append(_total_loss)
        for i, a in enumerate(_activations):
            # Per unit, count the samples in this batch with a nonzero activation.
            nonzeros[i] += np.count_nonzero(a, axis=0)

    eval_loss = np.mean(_total_losses)
    eval_acc = np.mean(_total_acc)
    for i, _ in enumerate(nonzeros):
        # Normalize counts to firing frequencies in [0, 1], then bucket the
        # layer's units into a 10-bin histogram.
        nonzeros[i] = nonzeros[i] * 1.0 / len(Xs)
        nonzeros[i] = np.histogram(nonzeros[i], bins=10, range=(0.0, 1.0))[0]

    return eval_loss, eval_acc, nonzeros, activations, zs, labels_to_return
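
# A hypothetical call site for evaluate() above, as a minimal sketch: score the devel
# split with every unit enabled (dummy_mask, defined further down, is all ones).
dev_loss, dev_acc, dev_nonzeros, _, _, (true_batches, pred_batches) = evaluate(
    X_devel, y_devel, BATCH_SIZE, net_mask=dummy_mask)
print("devel loss %.4f, devel acc %.4f" % (dev_loss, dev_acc))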
Example #2
def evaluate(X_devel, y_devel):
    # Per-batch discriminator costs, raw discriminator outputs, and true labels.
    dev_disc_costs = []
    dev_real_disc_outputs = []
    dev_real_labels = []

    for X_batch, y_batch in data.classifier_generator((X_devel, y_devel),
                                                      BATCH_SIZE, infinity=False):
        _dev_disc_cost, _dev_real_disc_output = session.run(
            [disc_cost, disc_real],
            feed_dict={
                real_data: X_batch,
                real_labels: y_batch,
                dropout: 1.0,  # disable dropout for evaluation
            })
        dev_disc_costs.append(_dev_disc_cost)
        dev_real_disc_outputs.append(_dev_real_disc_output)
        dev_real_labels.append(y_batch)

    dev_real_disc_outputs = np.concatenate(dev_real_disc_outputs)
    dev_real_labels = np.concatenate(dev_real_labels)
    dev_cost = np.mean(dev_disc_costs)
    # Accuracy over the whole devel set at once, not averaged per batch.
    dev_acc = accuracy(dev_real_disc_outputs, dev_real_labels)

    # Log losses and extras. Note these summaries are computed on the *last* devel
    # batch only, since the loop variables leak out of the for-loop above.
    dev_summary_loss, dev_summary_extra = session.run(
        [merged_loss_summary_op, merged_extra_summary_op],
        feed_dict={
            dropout: 1.0,
            real_data: X_batch,
            real_labels: y_batch
        })
    
    return dev_cost, dev_acc, dev_summary_loss, dev_summary_extra
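
# A hypothetical call site, as a sketch: log the devel summaries with a
# tf.summary.FileWriter. `dev_writer` and `step` do not appear in this snippet.
dev_cost, dev_acc, dev_summary_loss, dev_summary_extra = evaluate(X_devel, y_devel)
dev_writer.add_summary(dev_summary_loss, step)
dev_writer.add_summary(dev_summary_extra, step)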
Example #3
def evaluate(Xs, ys, BATCH_SIZE):
    # One counter vector per layer, as in Example #1.
    nonzeros = []
    for a in activations:
        assert len(a.shape) == 2
        nonzeros.append(np.zeros(int(a.shape[1])))

    eval_gen = data.classifier_generator((Xs, ys), BATCH_SIZE, infinity=False)
    _total_losses = []
    _total_acc = []
    for X_batch, y_batch in eval_gen:
        value_list = session.run([total_loss, output, activations] +
                                 list(cov_ops),
                                 feed_dict={
                                     inputs: X_batch,
                                     # In autoencoder mode the input is its own target.
                                     labels: y_batch if CLASSIFIER else X_batch
                                 })
        (_total_loss, predicted, _activations) = value_list[:3]
        _total_acc.append(accuracy(predicted, y_batch))
        _total_losses.append(_total_loss)
        for i, a in enumerate(_activations):
            nonzeros[i] += np.count_nonzero(a, axis=0)

    eval_loss = np.mean(_total_losses)
    eval_acc = np.mean(_total_acc)
    for i, _ in enumerate(nonzeros):
        # Same normalization as in Example #1; np.histogram defaults to 10 bins.
        nonzeros[i] = nonzeros[i] / len(Xs)
        nonzeros[i] = np.histogram(nonzeros[i], range=(0.0, 1.0))[0]
    return eval_loss, eval_acc, nonzeros
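
# A toy numpy-only sketch of the nonzeros bookkeeping above: each unit's firing
# frequency over the eval set is bucketed into 10 bins on [0, 1].
acts = np.array([[0.0, 1.2, 0.0],
                 [0.0, 0.7, 3.1],
                 [0.0, 0.0, 0.4],
                 [0.0, 2.2, 1.8]])  # 4 samples, 3 units
freq = np.count_nonzero(acts, axis=0) / 4.0  # [0.0, 0.75, 0.75]
hist = np.histogram(freq, bins=10, range=(0.0, 1.0))[0]
print(hist)  # [1 0 0 0 0 0 0 2 0 0]: one dead unit, two units firing 75% of the time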

##########################################

dummy_mask = np.ones((DEPTH, WIDTH))  # all ones: every unit of every layer enabled
# To ablate units instead, zero out entries, e.g.:
# dummy_mask[0, :] = np.zeros(WIDTH)  # silence the whole first layer
# dummy_mask[0, 2:5] = np.ones(3)     # ...then re-enable its units 2-4

(X_train, y_train), (X_devel, y_devel), (X_test, y_test) = data.load_data(
    DATASET, SEED, USEFULNESS_EVAL_SET_SIZE)

X_train = X_train[:TRAINSIZE]
y_train = y_train[:TRAINSIZE]
INPUT_SHAPE = X_train.shape[1:]
train_gen = data.classifier_generator((X_train, y_train),
                                      BATCH_SIZE,
                                      augment=AUGMENTATION)
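
# A minimal sketch of the batch contract assumed throughout (the real
# data.classifier_generator is not shown): it yields fixed-size (X_batch, y_batch)
# pairs, looping forever when infinity=True and making a single pass otherwise;
# augmentation and shuffling are omitted here.
def classifier_generator_sketch(data_pair, batch_size, infinity=True):
    X, y = data_pair
    while True:
        for start in range(0, len(X) - batch_size + 1, batch_size):
            yield X[start:start + batch_size], y[start:start + batch_size]
        if not infinity:
            break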
inputs = tf.placeholder(tf.float32, shape=[BATCH_SIZE] + list(INPUT_SHAPE))
mask = tf.placeholder(tf.float32, shape=[DEPTH, WIDTH])
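
# Assumed contract of the network constructors below (their code is not shown here):
# `output` is the logits tensor, `activations` is the list of per-layer 2-D activation
# tensors consumed by evaluate() above, and `zs` is the matching list of pre-activations.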
if CLASSIFIER_TYPE == "dense":
    output, activations, zs = networks.DenseNet(inputs,
                                                DEPTH,
                                                WIDTH,
                                                BN_DO,
                                                OUTPUT_COUNT,
                                                dropout=0.5,
                                                mask=mask)
elif CLASSIFIER_TYPE == "conv":
    output, activations, zs = networks.LeNet(inputs,
                                             BN_DO,
                                             OUTPUT_COUNT)
Example #5
    DATAGRAD,
    COMBINE_OUTPUTS_MODE,
    COMBINE_TOPK,
    ENTROPY_PENALTY,
    DROPOUT_KEEP_PROB,
    WIDENESS,
    time.strftime('%Y%m%d-%H%M%S'))

(X_train, y_train), (X_devel, y_devel), (X_test, y_test) = data.load_set(
    DATASET, TRAIN_DATASET_SIZE, DEVEL_DATASET_SIZE, TEST_DATASET_SIZE,
    seed=RANDOM_SEED)


INPUT_SHAPE = X_train.shape[1:]
INPUT_DIM = np.prod(INPUT_SHAPE)


real_gen = data.classifier_generator((X_train, y_train), BATCH_SIZE, augment=AUGMENTATION)

lib.print_model_settings(locals().copy())

dropout = tf.placeholder(tf.float32)  # dropout keep probability, fed at run time
Discriminator = networks.Discriminator_factory(
    DISC_TYPE, DIM, INPUT_SHAPE, BATCH_SIZE, DO_BATCHNORM, OUTPUT_COUNT,
    REUSE_BATCHNORM=True, dropout=dropout, wideness=WIDENESS)

real_labels = tf.placeholder(tf.uint8, shape=[BATCH_SIZE])
real_labels_onehot = tf.one_hot(real_labels, OUTPUT_COUNT)

real_data = tf.placeholder(tf.float32, shape=[BATCH_SIZE, INPUT_DIM])  # flattened inputs
disc_real = Discriminator(real_data)

if DISC_TYPE == "cifarResnet":
    disc_params = tf.trainable_variables()
else: