Example #1
def _dsebm_experiment(dataset_load_fn, dataset_name, single_class_ind, gpu_q):
    # gpu_to_use = gpu_q.get()
    # os.environ["CUDA_VISIBLE_DEVICES"] = gpu_to_use

    (x_train, y_train), (x_test, y_test) = dataset_load_fn()

    n_channels = x_train.shape[get_channels_axis()]
    input_side = x_train.shape[2]  # image side will always be at shape[2]
    encoder_mdl = conv_encoder(input_side,
                               n_channels,
                               representation_activation='relu')
    energy_mdl = dsebm.create_energy_model(encoder_mdl)
    reconstruction_mdl = dsebm.create_reconstruction_model(energy_mdl)

    # optimization parameters
    batch_size = 128
    epochs = 200
    reconstruction_mdl.compile('adam', 'mse')
    x_train_task = x_train[y_train.flatten() == single_class_ind]
    x_test_task = x_test[y_test.flatten() == single_class_ind]  # This is just for visual monitoring
    reconstruction_mdl.fit(x=x_train_task,
                           y=x_train_task,
                           batch_size=batch_size,
                           epochs=epochs,
                           validation_data=(x_test_task, x_test_task))

    scores = -energy_mdl.predict(x_test, batch_size)  # lower energy = more "normal", so negate to score in-class examples higher
    labels = y_test.flatten() == single_class_ind
    res_file_name = '{}_dsebm_{}_{}.npz'.format(
        dataset_name, get_class_name_from_index(single_class_ind,
                                                dataset_name),
        datetime.datetime.now().strftime('%Y-%m-%d-%H%M'))
    res_file_path = os.path.join(RESULTS_DIR, dataset_name, res_file_name)
    save_roc_pr_curve_data(scores, labels, res_file_path)
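
# Note: save_roc_pr_curve_data is used by every experiment above but is not
# shown in these snippets. Below is a minimal sketch of what such a helper
# presumably does (compute ROC/PR curve data from the scores and binary
# labels and save it as an .npz file); the exact metrics and array keys
# stored by the original helper are an assumption:
def _save_roc_pr_curve_data_sketch(scores, labels, file_path):
    # local imports so the sketch stands on its own
    import numpy as np
    from sklearn.metrics import (roc_curve, roc_auc_score,
                                 precision_recall_curve,
                                 average_precision_score)
    scores = np.asarray(scores).flatten()
    labels = np.asarray(labels).astype(int).flatten()
    fpr, tpr, roc_thresholds = roc_curve(labels, scores)
    precision, recall, pr_thresholds = precision_recall_curve(labels, scores)
    np.savez_compressed(file_path,
                        scores=scores, labels=labels,
                        fpr=fpr, tpr=tpr, roc_thresholds=roc_thresholds,
                        roc_auc=roc_auc_score(labels, scores),
                        precision=precision, recall=recall,
                        pr_thresholds=pr_thresholds,
                        average_precision=average_precision_score(labels, scores))
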
Example #2
def _cae_ocsvm_experiment(dataset_load_fn, dataset_name, single_class_ind,
                          gpu_q):
    # gpu_to_use = gpu_q.get()
    # os.environ["CUDA_VISIBLE_DEVICES"] = gpu_to_use

    (x_train, y_train), (x_test, y_test) = dataset_load_fn()

    print('data_shape', x_train.shape)

    n_channels = x_train.shape[get_channels_axis()]
    input_side = x_train.shape[2]  # image side will always be at shape[2]
    enc = conv_encoder(input_side, n_channels)
    dec = conv_decoder(input_side, n_channels)
    # print(input_side)
    # print(dec.summary())
    x_in = Input(shape=x_train.shape[1:])
    x_rec = dec(enc(x_in))
    cae = Model(x_in, x_rec)
    cae.compile('adam', 'mse')

    x_train_task = x_train[y_train.flatten() == single_class_ind]
    x_test_task = x_test[y_test.flatten() == single_class_ind]  # This is just for visual monitoring
    cae.fit(x=x_train_task,
            y=x_train_task,
            batch_size=128,
            epochs=200,
            validation_data=(x_test_task, x_test_task))

    x_train_task_rep = enc.predict(x_train_task, batch_size=128)
    # OC-SVM training is quadratic in the number of examples, so subsample the
    # training representations for large datasets before the grid search
    if dataset_name in LARGE_DATASET_NAMES:
        subsample_inds = np.random.choice(len(x_train_task_rep),
                                          2500,
                                          replace=False)
        x_train_task_rep_temp = x_train_task_rep[subsample_inds]
    else:
        x_train_task_rep_temp = x_train_task_rep

    x_test_rep = enc.predict(x_test, batch_size=128)
    pg = ParameterGrid({
        'nu': np.linspace(0.1, 0.9, num=9),
        'gamma': np.logspace(-7, 2, num=10, base=2)
    })

    results = Parallel(n_jobs=PARALLEL_N_JOBS)(
        delayed(_train_ocsvm_and_score)(d, x_train_task_rep_temp,
                                        y_test.flatten() == single_class_ind,
                                        x_test_rep)
        for d in pg)

    best_params, best_auc_score = max(zip(pg, results), key=lambda t: t[-1])
    print(best_params)
    best_ocsvm = OneClassSVM(**best_params).fit(x_train_task_rep)
    scores = best_ocsvm.decision_function(x_test_rep)
    labels = y_test.flatten() == single_class_ind

    res_file_name = '{}_cae-oc-svm_{}_{}.npz'.format(
        dataset_name, get_class_name_from_index(single_class_ind,
                                                dataset_name),
        datetime.datetime.now().strftime('%Y-%m-%d-%H%M'))
    res_file_path = os.path.join(RESULTS_DIR, dataset_name, res_file_name)
    save_roc_pr_curve_data(scores, labels, res_file_path)
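
# Note: _train_ocsvm_and_score is dispatched through joblib above but is not
# defined in these snippets. A plausible sketch, assuming it fits a
# OneClassSVM with the given hyperparameters on the (possibly subsampled)
# training representations and returns the test ROC AUC that the grid search
# maximizes; the original helper's exact signature is an assumption:
def _train_ocsvm_and_score_sketch(params, x_train_rep, test_labels, x_test_rep):
    from sklearn.metrics import roc_auc_score
    from sklearn.svm import OneClassSVM
    ocsvm = OneClassSVM(**params).fit(x_train_rep)
    # decision_function is larger for "in-class" points, matching test_labels
    return roc_auc_score(test_labels, ocsvm.decision_function(x_test_rep))
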
Example #3
def _adgan_experiment(dataset_load_fn, dataset_name, single_class_ind, gpu_q):
    # gpu_to_use = gpu_q.get()
    # os.environ["CUDA_VISIBLE_DEVICES"] = gpu_to_use

    (x_train, y_train), (x_test, y_test) = dataset_load_fn()
    if len(x_test) > 5000:
        # subsample x_test due to runtime complexity
        chosen_inds = np.random.choice(len(x_test), 5000, replace=False)
        x_test = x_test[chosen_inds]
        y_test = y_test[chosen_inds]

    n_channels = x_train.shape[get_channels_axis()]
    input_side = x_train.shape[2]  # image side will always be at shape[2]
    critic = conv_encoder(input_side,
                          n_channels,
                          representation_dim=1,
                          representation_activation='linear')
    noise_size = 256
    generator = conv_decoder(input_side,
                             n_channels=n_channels,
                             representation_dim=noise_size)

    def prior_gen(b_size):
        return np.random.normal(size=(b_size, noise_size))

    batch_size = 128
    epochs = 100

    x_train_task = x_train[y_train.flatten() == single_class_ind]

    def data_gen(b_size):
        chosen_inds = np.random.choice(len(x_train_task),
                                       b_size,
                                       replace=False)
        return x_train_task[chosen_inds]

    adgan.train_wgan_with_grad_penalty(prior_gen,
                                       generator,
                                       data_gen,
                                       critic,
                                       batch_size,
                                       epochs,
                                       grad_pen_coef=20)

    scores = adgan.scores_from_adgan_generator(x_test, prior_gen, generator)
    labels = y_test.flatten() == single_class_ind
    res_file_name = '{}_adgan_{}_{}.npz'.format(
        dataset_name, get_class_name_from_index(single_class_ind,
                                                dataset_name),
        datetime.datetime.now().strftime('%Y-%m-%d-%H%M'))
    res_file_path = os.path.join(RESULTS_DIR, dataset_name, res_file_name)
    save_roc_pr_curve_data(scores, labels, res_file_path)
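
# Note: adgan.train_wgan_with_grad_penalty is not shown here. The
# grad_pen_coef=20 argument suggests a WGAN-GP style critic objective
# (Gulrajani et al., 2017). A minimal sketch of that penalty term, written
# with TensorFlow 2 for illustration and assuming channels-last image
# batches; the actual adgan implementation may differ:
def _gradient_penalty_sketch(critic, real_batch, fake_batch, coef=20.0):
    import tensorflow as tf
    # random points on the line segments between real and generated samples
    eps = tf.random.uniform([tf.shape(real_batch)[0], 1, 1, 1], 0.0, 1.0)
    interp = (eps * tf.cast(real_batch, tf.float32)
              + (1.0 - eps) * tf.cast(fake_batch, tf.float32))
    with tf.GradientTape() as tape:
        tape.watch(interp)
        critic_out = critic(interp)
    grads = tape.gradient(critic_out, interp)
    grad_norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]) + 1e-12)
    # penalize deviation of the critic's gradient norm from 1
    return coef * tf.reduce_mean(tf.square(grad_norm - 1.0))
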
def _dagmm_experiment(dataset_load_fn, dataset_name, single_class_ind, gpu_q):
    gpu_to_use = gpu_q.get()
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_to_use

    (x_train, y_train), (x_test, y_test) = dataset_load_fn()

    n_channels = x_train.shape[get_channels_axis()]
    input_side = x_train.shape[2]  # image side will always be at shape[2]
    enc = conv_encoder(input_side, n_channels, representation_dim=5,
                       representation_activation='linear')
    dec = conv_decoder(input_side, n_channels=n_channels, representation_dim=enc.output_shape[-1])
    n_components = 3
    estimation = Sequential([
        Dense(64, activation='tanh', input_dim=enc.output_shape[-1] + 2),
        Dropout(0.5),
        Dense(10, activation='tanh'),
        Dropout(0.5),
        Dense(n_components, activation='softmax')
    ])

    batch_size = 256
    epochs = 200
    lambda_diag = 0.0005
    lambda_energy = 0.01
    dagmm_mdl = dagmm.create_dagmm_model(enc, dec, estimation, lambda_diag)
    dagmm_mdl.compile('adam', ['mse', lambda y_true, y_pred: lambda_energy*y_pred])

    x_train_task = x_train[y_train.flatten() == single_class_ind]
    x_test_task = x_test[y_test.flatten() == single_class_ind]  # This is just for visual monitoring
    dagmm_mdl.fit(x=x_train_task, y=[x_train_task, np.zeros((len(x_train_task), 1))],  # second y is dummy
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_data=(x_test_task, [x_test_task, np.zeros((len(x_test_task), 1))]),
                  # verbose=0
                  )

    energy_mdl = Model(dagmm_mdl.input, dagmm_mdl.output[-1])

    scores = -energy_mdl.predict(x_test, batch_size)
    scores = scores.flatten()
    if not np.all(np.isfinite(scores)):
        min_finite = np.min(scores[np.isfinite(scores)])
        scores[~np.isfinite(scores)] = min_finite - 1
    labels = y_test.flatten() == single_class_ind
    res_file_name = '{}_dagmm_{}_{}.npz'.format(
        dataset_name, get_class_name_from_index(single_class_ind,
                                                dataset_name),
        datetime.datetime.now().strftime('%Y-%m-%d-%H%M'))
    res_file_path = os.path.join(RESULTS_DIR, dataset_name, res_file_name)
    save_roc_pr_curve_data(scores, labels, res_file_path)

    gpu_q.put(gpu_to_use)
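
# Note: get_channels_axis is used by every experiment above but is not
# defined in these snippets. A likely one-line sketch based on the Keras
# backend image data format (channels-first vs. channels-last); the original
# utility may differ:
def _get_channels_axis_sketch():
    from keras import backend as K
    return 1 if K.image_data_format() == 'channels_first' else 3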