Example #1
import logging
from time import time

# HCRF, OverWeak, load_msrc, compute_score and ExperimentResult are assumed to
# be importable from the surrounding project; their modules are not shown here.


def msrc_over_weak(n_train_full=40, n_train=276,
                   C=100, alpha=0.1,
                   test_method='gco', test_n_iter=5, n_iter=5,
                   max_iter=1000, verbose=1,
                   check_every=50, complete_every=100, update_w_every=50,
                   relaxed_test=False,
                   use_latent_first_iter=100):
    """Train an HCRF with the OverWeak trainer on MSRC (a fully labelled
    subset plus weakly labelled images) and return an ExperimentResult."""
    # save the call parameters as experiment meta data
    meta_data = locals()

    logger = logging.getLogger(__name__)

    crf_test = HCRF(n_states=24, n_features=2028, n_edge_features=4, alpha=alpha,
                    inference_method=test_method, n_iter=test_n_iter)
    crf_latent = HCRF(n_states=24, n_features=2028, n_edge_features=4, alpha=alpha,
                      inference_method='gco', n_iter=n_iter)
    trainer = OverWeak(crf_latent, n_states=24, n_features=2028, n_edge_features=4,
                       C=C, alpha=alpha,
                       max_iter=max_iter, verbose=verbose,
                       check_every=check_every, complete_every=complete_every, update_w_every=update_w_every)

    x_train, y_train, y_train_full, x_test, y_test = load_msrc(n_train_full, n_train, dense=True)

    logger.info('start training')

    start = time()
    trainer.fit(x_train, y_train,
                train_scorer=lambda w: compute_score(crf_test, w, x_train, y_train_full, relaxed=relaxed_test),
                test_scorer=lambda w: compute_score(crf_test, w, x_test, y_test, relaxed=relaxed_test),
                use_latent_first_iter=use_latent_first_iter)
    stop = time()
    time_elapsed = stop - start

    logger.info('testing')

    test_score = compute_score(crf_test, trainer.w, x_test, y_test)
    train_score = compute_score(crf_test, trainer.w, x_train, y_train_full)

    logger.info('========================================')
    logger.info('train score: %f', train_score)
    logger.info('test score: %f', test_score)

    # learning curves and weights recorded by the trainer during fitting
    exp_data = {}

    exp_data['timestamps'] = trainer.timestamps
    exp_data['objective'] = trainer.objective_curve
    exp_data['w'] = trainer.w
    exp_data['train_scores'] = trainer.train_score
    exp_data['test_scores'] = trainer.test_score
    exp_data['w_history'] = trainer.w_history

    meta_data['dataset_name'] = 'msrc'
    meta_data['annotation_type'] = 'full+weak'
    meta_data['label_type'] = 'image-level labelling'
    meta_data['trainer'] = 'komodakis+latent+kappa'
    meta_data['train_score'] = train_score
    meta_data['test_score'] = test_score
    meta_data['time_elapsed'] = time_elapsed

    return ExperimentResult(exp_data, meta_data)
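
A minimal driver sketch for this example. The logging setup and every keyword value below are illustrative assumptions, not settings taken from the original experiment; the returned ExperimentResult is treated as an opaque object.

import logging

if __name__ == '__main__':
    # Make the logger.info calls above visible on the console.
    logging.basicConfig(level=logging.INFO)
    # Short exploratory run; the parameter values here are examples only.
    result = msrc_over_weak(max_iter=200, check_every=25, verbose=1)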
Example #2
import logging
from time import time

# HCRF, OverWeak, load_syntetic, compute_score and ExperimentResult are
# assumed to be importable from the surrounding project.


def syntetic_over_weak(n_train_full=10,
                       n_train=100,
                       C=1,
                       dataset=1,
                       max_iter=100,
                       verbose=1,
                       test_samples=10,
                       check_every=10,
                       test_method='gco',
                       test_n_iter=5,
                       relaxed_test=False,
                       alpha=1,
                       n_iter=5,
                       complete_every=10,
                       update_w_every=5,
                       update_mu=20,
                       use_latent_first_iter=500,
                       undergenerating_weak=False,
                       smd=False):
    """Train an HCRF with the OverWeak trainer on the synthetic data set
    and return an ExperimentResult."""
    # save the call parameters as experiment meta data
    meta_data = locals()

    logger = logging.getLogger(__name__)

    crf_test = HCRF(n_states=10,
                    n_features=10,
                    n_edge_features=2,
                    inference_method=test_method,
                    n_iter=test_n_iter)
    crf_latent = HCRF(n_states=10,
                      n_features=10,
                      n_edge_features=2,
                      alpha=alpha,
                      inference_method='gco',
                      n_iter=n_iter)
    trainer = OverWeak(crf_latent,
                       n_states=10,
                       n_features=10,
                       n_edge_features=2,
                       C=C,
                       max_iter=max_iter,
                       verbose=verbose,
                       check_every=check_every,
                       complete_every=complete_every,
                       alpha=alpha,
                       update_w_every=update_w_every,
                       update_mu=update_mu)

    x_train, y_train, y_train_full, x_test, y_test = \
        load_syntetic(dataset, n_train_full, n_train)
    x_test = x_test[:test_samples]
    y_test = y_test[:test_samples]

    logger.info('start training')

    start = time()
    trainer.fit(x_train,
                y_train,
                train_scorer=lambda w: compute_score(
                    crf_test, w, x_train, y_train_full, relaxed=relaxed_test),
                test_scorer=lambda w: compute_score(
                    crf_test, w, x_test, y_test, relaxed=relaxed_test),
                decompose='grid',
                use_latent_first_iter=use_latent_first_iter,
                undergenerating_weak=undergenerating_weak,
                smd=smd)
    stop = time()
    time_elapsed = stop - start

    logger.info('testing')

    test_score = compute_score(crf_test, trainer.w, x_test, y_test)
    train_score = compute_score(crf_test, trainer.w, x_train, y_train_full)

    logger.info('========================================')
    logger.info('train score: %f', train_score)
    logger.info('test score: %f', test_score)

    # learning curves and weights recorded by the trainer during fitting
    exp_data = {}

    exp_data['timestamps'] = trainer.timestamps
    exp_data['objective'] = trainer.objective_curve
    exp_data['w'] = trainer.w
    exp_data['train_scores'] = trainer.train_score
    exp_data['test_scores'] = trainer.test_score
    exp_data['w_history'] = trainer.w_history

    meta_data['dataset_name'] = 'syntetic'
    meta_data['annotation_type'] = 'full+weak'
    meta_data['label_type'] = 'image-level labelling'
    meta_data['trainer'] = 'komodakis+latent+kappa'
    meta_data['train_score'] = train_score
    meta_data['test_score'] = test_score
    meta_data['time_elapsed'] = time_elapsed

    return ExperimentResult(exp_data, meta_data)
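
A similar hedged driver sketch for the synthetic experiment; the dataset index and sample counts are illustrative and not recommendations taken from the original code.

import logging

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Small synthetic run; all keyword values are examples only.
    result = syntetic_over_weak(dataset=1, n_train_full=10, n_train=50,
                                max_iter=50, test_samples=5)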