Code example #1 (score: 0)
File: kernel.py — Project: joseph-chan/rqpersonalsvn
def _run_top_fisher():
    """Run Linear Kernel with {Top,Fisher}Features.

    Trains a positive and a negative HMM on word features built from cube
    data, derives TOP and Fisher-kernel (FK) feature sets from the two
    models, and hands each set to ``_compute_top_fisher``.
    """

    # put some constantness into randomness
    Math_init_random(dataop.INIT_RANDOM)

    data = dataop.get_cubes(4, 8)
    prefix = "topfk_"
    params = {
        prefix + "N": 3,
        prefix + "M": 6,
        prefix + "pseudo": 1e-1,
        prefix + "order": 1,
        prefix + "gap": 0,
        prefix + "reverse": False,
        prefix + "alphabet": "CUBE",
        prefix + "feature_class": "string_complex",
        prefix + "feature_type": "Word",
        prefix + "data_train": numpy.matrix(data["train"]),
        prefix + "data_test": numpy.matrix(data["test"]),
    }

    # Resolve the stored alphabet name ('CUBE') to the module-level constant
    # with a globals() lookup instead of eval() — same result for a plain
    # name, without evaluating an arbitrary expression string.
    alphabet = globals()[params[prefix + "alphabet"]]
    wordfeats = featop.get_features(
        params[prefix + "feature_class"],
        params[prefix + "feature_type"],
        data,
        alphabet,
        params[prefix + "order"],
        params[prefix + "gap"],
        params[prefix + "reverse"],
    )

    # NOTE(review): both models are trained on identical inputs; presumably
    # they diverge only through successive draws from the seeded RNG.
    pos_train = HMM(wordfeats["train"], params[prefix + "N"], params[prefix + "M"], params[prefix + "pseudo"])
    pos_train.train()
    pos_train.baum_welch_viterbi_train(BW_NORMAL)
    neg_train = HMM(wordfeats["train"], params[prefix + "N"], params[prefix + "M"], params[prefix + "pseudo"])
    neg_train.train()
    neg_train.baum_welch_viterbi_train(BW_NORMAL)

    # Copies of the trained models, pointed at the test observations.
    pos_test = HMM(pos_train)
    pos_test.set_observations(wordfeats["test"])
    neg_test = HMM(neg_train)
    neg_test.set_observations(wordfeats["test"])
    feats = {}

    # TOP kernel features derived from the positive/negative model pair.
    feats["train"] = TOPFeatures(10, pos_train, neg_train, False, False)
    feats["test"] = TOPFeatures(10, pos_test, neg_test, False, False)
    params[prefix + "name"] = "TOP"
    _compute_top_fisher(feats, params)

    # Fisher-kernel features; the prior estimated on the training set is
    # reused for the test set.
    feats["train"] = FKFeatures(10, pos_train, neg_train)
    feats["train"].set_opt_a(-1)  # estimate prior
    feats["test"] = FKFeatures(10, pos_test, neg_test)
    feats["test"].set_a(feats["train"].get_a())  # use prior from training data
    params[prefix + "name"] = "FK"
    _compute_top_fisher(feats, params)
Code example #2 (score: 0)
def _run_top_fisher():
    """Run Linear Kernel with {Top,Fisher}Features."""

    # Seed the RNG so repeated runs produce identical output.
    Math_init_random(dataop.INIT_RANDOM)

    data = dataop.get_cubes(4, 8)
    prefix = 'topfk_'

    def key(name):
        # Every parameter key carries the shared 'topfk_' prefix.
        return prefix + name

    params = {
        key('N'): 3,
        key('M'): 6,
        key('pseudo'): 1e-1,
        key('order'): 1,
        key('gap'): 0,
        key('reverse'): False,
        key('alphabet'): 'CUBE',
        key('feature_class'): 'string_complex',
        key('feature_type'): 'Word',
        key('data_train'): numpy.matrix(data['train']),
        key('data_test'): numpy.matrix(data['test'])
    }

    wordfeats = featop.get_features(
        params[key('feature_class')], params[key('feature_type')], data,
        eval(params[key('alphabet')]),
        params[key('order')], params[key('gap')], params[key('reverse')])

    def trained_hmm():
        # Build and fully train one HMM on the training word features.
        model = HMM(wordfeats['train'], params[key('N')],
                    params[key('M')], params[key('pseudo')])
        model.train()
        model.baum_welch_viterbi_train(BW_NORMAL)
        return model

    pos_train = trained_hmm()
    neg_train = trained_hmm()

    def test_copy(model):
        # Clone a trained model and point it at the test observations.
        clone = HMM(model)
        clone.set_observations(wordfeats['test'])
        return clone

    pos_test = test_copy(pos_train)
    neg_test = test_copy(neg_train)
    feats = {}

    # TOP kernel features from the positive/negative model pair.
    feats['train'] = TOPFeatures(10, pos_train, neg_train, False, False)
    feats['test'] = TOPFeatures(10, pos_test, neg_test, False, False)
    params[key('name')] = 'TOP'
    _compute_top_fisher(feats, params)

    # Fisher-kernel features: estimate the prior on the training data and
    # reuse it for the test features.
    feats['train'] = FKFeatures(10, pos_train, neg_train)
    feats['train'].set_opt_a(-1)
    feats['test'] = FKFeatures(10, pos_test, neg_test)
    feats['test'].set_a(feats['train'].get_a())
    params[key('name')] = 'FK'
    _compute_top_fisher(feats, params)
Code example #3 (score: 0)
File: kernel.py — Project: AsherBond/shogun
def _run_top_fisher ():
	"""Run Linear Kernel with {Top,Fisher}Features.

	Trains a positive and a negative HMM on word features built from cube
	data, then evaluates TOP and Fisher-kernel (FK) feature sets derived
	from the two models via _compute_top_fisher.
	"""

	# put some constantness into randomness
	Math_init_random(dataop.INIT_RANDOM)

	data=dataop.get_cubes(4, 8)
	prefix='topfk_'
	# All test parameters share the 'topfk_' key prefix.
	params={
		prefix+'N': 3,
		prefix+'M': 6,
		prefix+'pseudo': 1e-1,
		prefix+'order': 1,
		prefix+'gap': 0,
		prefix+'reverse': False,
		prefix+'alphabet': 'CUBE',
		prefix+'feature_class': 'string_complex',
		prefix+'feature_type': 'Word',
		prefix+'data_train': numpy.matrix(data['train']),
		prefix+'data_test': numpy.matrix(data['test'])
	}

	# eval() turns the stored alphabet name ('CUBE') into the module-level
	# constant of the same name.
	wordfeats=featop.get_features(
		params[prefix+'feature_class'], params[prefix+'feature_type'],
		data, eval(params[prefix+'alphabet']),
		params[prefix+'order'], params[prefix+'gap'], params[prefix+'reverse'])
	# Train two HMMs (positive/negative pair) on the same training features.
	pos_train=HMM(wordfeats['train'],
		params[prefix+'N'], params[prefix+'M'], params[prefix+'pseudo'])
	pos_train.train()
	pos_train.baum_welch_viterbi_train(BW_NORMAL)
	neg_train=HMM(wordfeats['train'],
		params[prefix+'N'], params[prefix+'M'], params[prefix+'pseudo'])
	neg_train.train()
	neg_train.baum_welch_viterbi_train(BW_NORMAL)
	# Copies of the trained models, pointed at the test observations.
	pos_test=HMM(pos_train)
	pos_test.set_observations(wordfeats['test'])
	neg_test=HMM(neg_train)
	neg_test.set_observations(wordfeats['test'])
	feats={}

	# TOP kernel features derived from the model pair.
	feats['train']=TOPFeatures(10, pos_train, neg_train, False, False)
	feats['test']=TOPFeatures(10, pos_test, neg_test, False, False)
	params[prefix+'name']='TOP'
	_compute_top_fisher(feats, params)

	# Fisher-kernel features; the prior estimated on training data is
	# reused for the test features.
	feats['train']=FKFeatures(10, pos_train, neg_train)
	feats['train'].set_opt_a(-1) #estimate prior
	feats['test']=FKFeatures(10, pos_test, neg_test)
	feats['test'].set_a(feats['train'].get_a()) #use prior from training data
	params[prefix+'name']='FK'
	_compute_top_fisher(feats, params)
Code example #4 (score: 0)
def _run_hmm():
    """Run generator for Hidden-Markov-Model.

    Trains an HMM distribution on cube data and records its log-likelihood,
    model derivatives and best-path statistics in the DISTRIBUTION output
    via ``fileop``.
    """

    # put some constantness into randomness
    Math_init_random(INIT_RANDOM)

    num_examples = 4
    params = {
        'name': 'HMM',
        'accuracy': 1e-6,
        'N': 3,
        'M': 6,
        'num_examples': num_examples,
        'pseudo': 1e-10,
        'order': 1,
        'alphabet': 'CUBE',
        'feature_class': 'string_complex',
        'feature_type': 'Word',
        'data': dataop.get_cubes(num_examples, 1)
    }
    output = fileop.get_output(category.DISTRIBUTION, params)

    # Resolve the alphabet name on the ``features`` module directly;
    # getattr() is clearer and safer than eval() on a built string.
    alphabet = getattr(features, params['alphabet'])
    feats = featop.get_features(params['feature_class'],
                                params['feature_type'], params['data'],
                                alphabet, params['order'])

    hmm = distribution.HMM(feats['train'], params['N'], params['M'],
                           params['pseudo'])
    hmm.train()
    hmm.baum_welch_viterbi_train(distribution.BW_NORMAL)

    output[PREFIX + 'likelihood'] = hmm.get_log_likelihood_sample()
    output[PREFIX + 'derivatives'] = _get_derivatives(
        hmm, feats['train'].get_num_vectors())

    # Sum best-path scores over all examples, and best-path states over all
    # (example, state) pairs.
    output[PREFIX + 'best_path'] = 0
    output[PREFIX + 'best_path_state'] = 0
    for i in xrange(num_examples):
        output[PREFIX + 'best_path'] += hmm.best_path(i)
        for j in xrange(params['N']):
            output[PREFIX + 'best_path_state'] += hmm.get_best_path_state(i, j)

    fileop.write(category.DISTRIBUTION, output)
Code example #5 (score: 0)
def _run_hmm ():
	"""Run generator for Hidden-Markov-Model.

	Trains an HMM on cube data and writes likelihood, derivative and
	best-path statistics through fileop.
	"""

	# Fix the random seed so repeated runs generate identical output.
	Math_init_random(INIT_RANDOM)

	num_examples = 4
	params = {
		'name': 'HMM',
		'accuracy': 1e-6,
		'N': 3,
		'M': 6,
		'num_examples': num_examples,
		'pseudo': 1e-10,
		'order': 1,
		'alphabet': 'CUBE',
		'feature_class': 'string_complex',
		'feature_type': 'Word',
		'data': dataop.get_cubes(num_examples, 1)
	}
	output = fileop.get_output(category.DISTRIBUTION, params)

	feats = featop.get_features(
		params['feature_class'], params['feature_type'], params['data'],
		eval('features.' + params['alphabet']), params['order'])

	model = distribution.HMM(
		feats['train'], params['N'], params['M'], params['pseudo'])
	model.train()
	model.baum_welch_viterbi_train(distribution.BW_NORMAL)

	output[PREFIX + 'likelihood'] = model.get_log_likelihood_sample()
	output[PREFIX + 'derivatives'] = _get_derivatives(
		model, feats['train'].get_num_vectors())

	# Accumulate best-path statistics over every example and state.
	total_path = 0
	total_state = 0
	for idx in xrange(num_examples):
		total_path += model.best_path(idx)
		for state in xrange(params['N']):
			total_state += model.get_best_path_state(idx, state)
	output[PREFIX + 'best_path'] = total_path
	output[PREFIX + 'best_path_state'] = total_state

	fileop.write(category.DISTRIBUTION, output)