Example 1
    # Create a new model, with this distribution
    new_detector = detector.copy()

    new_detector.kernel_templates = counts
    new_detector.support = None
    new_detector.use_alpha = False

    # Return model
    return new_detector


if __name__ == '__main__':
    import argparse
    from settings import load_settings

    ag.set_verbose(True)

    parser = argparse.ArgumentParser(
        description="Convert model to integrate background model")
    parser.add_argument('settings',
                        metavar='<settings file>',
                        type=argparse.FileType('r'),
                        help='Filename of settings file')
    parser.add_argument('bkg',
                        metavar='<background file>',
                        type=argparse.FileType('rb'),
                        help='Background model file')
    parser.add_argument('output',
                        metavar='<output file>',
                        type=argparse.FileType('wb'),
                        help='Model output file')
Example 2
import numpy as np
import amitgroup as ag
import itertools as itr
import sys
import os


import pnet
import time

def test(ims, labels, net):
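    # Classify the images and return a boolean array marking which
    # predictions match the ground-truth labels.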
    yhat = net.classify(ims)
    return yhat == labels

if pnet.parallel.main(__name__): 
    ag.set_verbose(True)
    print("1")
    import argparse
    parser = argparse.ArgumentParser()
    #parser.add_argument('seed', metavar='<seed>', type=int, help='Random seed')
    #parser.add_argument('param', metavar='<param>', type=string)
    
    parser.add_argument('model', metavar='<model file>', type=argparse.FileType('rb'), help='Filename of model file')
    print("ohhh")
    parser.add_argument('data', metavar='<mnist data file>', type=argparse.FileType('rb'), help='Filename of data file')
    parser.add_argument('label', metavar='<mnist label file>', type=argparse.FileType('rb'), help='Filename of label file')
    parser.add_argument('numOfClassModel', metavar='<numOfClassModel>', type=int, help='Number of class models')

    args = parser.parse_args()

    param = args.model
Example 3
from time import time

import numpy as np
import amitgroup as ag


def trial(seed=0):
    eps = 1e-10
    start = time()
    ag.set_verbose(True)
    np.set_printoptions(precision=2, suppress=True)
    K = 2
    eachN = 100000
    N = K * eachN
    M = 500
    np.random.seed(0)
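    # Synthetic ground truth: K Bernoulli templates (theta), binary
    # per-component support masks (alphas), and a flat background
    # probability b used later to correct the fitted templates.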
    theta = np.random.random((K, M))
    alphas = (np.random.random((K, M)) < 0.5).astype(float)
    print(alphas.mean())
    #alphas = np.tile(np.random.random(M) < 0.5, (K, 1))
    #b = np.clip(np.random.random(M), 0.1, 0.9)
    b = np.ones(M) * 0.2
    print(theta)
    print(b)

    np.random.seed(seed + 1000)
    X = np.zeros((N, M))
    A = np.zeros((N, M))
    for k in range(K):
        for i in range(eachN):
            X[k * eachN + i] = np.random.random(M) < theta[k]
            A[k * eachN + i] = alphas[k]

    #X = (np.random.random((N, M)) < 0.5).astype(float)
    X *= A
    end = time()
    print(end - start)
    model = ag.stats.BernoulliMixture(K, X.astype(np.uint8))
    model.run_EM(eps, 1e-5)
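    # X was zeroed outside the per-sample masks (X *= A), so the fitted
    # templates are deflated there.  remix(A) is used to obtain a
    # per-component support map, and the background probability b is added
    # back over the unsupported fraction to correct the templates.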
    support = model.remix(A)
    corrected = model.templates + (1 - support) * b
    print "X"
    print X
    print "alphas"
    print A
    #print "b"
    #print b

    N = K * eachN
    X2 = np.zeros((N, M))
    A2 = np.zeros((N, M))
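    # Reference data set drawn from the full generative model: foreground
    # features where alpha = 1, background draws (probability b) elsewhere.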
    for k in range(K):
        for i in range(eachN):
            back = (np.random.random(M) < b).astype(float)
            fore = (np.random.random(M) < theta[k]).astype(float)
            X2[k * eachN + i] = alphas[k] * fore + (1 - alphas[k]) * back

    model2 = ag.stats.BernoulliMixture(K, X2.astype(np.uint8))
    model2.run_EM(eps, 1e-5)
    #print X
    #print X2

    aff1 = model.affinities
    aff2 = model2.affinities
    print('s', aff1.shape)

    print('support')
    print(support)

    model2templates = np.clip(model2.templates, 0.05, 0.95)
    corrected = np.clip(corrected, 0.05, 0.95)
    model1templates = np.clip(model.templates, 0.05, 0.95)
    both = np.fabs(
        [model2templates - corrected,
         model2templates[::-1] - corrected]).sum(axis=1).sum(axis=1)
    if np.argmin(both) == 1:
        model2templates = model2templates[::-1]
        aff2 = aff2[:, ::-1]

    #print aff1
    #print aff2

    print('model')
    print(model1templates)
    print('corrected')
    print(corrected)
    print('model2')
    print(model2templates)
    print('---')
    scores = np.fabs(corrected -
                     model2templates).sum(), np.fabs(model1templates -
                                                     model2templates).sum()
    print(scores[0], scores[1])

    return corrected, model2templates, scores
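
For reference, a minimal driver for the experiment above (a sketch, not part of the original snippet): it runs a few seeds and prints the two error scores returned by trial(), i.e. the summed absolute difference between the background-corrected templates and the reference model, and between the uncorrected templates and the reference model.

if __name__ == '__main__':
    # scores[0]: |corrected - model2templates|, scores[1]: |model1templates - model2templates|
    for seed in range(3):
        _, _, scores = trial(seed)
        print('seed %d: corrected error %.2f, uncorrected error %.2f'
              % (seed, scores[0], scores[1]))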
Example 4
alpha = args.alpha[0]
bfrom, bto, eta, bN = args.test_conjugate
bN = int(bN)
surplus_file = args.surplus[0]
eta0 = args.penalty[0]
rho0 = args.rho[0]
if deform_type == 'none':
    deform_type = None

verbose = args.verbose

import numpy as np
from classifier import classify, surplus
from scipy.optimize import fmin
from time import time
import amitgroup as ag
ag.set_verbose(verbose)

features_data = np.load(feat_file)
all_features = features_data['features']
all_labels = features_data['labels']
mixtures_data = np.load(mixtures_file)
all_templates = mixtures_data['templates']
coefs = np.load(coef_file)

try:
    all_graylevels = features_data['originals']
    all_graylevel_templates = mixtures_data['graylevel_templates']
except KeyError:
    raise Exception(
        "The feature file must be run with --save-originals, and the mixtures must be trained with this file"
    )


def trial(seed=0):
    eps = 1e-10
    start = time()
    ag.set_verbose(True)
    np.set_printoptions(precision=2, suppress=True)
    K = 2
    eachN = 100000 
    N = K*eachN
    M = 500 
    np.random.seed(0)
    theta = np.random.random((K, M))
    alphas = (np.random.random((K, M)) < 0.5).astype(float)
    print(alphas.mean())
    #alphas = np.tile(np.random.random(M) < 0.5, (K, 1))
    #b = np.clip(np.random.random(M), 0.1, 0.9)
    b = np.ones(M) * 0.2
    print(theta)
    print(b)

    np.random.seed(seed+1000)
    X = np.zeros((N, M))
    A = np.zeros((N, M))
    for k in range(K):
        for i in range(eachN):
            X[k*eachN+i] = np.random.random(M) < theta[k]
            A[k*eachN+i] = alphas[k]

    #X = (np.random.random((N, M)) < 0.5).astype(float)
    X *= A
    end = time()
    print(end - start)
    model = ag.stats.BernoulliMixture(K, X.astype(np.uint8))
    model.run_EM(eps, 1e-5)
    support = model.remix(A)
    corrected = model.templates + (1-support) * b
    print "X"
    print X
    print "alphas"
    print A
    #print "b"
    #print b

    N = K*eachN
    X2 = np.zeros((N, M))
    A2 = np.zeros((N, M))
    for k in range(K):
        for i in range(eachN):
            back = (np.random.random(M) < b).astype(float)
            fore = (np.random.random(M) < theta[k]).astype(float)
            X2[k*eachN+i] = alphas[k] * fore + (1-alphas[k]) * back

    model2 = ag.stats.BernoulliMixture(K, X2.astype(np.uint8))
    model2.run_EM(eps, 1e-5)
    #print X
    #print X2

    aff1 = model.affinities
    aff2 = model2.affinities
    print('s', aff1.shape)

    print('support')
    print(support)


    model2templates = np.clip(model2.templates, 0.05, 0.95)
    corrected = np.clip(corrected, 0.05, 0.95)
    model1templates = np.clip(model.templates, 0.05, 0.95)
    both = np.fabs([model2templates - corrected, model2templates[::-1] - corrected]).sum(axis=1).sum(axis=1)
    if np.argmin(both) == 1:
        model2templates = model2templates[::-1]
        aff2 = aff2[:,::-1]

    #print aff1
    #print aff2
        
    print('model')
    print(model1templates)
    print('corrected')
    print(corrected)
    print('model2')
    print(model2templates)
    print('---')
    scores = np.fabs(corrected - model2templates).sum(), np.fabs(model1templates - model2templates).sum()
    print(scores[0], scores[1])

    return corrected, model2templates, scores
Example 7
from time import time

import numpy as np
import amitgroup as ag


def trial(seed=0):
    eps = 1e-10
    start = time()
    ag.set_verbose(True)
    np.set_printoptions(precision=2, suppress=True)
    K = 2
    eachN = 100000
    N = K * eachN
    M = 40
    pool_length = 2
    np.random.seed(0)
    theta = np.random.random((K, M))
    alphas = (np.random.random((K, M)) < 0.5).astype(float)
    print(alphas.mean())
    # alphas = np.tile(np.random.random(M) < 0.5, (K, 1))
    # b = np.clip(np.random.random(M), 0.1, 0.9)
    b = np.ones(M) * 0.2
    b2 = np.ones(M // pool_length) * 0.36
    print(theta)
    print(b)

    np.random.seed(seed + 1000)
    X = np.zeros((N, M))
    A = np.zeros((N, M))
    for k in range(K):
        for i in range(eachN):
            X[k * eachN + i] = np.random.random(M) < theta[k]
            A[k * eachN + i] = alphas[k]

    # X = (np.random.random((N, M)) < 0.5).astype(float)
    X *= A
    end = time()
    print(end - start)
    Y = maxpool(X, pool_length)
    model = ag.stats.BernoulliMixture(K, Y.astype(np.uint8))
    model.run_EM(eps, 1e-5)
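    # Pool the per-sample masks as well: meanpool gives the fraction of each
    # pooled window that lies inside the object, maxpool gives a binary
    # "any coverage" indicator.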
    B = meanpool(A, pool_length)
    B2 = maxpool(A, pool_length)
    print "#" * 80
    print A.shape
    print B.shape
    support = model.remix(B)
    support2 = model.remix(B2)
    # corrected = model.templates + (1-(support+support2)/2) * b2
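    # Correction under max-pooling: a pooled unit outside the support stays
    # off only if all pool_length underlying background units are off, so the
    # background contributes a factor (1 - b) ** pool_length, weighted by the
    # unsupported fraction (1 - support).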
    corrected = 1 - (1 - model.templates) * (1 - b[0]) ** (pool_length * (1 - support))
    print "X"
    print X
    print "alphas"
    print A
    # print "b"
    # print b

    N = K * eachN
    X2 = np.zeros((N, M))
    A2 = np.zeros((N, M))
    for k in range(K):
        for i in range(eachN):
            back = (np.random.random(M) < b).astype(float)
            fore = (np.random.random(M) < theta[k]).astype(float)
            X2[k * eachN + i] = alphas[k] * fore + (1 - alphas[k]) * back

    print "---------->"
    print X2.shape
    Y2 = maxpool(X2, pool_length)
    print(Y2.shape)
    model2 = ag.stats.BernoulliMixture(K, Y2.astype(np.uint8))
    model2.run_EM(eps, 1e-5)
    # print X
    # print X2

    aff1 = model.affinities
    aff2 = model2.affinities
    print "s", aff1.shape

    print "support"
    print support

    model2templates = np.clip(model2.templates, 0.05, 0.95)
    corrected = np.clip(corrected, 0.05, 0.95)
    model1templates = np.clip(model.templates, 0.05, 0.95)
    both = np.fabs([model2templates - corrected, model2templates[::-1] - corrected]).sum(axis=1).sum(axis=1)
    if np.argmin(both) == 1:
        model2templates = model2templates[::-1]
        aff2 = aff2[:, ::-1]

    # print aff1
    # print aff2

    print "model"
    print model1templates
    print "corrected"
    print corrected
    print "model2"
    print model2templates
    print "---"
    print (corrected - model2templates)
    scores = np.fabs(corrected - model2templates).sum(), np.fabs(model1templates - model2templates).sum()
    print(scores[0], scores[1])

    return corrected, model2templates, scores
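
The maxpool and meanpool helpers used above are not shown in this snippet. A minimal sketch consistent with how they are used here (non-overlapping pooling along the feature axis, so an (N, M) array becomes (N, M // pool_length)) could look like this:

def maxpool(X, pool_length):
    """Non-overlapping max over consecutive groups of pool_length features."""
    N, M = X.shape
    return X.reshape(N, M // pool_length, pool_length).max(axis=-1)


def meanpool(X, pool_length):
    """Non-overlapping mean over consecutive groups of pool_length features."""
    N, M = X.shape
    return X.reshape(N, M // pool_length, pool_length).mean(axis=-1)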