Example #1
File: Scatter2.py Project: mmssouza/coevol
def scatter(ss, fig):
    db = {}
    for im_file in nomes:
        nmbe = desc.bendenergy(path + im_file, ss)
        db[im_file] = numpy.hstack((cl[im_file], numpy.log(nmbe())))
        # figure names
    data1 = numpy.array([db[i] for i in db.keys()])
    Y = data1[:, 0].astype(int)
    X1 = scale(data1[:, 1:])
    #iso = Isomap(n_neighbors=98, max_iter= 2500)
    mds = MDS(n_init=20, dissimilarity='euclidean', max_iter=2500)
    #X1 = iso.fit_transform(data1[:,1:])
    X1 = mds.fit_transform(data1[:, 1:])

    #r = ((pdist(data1[:,1:]) - pdist(X1))**2).sum()
    #s = ((pdist(X1)-pdist(X1).mean())**2).sum()
    #R2 = 1-r/s
    #print R2
    data = numpy.vstack((Y, X1.transpose())).transpose()
    db = dict(zip(db.keys(), data))
    ax = PLT.subplot(111)
    PLT.gray()
    PLT.xlim((-5., 5.))
    PLT.ylim((-5., 5.))
    for im in db.keys():
        # load the shape image and build a recolored, transparent-background thumbnail
        img = Image.open(path + im)
        img.thumbnail((100, 100), Image.ANTIALIAS)
        #img = PIL.ImageOps.invert(img.convert("L"))
        img = img.convert("RGBA")
        datas = img.getdata()
        newData = []
        for item in datas:
            if item[0] == 255 and item[1] == 255 and item[2] == 255:
                # white background pixels become fully transparent
                newData.append((255, 255, 255, 0))
            else:
                # shape pixels take the class color, with an explicit opaque alpha
                newData.append(ImageColor.getrgb(colors[int(db[im][0])]) + (255,))
        img.putdata(newData)
        imagebox = OffsetImage(numpy.array(img), zoom=.15)
        xy = [db[im][1], db[im][2]]  # coordinates to position this image
        ab = AnnotationBbox(imagebox,
                            xy,
                            xybox=(2.5, -2.5),
                            xycoords='data',
                            boxcoords="offset points",
                            frameon=False)
        ax.add_artist(ab)
        ax.grid(False)
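The function above relies on module-level names (path, nomes, cl, colors and the imports) that are defined elsewhere in Scatter2.py. The snippet below is not from the repository; it is a minimal, self-contained sketch of the same plotting idea, placing image thumbnails at 2-D coordinates with OffsetImage and AnnotationBbox, with random data standing in for the shape thumbnails and the MDS embedding.

import numpy
import matplotlib.pyplot as PLT
from matplotlib.offsetbox import OffsetImage, AnnotationBbox

rng = numpy.random.RandomState(0)
thumb = rng.random_sample((10, 10))         # stand-in for a shape thumbnail
coords = rng.uniform(-4., 4., size=(5, 2))  # stand-in for the MDS coordinates

ax = PLT.subplot(111)
PLT.gray()
PLT.xlim((-5., 5.))
PLT.ylim((-5., 5.))
for x, y in coords:
    # one thumbnail per point, anchored in data coordinates
    ab = AnnotationBbox(OffsetImage(thumb, zoom=2.), (x, y),
                        xycoords='data', frameon=False)
    ax.add_artist(ab)
ax.grid(False)
PLT.savefig("scatter_sketch.png")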
Example #2
File: eval_mad.py Project: mmssouza/coevol
#!/usr/bin/python

import sys
import descritores
import pylab
import cPickle
import metrics

db = cPickle.load(open(sys.argv[1] + "/classes.txt"))
#names = cPickle.load(open(sys.argv[1]+"/names.pkl"))
names = db.keys()
scales = pylab.loadtxt(sys.argv[2])

X = [
    pylab.vstack(([db[f] for f in names],
                  pylab.array([
                      pylab.log(descritores.bendenergy(sys.argv[1] + f, s)())
                      for f in names
                  ]).T)).T for s in scales
]

for x in X:
    s = metrics.silhouette(x[:, 1:], x[:, 0].astype(int) - 1)
    print pylab.mean(s), pylab.std(s)

for n, x in zip(['nmbe_pso.pkl', 'nmbe_de.pkl', 'nmbe_sa.pkl'], X):
    with open(n, "wb") as f:
        cPickle.dump(dict(zip(names, x)), f)
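Each value dumped above is a row whose first entry is the class label and whose remaining entries are the log-NMBE features. The snippet below is not part of eval_mad.py; it is a minimal sketch of reading one of the dumped files back under that layout.

import cPickle
import pylab

with open("nmbe_pso.pkl", "rb") as f:
    feats = cPickle.load(f)   # {image name: [class, log-NMBE features...]}

rows = pylab.array([feats[k] for k in feats.keys()])
labels = rows[:, 0].astype(int)
X = rows[:, 1:]
print labels.shape, X.shape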
Example #3
def ff(cc, s):
    return np.log(desc.bendenergy(cc, s)())
Example #4
cl = pickle.load(f)
f.close()

db = {}

# Definition of the scales according to Costa (1996)
S = 16
tau_max = 128.
tau_min = 0.8

oct_l = scipy.array([math.sqrt(2)**l for l in range(S)])

sigma = tau_min + (tau_max - tau_min) * (oct_l - math.sqrt(2)) / (oct_l.max() -
                                                                  math.sqrt(2))

print(sigma)
#sigma= scipy.logspace(0.1,1.65,N)
tt = time.time()

print("feature extraction")

for im_file in cl.keys():
    print(im_file)
    nmbe = descritores.bendenergy(diretorio + im_file, sigma)
    db[im_file] = scipy.hstack((cl[im_file], scipy.log(nmbe())))

with open(sys.argv[2], 'wb') as f:
    pickle.dump(db, f)

print("done feature extraction in {0} seconds".format(time.time() - tt))
Example #5
def ff(cc, s):
    return np.log(desc.bendenergy(cc, s)())
Example #6
if caminho:
    walk(caminho, visit, lista_de_arquivos)
else:
    sys.exit(-1)

if passo == "0":
    # Read the input files and generate descriptors,
    # plot graphs and generate output files
    sigma = np.logspace(sigma_min, sigma_max, sigma_n)
    figure(1)
    for im_file, out_file, plt_file in zip(lista_de_arquivos[0], lista_de_arquivos[1], lista_de_arquivos[2]):
        #   print im_file,"\t",out_file,"\t",plt_file,"\n"
        output = cStringIO.StringIO()
        fout = open(out_file, "w")
        k = curvatura(im_file, sigma)
        nmbe = bendenergy(k)
        for a in nmbe.phi[::-1]:
            output.write("{0: < 5,.3f} ".format(math.log(a)))
            output.write("\n")
        fout.write(output.getvalue())
        output.close()
        fout.close()
        a = np.loadtxt(out_file)
        plot(np.log(1 / sigma[::-1]), a, ".")
        savefig(plt_file)
        clf()

elif passo == "1":

    # dictionary mapping each figure to the class it belongs to
    classe_dic = dict(
Example #7
File: Scatter.py Project: mmssouza/coevol
8:"#aa2a2a",9:"#d4552a",10:"#ff7f2a",11:"#00d455",12:"#2aff55",13:"#7f2a55",14:"#aa5555",15:"#d47f55",	
16:"#ffaa55",17:"#2a2a00",18:"#2aff7f",19:"#7f007f",20:"#aa557f",21:"#d47f7f",22:"#ffaa7f",23:"#00ffaa",	
24:"#5500aa",25:"#7f2aaa",26:"#aa55aa",27:"#d4aaaa",28:"#ffd4aa",29:"#2a00d4",30:"#552ad4",31:"#7f55d4",	
32:"#aa7fd4"}

path = sys.argv[1]

with open(path+"classes.txt","r") as f:
 with open(path+"names.pkl","r") as g:
   cl = cPickle.load(f)
   nomes = cPickle.load(g)

db = {}

for im_file in nomes:
 nmbe = desc.bendenergy(path+im_file,sigma)
 db[im_file] = numpy.hstack((cl[im_file],numpy.log(nmbe())))

# nome das figuras
data1 = numpy.array([db[i] for i in db.keys()])

Y = data1[:,0].astype(int)
X1 = scale(data1[:,1:])
s = silhouette.silhouette(X1,Y-1)
print numpy.median(numpy.abs(1.-  s))

#iso = Isomap(n_neighbors=98, max_iter= 2500)
mds =  MDS(n_init = 20,dissimilarity = 'euclidean',max_iter = 2500)
#X1 = iso.fit_transform(data1[:,1:])
X1 = mds.fit_transform(data1[:,1:])
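Not from Scatter.py: a minimal, self-contained illustration of the same MDS call on random data. With the default number of components, fit_transform returns a two-column embedding, which the script then uses as scatter coordinates.

import numpy
from sklearn.manifold import MDS

rnd = numpy.random.RandomState(0).normal(size=(30, 10))
emb = MDS(n_init=20, dissimilarity='euclidean', max_iter=2500).fit_transform(rnd)
print(emb.shape)   # (30, 2): two MDS components per sample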
Example #8
warnings.simplefilter("ignore")

ss = pylab.loadtxt(sys.argv[1])
path = sys.argv[2]
dim = ss.shape[1] - 8
with open(path + "classes.txt", "rb") as f:
    with open(path + "names.pkl", "rb") as g:
        cl = pickle.load(f)
        nomes = pickle.load(g)

        clf = neighbors.KNeighborsClassifier(n_neighbors=3)
        it = model_selection.RepeatedStratifiedKFold(n_splits=5, n_repeats=50)

        for s in ss:
            sigma = s[4:4 + dim]
            SI, DB, CH = s[dim + 4], s[dim + 5], s[dim + 6]

            db = {}
            for im_file in nomes:
                nmbe = desc.bendenergy(path + im_file, sigma)
                db[im_file] = np.hstack((cl[im_file], np.log(nmbe())))
            # figure names

            Y = np.array([db[i][0] for i in db.keys()]).astype(int)
            X = scale(np.array([db[i][1:] for i in db.keys()]))
            res = model_selection.cross_val_score(clf, X, Y, cv=it, scoring="accuracy")
            st = "{0} {1} {2} {3} {4} {5}".format(s[1], s[2], s[3], SI, DB, CH)

            print(" ".join([st] + ["{:2.2f}".format(i) for i in sigma] +
                           ["{:0.2f} {:0.2f}".format(res.mean(), res.std())]))
            print("")
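Not part of the original script: a self-contained sketch of the same evaluation, a 3-nearest-neighbour classifier scored with repeated stratified cross-validation, run here on synthetic features instead of the NMBE descriptors.

import numpy as np
from sklearn import neighbors, model_selection
from sklearn.preprocessing import scale

rng = np.random.RandomState(0)
Y = np.repeat(np.arange(3), 20)                   # three classes, 20 samples each
X = scale(rng.normal(size=(60, 5)) + Y[:, None])  # synthetic, class-separated features

clf = neighbors.KNeighborsClassifier(n_neighbors=3)
it = model_selection.RepeatedStratifiedKFold(n_splits=5, n_repeats=10)
res = model_selection.cross_val_score(clf, X, Y, cv=it, scoring="accuracy")
print("{:0.2f} {:0.2f}".format(res.mean(), res.std()))   # mean and spread over 50 folds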