コード例 #1
0
def readin():
    """Load the stored best-fit solution from disk.

    Usage:
        p, t, s, m, w, model, data, isos, isow = readin()

    Returns, in order: fit parameters, log-age grid, star formation
    rate, metallicity, time weights, model CMD, observed data array,
    isochrone set, and per-isochrone weights.
    """
    upper = Numeric.log10(13.7e9)
    observed = iso.readfits(isodir + "/datarr.fits")
    isochrones = iso.readisos(isodir)
    time_grid = utils.frange(8, upper, 0.001)
    pars = parameters()
    pars.load()
    met, rate, weights, iso_weights, cmd = compute_model(
        time_grid, pars, isochrones, observed)
    return (pars, time_grid, rate, met, weights, cmd,
            observed, isochrones, iso_weights)
コード例 #2
0
ファイル: plotsmooth.py プロジェクト: certik/chemev
def readin():
    """Reload the best fit:
        p, t, s, m, w, model, data, isos, isow = readin()
    i.e. parameters, time, sfr, metallicity, weights, model, data,
    isochrones, isochrone weights.
    """
    # Fit parameters can be loaded independently of the data files.
    fit_pars = parameters()
    fit_pars.load()
    grid = utils.frange(8, Numeric.log10(13.7e9), 0.001)
    hess = iso.readfits(isodir + "/datarr.fits")
    iso_set = iso.readisos(isodir)
    feh, rate, wts, iso_wts, cmd = compute_model(grid, fit_pars, iso_set, hess)
    return fit_pars, grid, rate, feh, wts, cmd, hess, iso_set, iso_wts
コード例 #3
0
# -*- coding: utf-8 -*-
import pandas as pd
import cepy as cp

# Read the species data file (rows indexed by species name).
matriz = pd.read_csv("especies.csv", header=0, index_col=0, encoding="utf8")
names = cp.make_cepnames(matriz.index)
# Re-insert a space between the 4-letter genus and species abbreviations
# produced by make_cepnames (e.g. "GenuSpec" -> "Genu Spec").
names = [name[0:4] + ' ' + name[4:8] for name in names]
matriz.index = names

# Call the user-defined function parameters().
from params import parameters
p = parameters(matriz, freq=True, dom=True, tofile=False)

# Total number of individuals.
t = p['abundance'].sum()

# Total number of species.
s = len(matriz)

# Number of species per occurrence category.
# NOTE(review): category labels are Portuguese runtime strings
# ("Acessoria" = accessory, "Acidental" = accidental, "Constante" =
# constant) — they must match what parameters() emits; do not translate.
a = len(p[p['category'] == "Acessoria"])
d = len(p[p['category'] == "Acidental"])
c = len(p[p['category'] == "Constante"])

# Print the results as a table.
from tabulate import tabulate
print(
    tabulate(p,
             headers='keys',
             tablefmt='psql',
    print(dates_list)  # PRINT THE JOBS LIST

    # Starting the loop over the Jobs list
    for index, row in dates_list.iterrows():
        # Compose de query parameters from CSV
        start_date = datetime.strptime(row["start"],
                                       "%d/%m/%Y").strftime("%Y-%m-%d")
        end_date = datetime.strptime(row["end"],
                                     "%d/%m/%Y").strftime("%Y-%m-%d")
        start_time = row["start_time"]
        end_time = row["end_time"]
        hashtag = row["query"]
        capture_name = row["capture_name"]

        # create the query string
        query_params, pharse = p.parameters(start_date, end_date, start_time,
                                            end_time, hashtag)
        pharse = pharse["value"]
        query_params["query"] = pharse

        # Create the output name
        try:
            if not os.path.exists(capture_name):
                os.makedirs(capture_name)
        except IndexError:
            print("ERROR")
            pass

        # Generate the Filename
        actual_time = datetime.now()
        capture_time = actual_time.strftime("%d-%m-%Y-%H-%M-%S")
        filename = (
コード例 #5
0
ファイル: prot.py プロジェクト: certik/chemev
# Build the model CMD for the current best-fit parameters (gauss variant).
import sys
sys.path.append("/home/ondrej/py")

import utils
import iso
from params import parameters
import fit

# Toggle: spread each isochrone's metallicity with a Gaussian kernel.
gauss = True

isodir = "/home/ondrej/data/isochrones/696/halo"
data = iso.readfits(isodir + "/datarr.fits")
isos = iso.readisos(isodir)
# Grid of log10(age/yr) from 1e8 yr up to ~10^10.25 yr, step 0.001 dex.
t = utils.frange(8, 10.25, 0.001)
p = parameters()
p.load()
m = fit.metallicity(t, p)
s = fit.sfr(t, p)
# Convert the SFR curve into per-time-bin weights.
w = utils.calculateweights(t, s)
if gauss:
    # p.sigma is the Gaussian metallicity spread of the fit.
    isow = iso.getisosweights_gauss(w, 10.**t, m, isos, p.sigma)
else:
    isow = iso.getisosweights(w, 10.**t, m, isos)
model = iso.computeCMD(isow, isos)
#model=isos[0][2]*0.0+1.0
# Scale the model so its total counts match the observed data.
model = utils.normalize(model, sum(data.flat))
#model=model/sum(model.flat)

def plot_residuals(d, m):
コード例 #6
0
ファイル: prot.py プロジェクト: certik/chemev
# Build the model CMD for the current best-fit parameters
# (variant of prot.py with the Gaussian metallicity spread disabled).
import sys
sys.path.append("/home/ondrej/py")

import utils
import iso
from params import parameters
import fit

# Toggle: spread each isochrone's metallicity with a Gaussian kernel.
gauss=False

isodir="/home/ondrej/data/isochrones/696/halo"
data=iso.readfits(isodir+"/datarr.fits")
isos = iso.readisos(isodir)
# Grid of log10(age/yr) from 1e8 yr up to ~10^10.25 yr, step 0.001 dex.
t=utils.frange(8,10.25,0.001)
p=parameters()
p.load()
m=fit.metallicity(t,p)
s=fit.sfr(t,p)
# Convert the SFR curve into per-time-bin weights.
w=utils.calculateweights(t,s)
if gauss:
    isow=iso.getisosweights_gauss(w,10.**t,m,isos,p.sigma)
else:
    isow=iso.getisosweights(w,10.**t,m,isos)
model=iso.computeCMD(isow,isos)
#model=isos[0][2]*0.0+1.0
# Scale the model so its total counts match the observed data.
model=utils.normalize(model,sum(data.flat))
#model=model/sum(model.flat)

def plot_residuals(d,m):
    import pylab
コード例 #7
0
# This is the main script file; it is intended to be minimal and to run
# all other parts of the code.
import torch
# TODO: fix the class cropper
# TODO: fix the data flattening
# TODO: replace cross entropy with NLL later on

# Load configuration.
from params import parameters
conf = parameters()

# Load dataset.
import dataset_loader

# c = class_counter(conf,datasets=None, n_classes=2)
# n = number of samples, m = feature dimension, c = number of classes
# (presumably — TODO confirm against dataset_loader.data_shape).
n, m, c = dataset_loader.data_shape(conf, datasets=None, n_classes=2)
data_loaders = dataset_loader.get_dls(conf, n_classes=c)

# Initialize network.
import model_conf

loss_func = torch.nn.CrossEntropyLoss()  # TODO: replace with NLL later on
# model,optimizer = get_model(conf,m,c)
learner = model_conf.Learner(*model_conf.get_model(conf, m, c), loss_func,
                             data_loaders)

# Train.
from trainer import fit
fit(conf, learner)

# Plot.
print("hi!")
コード例 #8
0
ファイル: main.py プロジェクト: guy-amir/core
# Load libraries.
from params import parameters
from dl import get_dataloaders
from model_conf import Forest
from train_conf import Trainer
import pandas as pd

# Load default parameters (including the torch device selection).
prms = parameters()

# Build train/test datasets and their corresponding dataloaders.
trainset, testset, trainloader, testloader = get_dataloaders(prms)


def df_maker(loss_list, val_acc_list, train_acc_list, wav_acc_list,
             cutoff_list, smooth_list):
    """Assemble per-epoch training metrics into a single DataFrame.

    Base columns come from the loss / accuracy histories; wavelet
    columns (one per cutoff) are added only when both prms.wavelets and
    prms.use_tree are enabled; one "layer chunk" column is added per
    smoothness entry.

    NOTE(review): no return statement is visible — this snippet appears
    truncated (a trailing ``return df`` is presumably missing).
    """
    df = pd.DataFrame({
        'loss_list': loss_list,
        'val_acc_list': val_acc_list,
        'train_acc_list': train_acc_list
    })
    if prms.wavelets and prms.use_tree:
        # wav_acc_list is epoch-major: wav_acc_list[epoch][cutoff_index].
        for ii in range(len(wav_acc_list[0])):
            df[f'{cutoff_list[ii]} wavelets'] = [
                wav_acc_list[jj][ii] for jj in range(len(wav_acc_list))
            ]
    # smooth_list is epoch-major as well: smooth_list[epoch][chunk_index].
    for kk in range(len(smooth_list[0])):
        df[f'layer chunk {kk}'] = [
            smooth_list[jj][kk] for jj in range(len(smooth_list))
        ]
コード例 #9
0
def simul(isodir):
    """ Read in parameters, data and isochrones. Create callback functions
    for the optimization routine, one of which will return the log(likelihood)
    and the other of which will print the best-fit parameter values. Having
    done this, call the optimization routine to minimize log(L).

    NOTE(review): Python 2 source (print statements, dict.has_key).
    """
    # Upper edge of the time grid: log10 of 13.7 Gyr in years.
    log_tmax = math.log10(13.7e9)
    params=parameters()
    eps=0.01
    # Metallicity ([Fe/H]) curve parameters.
    # Signature appears to be params.set(name, value, min, max, fit_flag)
    # -- TODO confirm against params.parameters.
    #feh
    params.set("m0y"   ,1.0, 0,2.5,True)
    params.set("m0cphi",-0.001,  -pi/2+eps,0,True)
    params.set("m0cr"  ,0.01,  0,2,True)
    params.set("m1y"   ,-2.5,  -2.5,0.9,True)
    params.set("m1cphi",1.67,  pi/2+eps,pi,True)
    params.set("m1cr"  ,1.06,  0,2,True)
    params.set("sigma" ,0.2,  0,1, True)
    params.set("dsigmadlogt" ,0.2,  -1,1, True)

    # Star formation rate curve parameters.
    #sfr
    params.set("s0x"   ,8.0,   8.0,9.0,True)
    params.set("s0y"   ,0.5,   0.0,1,True)
    params.set("s0tx"  ,0.1,   0.,1.,True)
    params.set("s0ty"  ,0.1,   0,1,True)
    params.set("s1tx"  ,0.1,   0.,1.,True)
    params.set("s1ty"  ,0.1,   -1,1,True)
    params.set("s1x"   ,0.5,   0,1,True)
    params.set("s1y"   ,1.0,   0.0,1.0,False)
    params.set("s2x"   ,log_tmax,  9.5,10.25,True)
    params.set("s2y"   ,0.1,   0,1.0,True)
    params.set("s2tx"  ,0.1,   0.,1.,True)
    params.set("s2ty"  ,0.1,   0.,1.,True)

    if len(sys.argv) == 2:
        if sys.argv[1] == "start": #run with a param to start from the beginning
            params.save()
    params.load()
    # Backfill defaults for parameters absent from older saved fits.
    # NOTE(review): these set() calls pass 4 arguments where every call
    # above passes 5 -- verify against the params.set signature.
    if not params.pars.has_key('dsigmadlogt'):
        params.set('dsigmadlogt',0.,0,False)
    if not params.pars.has_key('dsigmadlogs'):  # Hook for SFR-depenedent spread; not fully implemented 
        params.set('dsigmadlogs',0.,0,False)
    if len(sys.argv) == 2:
        if sys.argv[1] == "nudge": #Tweak the values near their limits
             print "Nudging parameters near the limits"
             p1 = params.getl()
             utils.nudge(params)
             p2 = params.getl()
             # Report every parameter whose value the nudge changed.
             for pp1,pp2 in zip(p1,p2):
                 if pp1[1] != pp2[1]:
                      print "%s %.8f -> %.8f" % (pp1[0],pp1[1],pp2[1])

    data=iso.readfits(isodir+"/datarr.fits")
    isos = iso.readisos(isodir)
    t=utils.frange(8,log_tmax,0.001)
    def f(par):
        # Objective callback: set trial parameter values, rebuild the
        # model CMD, and return the log-likelihood versus the data.
        params.setvalues(par)
        p = params
        w=utils.calculateweights(t,sfr(t,params))
        # isow=iso.getisosweights(w,10.**t,metallicity(t,params),isos)
        if p.sigma > 0.:
            # NOTE(review): these are sequential ifs, not elif -- when
            # dsigmadlogt != 0 and dsigmadlogs != 0, the sgauss branch
            # overwrites the vgauss result.
            if p.dsigmadlogt == 0.:
                isow=iso.getisosweights_gauss(w,10.**t,metallicity(t,p),isos,p.sigma)
            if p.dsigmadlogt != 0.:
#               print "Gaussian sigma, ds/dlogt ",p.sigma,p.dsigmadlogt
                isow=iso.getisosweights_vgauss(w,10.**t,metallicity(t,p),isos,p.sigma,p.dsigmadlogt)
            if p.dsigmadlogs != 0.: # Hook for SFR-depenedent spread; not fully implemented 
                isow=iso.getisosweights_sgauss(w,10.**t,sfr(t,params),metallicity(t,p),
                   isos,p.sigma,p.dsigmadlogs)
        else:
            isow=iso.getisosweights(w,10.**t,metallicity(t,p),isos)

        m=iso.computeCMD(isow,isos)
        m=utils.normalize(m,sum(data.flat))
        return utils.loglikelihood(m,data)

    # Constant term of the likelihood: sum of d*log(d) over the data,
    # with a floor of 1e-20 to avoid log(0).
    d = numarray.maximum(data,1e-20)
    llhC=sum( (d*numarray.log(d)).flat )
    def b(par,value,iter):
        # Progress callback: persist the current best fit and report
        # the raw ("henry") and normalized ("tom") statistics.
        params.setvalues(par)
        params.save()
        print "henry:",value,"tom:",2.0*(value+llhC),"iter:",iter,time.ctime()
        sys.stdout.flush()

    # Run the bounded simplex minimization of f, reporting through b.
    optimization.minmax(optimization.fmin_simplex,f,
            params.getvalues(),params.min(),params.max(),b)