Example #1
def gen_predictions():

    #--setup nlo code
    set_mstw()

    #--setup pdfs
    hq.setup(fname.ljust(100), 0)
    nsets = 97

    #--read kinematics
    t = pd.read_excel('expdata/eic.xlsx')
    t = t.to_dict(orient='list')
    npts = len(t['X'])

    #--compute F2c across kinematics
    for iset in range(nsets):
        hq.setup_pdfset(iset)

        t[iset] = []  #--create empty array to fill theory

        for i in range(npts):
            x = t['X'][i]
            Q2 = t['Q2'][i]
            Q = np.sqrt(Q2)
            #,f2,f2c,f2b,fl,flc,flb
            F2c = hq.mstwnc(x, Q, 1)[1]
            t[iset].append(F2c)

    #--convert all arrays to numpy arrays
    for k in t:
        t[k] = np.array(t[k])

    checkdir('data')
    save(t, 'data/predictions.po')
Example #2
def get_vocab_info(doc, train_idx, min_freq, max_vocab_size, n_gram, dir_path):
    if os.path.exists(os.path.join(dir_path, settings.vocab_file)):
        vocab_info = tools.load(os.path.join(dir_path, settings.vocab_file))
        if vocab_info['min_freq'] == min_freq and vocab_info['max_vocab_size'] == max_vocab_size \
            and vocab_info['df'] == len(train_idx) and vocab_info['n_gram'] == n_gram:
            return vocab_info
    tf = Counter()
    data_doc = [doc[i] for i in train_idx]
    for d in data_doc:  # avoid shadowing the doc argument
        for term, frequency in d:
            tf[term] += frequency
    tf_tuples = sorted(tf.items(), key=lambda x:
                       (-x[1], x[0]))  # sort by frequency, then alphabetically
    stoi = dict()
    itos = dict()
    valid_term = 0
    for term, freq in tf_tuples:
        if freq >= min_freq and valid_term < max_vocab_size:
            stoi[term] = valid_term
            itos[valid_term] = term
            valid_term += 1
        else:
            tf.pop(term)
    vocab_info = {
        "min_freq": min_freq,
        "max_vocab_size": max_vocab_size,
        'n_gram': n_gram,
        "stoi": stoi,
        "itos": itos,
        "tf": tf,
        'df': len(train_idx)
    }
    tools.save(os.path.join(dir_path, settings.vocab_file), vocab_info)
    return vocab_info
Example #3
def main():
    """
    Main program loop.
    """

    args = setup_args()
    """
    Get the meta arguments.
    """
    cmd = tools.meta(args)

    args = vars(args)
    corpus = load_corpus(filename=args.pop('file'), clean=args.pop('clean'))
    detector = create_detector(model=args.pop('model'),
                               extractor=args.pop('extractor'),
                               scorer=args.pop('scorer'),
                               filter=args.pop('filter'),
                               corpus=corpus,
                               **args)
    resolved, extrapolated = detect(detector=detector, corpus=corpus)

    cmd['model'] = str(type(detector).__name__)
    cmd['extractor'] = str(type(detector.extractor).__name__)
    cmd['scorer'] = str(type(detector.scorer).__name__)
    cmd['filter'] = str(type(detector.filter).__name__)
    cmd['resolver'] = str(type(detector.resolver).__name__)
    cmd['extrapolator'] = str(type(detector.extrapolator).__name__)
    cmd['postprocessor'] = str(type(detector.postprocessor).__name__)
    tools.save(args['output'], {
        'meta': cmd,
        'resolved': resolved,
        'extrapolated': extrapolated
    })
Example #4
    def runEndless(self, filename, interval=15):
        """Run tournaments endlessly until terminated, and save data every so often. The interval between 
        saves/progress reports can be modifed by the keyword interval (minutes)."""
        start = time.time()
        last_report = start
        t = 0  # Counter for triggering generational recording of statistics
        while True:  # Evolutionary loop
            self.tournament()  # Run one tournament
            if (t % self.popsize == 0):  # Record statistics every generation
                af, bf, d, c = self.fitStats()
                self.avgHistory.append(af)
                self.bestHistory.append(bf)
                self.divHistory.append(d)
                self.conHistory.append(c)
                self.dateEdited = str(date.today())

            if (time.time() - last_report) > (
                    interval * 60
            ):  # If it's been more than N minutes since last report/save, save/report
                print("\nSaving...")
                save(filename, self)
                print('Generations Run: %i' %
                      int(self.generationsRun))  # Print generations run so far
                print('Fitness Avg=%f, Max=%f, Div=%f, Con=%f' %
                      tuple(self.fitStats()))
                print('Time elapsed: %f sec / %f min / %f hours' %
                      ((time.time() - start), (time.time() - start) / 60,
                       (time.time() - start) / 3600))
                last_report = time.time()

            t += 1
Example #5
    async def on_ready():
        """Preliminary activity

        Checks all guilds for all users and initializes them in memory.
        """
        print('We have logged in as {0.user}.'.format(client))
        print('I will be speaking "{0}" as default.'.format(language))
        print("Looking for new members/guilds while I was away...")
        # We search all guilds and users Milton can see and initialize them.
        i = 0
        for guild in client.guilds:
            # Add new guilds
            if str(guild.id) not in G.GLD.keys():
                default_dict = {"language": language}
                G.GLD[str(guild.id)] = mun.DefaultMunch().fromDict(default_dict, 0)
            tools.save(G.OPT.guilds_path, G.GLD)
            # Add new members
            for member in guild.members:
                if str(member.id) not in G.USR.keys():
                    i += 1
                    G.USR[str(member.id)] = mun.DefaultMunch(0)
                G.USR[str(member.id)].name = member.name
        tools.save_users()
        print(f"Found {i} new members")
        game = ds.Game("with myself.")  # Update "playing" message, for fun.
        print("Ready!")
        await client.change_presence(status=ds.Status.online, activity=game)
Example #6
def get_vocab_info(doc, labels, train_idx, output_path, sparse_format=False):
    if os.path.exists(os.path.join(output_path, settings.vocab_file)):
        vocab_info = tools.load(os.path.join(output_path, settings.vocab_file))
        if len(vocab_info['vocab_dict']) <= settings.max_vocab_size:
            return vocab_info
    tf = Counter()
    data_doc = [doc[i] for i in train_idx]
    leaf_label = labels[-1][train_idx]
    for i, x in enumerate(data_doc):
        for word_tuple in x:
            word, frequency = word_tuple
            if sparse_format or (word not in stop_words
                                 and not word.isnumeric()):
                tf[word] += frequency

    vocab_dict = dict()
    new_tf = Counter()
    for i, v in enumerate(tf.most_common(settings.max_vocab_size)):
        vocab_dict[v[0]] = i
        new_tf[v[0]] = tf[v[0]]
    tf = new_tf
    tf["<DF>"] = len(data_doc)  # to store the number of documents
    vocab_info = {"vocab_dict": vocab_dict, "tf": tf}
    tools.save(os.path.join(output_path, settings.vocab_file), vocab_info)
    return vocab_info
Example #7
def gen_hess_error():
    t = load('data/predictions.po')
    nsets = 97
    err2 = np.zeros(t[0].size)
    for iset in range(1, nsets, 2):
        err2 += (t[iset] - t[iset + 1])**2 / 4.0
    t['hess'] = err2**0.5
    save(t, 'data/hess.po')
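For reference, the loop above is the symmetric Hessian ("master formula") uncertainty: with 97 members (central set 0 plus 48 eigenvector pairs), the pairs (1,2), (3,4), ..., (95,96) give

    err = sqrt( sum_i [ (F2c(S_i+) - F2c(S_i-)) / 2 ]^2 )

which is exactly the accumulated (t[iset] - t[iset+1])**2 / 4 terms.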
Example #8
def split_train_test(data, classes, rate, output_dir, seed=0):
    train_idx, test_idx = split(data, classes, rate, seed=seed)
    tools.make_sure_path_exists(output_dir)
    tools.save(os.path.join(output_dir, 'train_test_idx.npz'), {
        'train_idx': train_idx,
        'test_idx': test_idx
    })
    return train_idx, test_idx
Example #9
 def start(self):
     best_fit = settings.min_fit
     best_index = -1
     for i in range(self.size):
         draw = Draw()
         draw.set_fit(self.target)
         self.population[i] = draw
         if (draw.fit <= best_fit):
             best_index = i
             best_fit = draw.fit
     tools.save(self.population[best_index], best_fit, 0)
Example #10
def gen_rand(nrep=1000):
    """
    random pdfs will be constructed from eigen directions as
    f_k = f_0 + sum_i rand_i (f[i]-f[-i])/2 
    similarly the random  F2c will computed as
    F2c_k = F2c_0 + sum_i rand_i (F2c[i]-F2c[-i])/2 
    here we generate the rand_i
    """
    nsets = 97
    rand = np.random.randn(nrep, (nsets - 1) / 2)
    save(rand, 'data/rand.po')
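For reference, a minimal sketch of how one random replica is then assembled from these rand_i and the Hessian pairs (toy arrays here; the 97-set layout and the .po files come from the snippets in this section, everything else is illustrative):

import numpy as np

nsets = 97                                           # central set 0 plus 48 eigenvector pairs
neig = (nsets - 1) // 2
t = {i: np.full(5, float(i)) for i in range(nsets)}  # stand-in for data/predictions.po
rand = np.random.randn(neig)                         # one row of data/rand.po

F2c_k = np.copy(t[0])                                # start from the central prediction
for cnt, iset in enumerate(range(1, nsets, 2)):
    F2c_k += rand[cnt] * (t[iset] - t[iset + 1]) / 2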
Example #11
def split_label_unlabel(data, index, classes, rate, output_dir, seed=0):
    label_idx, unlabel_idx = split_data.split(data,
                                              classes,
                                              rate,
                                              index=index,
                                              seed=seed)
    tools.make_sure_path_exists(output_dir)
    tools.save(os.path.join(output_dir, 'label_unlabel_idx.npz'), {
        'label_idx': label_idx,
        'unlabel_idx': unlabel_idx
    })
    return label_idx, unlabel_idx
Example #12
 async def on_guild_join(guild):
     """Handles when Milton joins a guild"""
     if str(guild.id) not in G.GLD.keys():
         default_dict = {"language": language}
         G.GLD[str(guild.id)] = mun.DefaultMunch().fromDict(default_dict, 0)
     tools.save(G.OPT.guilds_path, G.GLD)
     # Add new members
     for member in guild.members:
         if str(member.id) not in G.USR.keys():
             G.USR[str(member.id)] = mun.DefaultMunch(0)
         G.USR[str(member.id)].name = member.name
     tools.save_users()
Example #13
  def get_old_hessian(self):
    with open('data/correlation_matrix.dat') as F:
      L = F.readlines()
    L = [l.strip().split() for l in L if l.strip() != '']
    L = [[float(x) for x in l] for l in L]
    corr = np.array(L)
    ndim = len(L)
    cov = np.zeros((ndim, ndim))
    for i in range(ndim):
      for j in range(ndim):
        cov[i, j] = corr[i, j] * self.dP0[i] * self.dP0[j]
    hess = LA.inv(cov)
    save(hess, 'data/old.hess')
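A note on the construction: assuming self.dP0 holds the one-sigma parameter shifts and LA is numpy.linalg, the method rebuilds the covariance from the stored correlation matrix and inverts it,

    cov_ij = corr_ij * dP0_i * dP0_j,    hess = cov^-1.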
Example #14
def change_language_logic(message):
    prefix_length = len((G.OPT.prefix + G.LOC.commands.changeLang.id)) + 1
    query = message.content[prefix_length:(prefix_length + 2)].lower()
    available_locales = G.GLOC.keys()
    out = tools.MsgBuilder()
    if query in available_locales:
        G.GLD[str(message.guild.id)].language = query
        tools.save(G.OPT.guilds_path, G.GLD)
        G.update_loc(G.GLOC[G.GLD[str(message.guild.id)].language])
        out.add(G.LOC.commands.changeLang.success.format(query.upper()))
        return out.parse()
    else:
        locales = " ".join(available_locales).upper()
        out.add(G.LOC.commands.changeLang.error.format(query, locales))
        return out.parse()
Example #15
async def saveall():
	await say(systemchannel, 'saving all dicts')
	print('saving all dicts')
	
	tools.save(mods, 'mods.json')
	tools.save(localdict, 'localdict.json')
	tools.save(authinfo, 'authinfo.json')
	tools.save(commands, 'commands.json')
Example #16
def gen_hess_glue(Q2=2.0):

    X = 10**np.linspace(-4, -1, 100)
    X = np.append(X, np.linspace(0.1, 0.99, 100))

    data = {'X': X, 'Q2': Q2}
    pdfs = lhapdf.mkPDFs(fname)
    for i in range(len(pdfs)):
        data[i] = np.array([pdfs[i].xfxQ2(21, x, Q2) for x in X])

    err2 = np.zeros(data[0].size)
    for iset in range(1, len(pdfs), 2):
        err2 += (data[iset] - data[iset + 1])**2 / 4.0
    data['err'] = err2**0.5
    save(data, 'data/hess_glue_%.2f.po' % Q2)
Example #17
def encode_chat_text_to_vectors(patterns):
    data = tools.load_if_exists("tmp.pickle")
    if data is not None:
        doc, doc_vecs, response_patterns = data
    else:
        doc = list()
        doc_vecs = list()
        response_patterns = list()
        print("encoding sentences")
        for pattern in tqdm(patterns, total=len(patterns)):
            doc.extend(pattern.input)
            vectors = [encode(line) for line in pattern.input]
            doc_vecs.extend(vectors)
            response_patterns.extend([pattern] * len(pattern.input))
        tools.save("tmp.pickle", [doc, doc_vecs, response_patterns])
    return doc, np.array(doc_vecs), response_patterns
Example #18
def main():
    """
    Main program loop.
    """

    args = setup_args()
    tfidf = construct(
        file=args.file,
        remove_retweets=args.remove_retweets,
        normalize_words=args.normalize_words,
        character_normalization_count=args.character_normalization_count,
        remove_unicode_entities=args.remove_unicode_entities,
        stem=args.stem)
    cmd = tools.meta(args)
    pcmd = tools.meta(args)
    tools.save(args.output, {'cmd': cmd, 'pcmd': pcmd, 'tfidf': tfidf})
Example #19
def gen_mc_F2c():
    rnd = load('data/rand.po')
    t = load('data/predictions.po')

    #--gen MCF2c
    nsets = 97
    mcF2c = []
    for k in range(len(rnd)):
        F2c = np.copy(t[0])
        cnt = 0
        for iset in range(1, nsets, 2):
            F2c += rnd[k][cnt] * (t[iset] - t[iset + 1]) / 2
            cnt += 1
        mcF2c.append(F2c)
    mcF2c = np.array(mcF2c)
    save(mcF2c, 'data/mcF2c.po')
Example #20
def gen_mc_glue(Q2=2.0):
    rnd = load('data/rand.po')
    t = load('data/hess_glue_%.2f.po' % Q2)

    #--gen MC glue
    nsets = 97
    mcglue = []
    for k in range(len(rnd)):
        g = np.copy(t[0])
        cnt = 0
        for iset in range(1, nsets, 2):
            g += rnd[k][cnt] * (t[iset] - t[iset + 1]) / 2
            cnt += 1
        mcglue.append(g)
    mcglue = np.array(mcglue)
    save(mcglue, 'data/mcglue_%.2f.po' % Q2)
Example #21
def run_continue(filename, generations, save_interval):
    """Run a set number of tournaments, saving along the way every save_interval generations."""
    start = time.time()
    mga = read(filename)
    for g in range(int(
            generations /
            save_interval)):  # Run X generations and save every Y generations
        print('Running generations %i - %i...' % (g * save_interval,
                                                  (g + 1) * save_interval))
        # Run simulation
        mga.runTournaments(save_interval * mga.popsize, report=True)
        # Save data
        generation = int(mga.generationsRun)
        date = mga.dateEdited
        filename = '%s_G%i_%s' % (filename[:-14], generation, date)
        save(filename, mga)
        print('%f sec elapsed so far \n' % (time.time() - start))
Example #22
  def _X_get_scan_data(self,fname):
    F=open(fname)
    L=F.readlines()
    F.close()
    L=[l.split() for l in L]
    
    # construct headers
    H={}
    cnt=0
    for h in L[0][2:]:
      H[h]=cnt
      cnt+=1

    print(H)
    
    # remove header table
    L=L[1:]
    
    # list of parameters
    K=['BLNY','Nu_c','Nd_c','au_c','ad_c','bu_c','bd_c','Nu_T','Nd_T','au_T','ad_T','bu_T','bd_T']
    
    # data to dict
    D={}
    for l in L: 
      D[l[0]]=[]
    
    for l in L: 
      D[l[0]].append([float(x) for x in l[2:]])
    
    data={} 
    chi20=D['FIT'][0][0]
    for k in K:
      data[k]={'h':[],'dchi2dxdx':[]}
      pars=D[k]
      i=H[k]
      for j in range(0,len(pars),2):
        chi2max=pars[j][0]
        chi2min=pars[j+1][0]
        pmax=pars[j][i]
        pmin=pars[j+1][i]
        h=0.5*(pmax-pmin)
        dchi2dxdx=(chi2max-2*chi20+chi2min)/h**2
        data[k]['h'].append(h)
        data[k]['dchi2dxdx'].append(dchi2dxdx)
    save(data,'data/%s.dchid2dxdx'%fname.split('/')[1].replace('.dat','')) 
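The dchi2dxdx values computed in these scan routines are central second differences: each parameter is scanned to pmax and pmin around the best fit (chi2 = chi20), so with h = (pmax - pmin)/2

    d2chi2/dx2 ≈ (chi2(x0+h) - 2*chi2(x0) + chi2(x0-h)) / h^2.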
Example #23
 def crossover(pop):
     best_fit = settings.min_fit
     best_index = -1
     new_pop = Population(pop.size, pop.target)
     new_pop.gen = pop.gen + 1
     for i in range(pop.size):
         parent_a = pop.__select_parent(pop)
         parent_b = pop.__select_parent(pop)
         child = Draw.crossover(parent_a, parent_b)
         child.mutate()
         child.set_fit(pop.target)
         if (child.fit <= best_fit):
             best_index = i
             best_fit = child.fit
         new_pop.population[i] = child
     tools.save(new_pop.population[best_index], best_fit, new_pop.gen)
     print(best_fit)
     return new_pop
Example #24
def generate_dataset(n_games, skill=20, threads=1):
    def play(engine, limit=0.01):
        board = chess.Board()
        # raw_history = []
        history = []
        while not board.is_game_over():
            result = engine.play(board, chess.engine.Limit(time=limit))
            # raw_history.append(board.copy())
            history.append(to_bitboard(board, pack=True))
            board.push(result.move)
        return history, board.result()

    def create_empty_games():
        return {WHITE_WON: [], BLACK_WON: []}

    engine = chess.engine.SimpleEngine.popen_uci(engine_path, timeout=None)
    engine.configure({"Threads": threads, "Skill Level": skill})

    games = create_empty_games()
    print('playing....')

    i = 1
    while i != n_games + 1:
        history, result = play(engine)

        if result != DRAW:
            print(f'.....{i}/{n_games}')
            games[result].append((history, results[result]))

            if i % 1000 == 0:
                filename = f'dataset-{i // 1000}'
                date = get_date()

                save(games[WHITE_WON],
                     f'{filename}-white-{len(games[WHITE_WON])}-{date}')
                save(games[BLACK_WON],
                     f'{filename}-black-{len(games[BLACK_WON])}-{date}')

                games = create_empty_games()

            i += 1

    engine.quit()
Example #25
def start_evolution():
    conta = 0
    generation = 0
    sig = 0
    draw = Draw()
    error = Fitness.fitness(draw,target_matrix)[0]
    while True:
        conta = conta + 1
        new_draw = copy.deepcopy(draw).mutate()
        if (new_draw.is_dirty()):
            generation = generation + 1
            print(conta, generation, sig)
            new_error, new_img = Fitness.fitness(new_draw, target_matrix)
            print(new_error, "= new error /", error, "= old error")
            if (new_error <= error):
                sig = sig  + 1
                draw = new_draw
                draw.dirty = False
                Tools.save(new_img,new_error,generation)
                error = new_error
Example #26
 def get_data(self) -> "dict | None":
     timeout = eventlet.Timeout(10)
     try:
         response = requests.get(
             self.URL_VK,
             params={
                 "domain": self.DOMAIN,
                 "count": "5",  # <100
                 "access_token": self.TOKEN,
                 "v": self.VERSION,
             },
             proxies={"https": "188.191.165.92:8080"})
         data = response.json()
         logging.info(data)
         logging.info("finish scanning")
         save(data)
         return data
     except eventlet.timeout.Timeout:
         logging.warning(
             'Got Timeout while retrieving VK JSON data. Cancelling...')
         return None
     finally:
         timeout.cancel()
Example #27
def gen_weights():
    rnd = load('data/rand.po')
    t = load('data/predictions.po')
    mcF2c = load('data/mcF2c.po')

    #--get absolute simulated errors
    t['alpha'] = t[0] * t['relerr']

    #--gen chi2
    chi2 = []
    for F2c in mcF2c:
        exp = np.copy(t[0])
        res = (exp - F2c) / t['alpha']
        chi2.append(np.sum(res**2))
    chi2 = np.array(chi2)
    dchi2 = chi2 - t[0].size

    #--gen weights
    weights = np.exp(-0.5 * dchi2)
    norm = np.sum(weights)
    weights /= norm

    save(weights, 'data/weights.po')
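The weights follow the Giele-Keller-style exponential reweighting: with dchi2_k = chi2_k - npts for replica k,

    w_k = exp(-dchi2_k / 2) / sum_j exp(-dchi2_j / 2)

so replicas that describe the simulated data poorly are exponentially suppressed.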
Example #28
def ProcessPIV(args, bga, bgb, reflection, stg):
    # read images into numpy arrays
    file_a, file_b, counter = args
    frame_a = tools.imread(file_a)
    frame_b = tools.imread(file_b)
    # removing background and reflections
    if bga is not None:
        frame_a = frame_a - bga
        frame_b = frame_b - bgb
        frame_a[reflection == 255] = 0
        frame_b[reflection == 255] = 0
    # applying a static mask (taking out the regions where we have walls)
    pnts = draw.polygon(stg['YM'], stg['XM'], frame_a.shape)
    frame_a[pnts] = 0
    frame_b[pnts] = 0
    plt.imshow(frame_a, cmap='gray')
    plt.show()

    # main piv processing
    u, v = pyprocess.extended_search_area_piv( frame_a, frame_b, \
        window_size=stg['WS'], overlap=stg['OL'], dt=stg['DT'], search_area_size=stg['SA'], sig2noise_method=None)
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=stg['WS'],
                                     overlap=stg['OL'])
    u, v, mask = validation.local_median_val(u, v, 2000, 2000, size=2)
    if stg['BVR'] == 'on':
        u, v = filters.replace_outliers(u,
                                        v,
                                        method='localmean',
                                        max_iter=10,
                                        kernel_size=2)
        u, *_ = smoothn(u, s=0.5)
        v, *_ = smoothn(v, s=0.5)
    x, y, u, v = scaling.uniform(x, y, u, v, stg['SC'])
    # saving the results
    save_file = tools.create_path(file_a, 'Analysis')
    tools.save(x, y, u, v, mask, save_file + '.dat')
Example #29
  def _get_scan_data(self,fname):
    F=open(fname)
    L=F.readlines()
    F.close()
    L=[l.split() for l in L]

    # get parameter names  
    params=sorted(set([l[0] for l in L[2:]]))

    # get parameters column index
    icols={}
    for p in params:
      for i in range(len(L[0])):
        if L[0][i]==p: icols[p]=i

    # get dchi2dxdx
    data={} 
    chi20=float(L[1][2])
    for p in params:
      data[p]={'h':[],'dchi2dxdx':[]}
      rows=[l for l in L if l[0]==p]
      icol=icols[p]
      for i in range(0,len(rows),2): 
        chi2max=float(rows[i][2])
        chi2min=float(rows[i+1][2])
        pmax=float(rows[i][icol])
        pmin=float(rows[i+1][icol])
        h=0.5*(pmax-pmin)
        dchi2dxdx=(chi2max-2*chi20+chi2min)/h**2

        data[p]['h'].append(h)
        data[p]['dchi2dxdx'].append(dchi2dxdx)

        fmt = '%s %0.4e %0.4e %0.2e %0.2e %0.2e %0.2e %0.4e'
        print(fmt % (p, pmin, pmax, chi20, chi2min, chi2max, h, dchi2dxdx))
    save(data,'data/%s.dchid2dxdx'%fname.split('/')[1].replace('.dat','')) 
Example #30
def plotter(train=None, title="", ylabel="", fn="test", plot=True, add_chpts=False, add_cv=True):
    # figure
    fig = plt.figure(facecolor='w', figsize=(12, 7))
    ax = fig.add_subplot(111)
    plt.title(title, fontsize=18, y=1.05)

    ### train result
    train_fig = train.model.plot(train.forecast, ax=ax, xlabel="Date", ylabel=ylabel)
    if add_chpts:
        cpts = add_changepoints_to_plot(train_fig.gca(), train.model, train.forecast)
    ax = plt.gca()
    ax.yaxis.set_major_formatter(FuncFormatter(xt.y_fmt))
    c_txt = "RMSE=%.1f MAPE=%.1f%%"%(train.param.rmse, train.cv_metrics.manual_mape)

    #format date
    myFmt = mdates.DateFormatter('%d-%m')
    ax.xaxis.set_major_formatter(myFmt)

    #ticks -  locator puts ticks at regular intervals
    xloc = plticker.MultipleLocator(base=3.0) 
    ax.xaxis.set_major_locator(xloc)

    ## Rotate date labels automatically
    fig.autofmt_xdate()

    
    xt.save(train_fig, xt.name(odir, fn+"_prediction" + ("_changepoints" if add_chpts else "") ), c_txt)

    ### components
    train_comp_fig = train.model.plot_components(train.forecast)
    ax = plt.gca()
    ax.yaxis.set_major_formatter(FuncFormatter(xt.y_fmt))
    xt.save(train_comp_fig, xt.name(odir, fn+"_components"))
    
    ### MAPE
    if add_cv:
        train_cv_mape = train.cv_mape_fig()
        ax = plt.gca()
        ax.yaxis.set_major_formatter(FuncFormatter(xt.y_fmt))
        xt.save(train_cv_mape, xt.name(odir, fn+"_cv_mape"))

    
    plt.tight_layout()
    if plot: plt.show()

    plt.close('all')
Example #31
 def Save(self):
     save(self.storage, self.fname)
Example #32
  def get_new_hessian(self,fname):
    path='hessian_collins/hessian_results/'

    # get off diagonal parts
    F=open(path+fname+'.out')
    L=F.readlines()
    F.close()
    L=[l.strip().split() for l in L] 

    # construct dict as
    # D[par1,par2] = {'P,P':#,'P,M':#,...}
    D={}
    for l in L:
      p1,p2=l[:2]
      key='%s,%s'%(p1.replace('M_','').replace('P_',''),p2.replace('M_','').replace('P_',''))
      if key not in D: D[key]={}
      key2='%s,%s'%(p1.split('_')[0],p2.split('_')[0])
      if float(l[-1])>0:
        D[key][key2]=float(l[-1])
      else:
        D[key][key2]=None

    # get diagonal parts
    F=open(path+fname+'.diag')
    L=F.readlines()
    F.close()
    L=[l.strip().split() for l in L] 

    for l in L:
      p1,p2=l[:2]
      key='%s,%s'%(p1.replace('M_','').replace('P_',''),p2.replace('M_','').replace('P_',''))
      if key=='FIRST,FIRST': 
        D['central']=float(l[-1])
        continue
      if key not in D: D[key]={}
      key2='%s,0'%(p1.split('_')[0])
      if float(l[-1])>0:
        D[key][key2]=float(l[-1])
      else:
        D[key][key2]=None


    # construct hessian
    npar=len(self.order)
    H=np.zeros((npar,npar))
    for k in D:
      if k=='central': continue
      #print k
      #continue
      #for kk in D[k]:
      #  print '\t',kk,D[k][kk]

      # get target entry at hess matrix (I,J)
      p1,p2=k.split(',')
      for i in range(npar): 
        if self.order[i]==p1: I=i
        if self.order[i]==p2: J=i

      # step size for target entry
      hi=self.par[p1]['err']
      hj=self.par[p2]['err']

      # compute hess entry
      if all(x is not None for x in [D[k][kk] for kk in ['P,P','M,M','P,M','M,P']]):
        #print D[k]['P,P']
        #print D[k]['M,M']
        #print D[k]['P,M']
        #print D[k]['M,P']
        #print (4*hi*hj)
        if I!=J: 
          H[I,J]=(D[k]['P,P']+D[k]['M,M']-D[k]['P,M']-D[k]['M,P'])/(4*hi*hj)
        elif I==J:
          H[I,J]=(D[k]['P,0']+D[k]['M,0']-2*D['central'])/(hi**2)
      else:
        H[I,J]=np.nan  # keep missing entries as NaN so np.isnan works below

    for i in range(npar):
      for j in range(npar):
        if i<j: H[j,i]=H[i,j]

    # get final hessian = 1/2 dchi2/dxdy
    H*=0.5


    # print hessian
    row='%10s'%''
    for i in range(npar): row+='%10s'%self.order[i]
    print(row)
    for i in range(npar):
      row='%10s  '%self.order[i]
      for j in range(npar): 
        if np.isnan(H[i,j])==False:
          row+='%10.2e'%H[i,j]
        else:
          row+='%10s'%''
      print(row)

    # save hessian
    save(H,'data/'+fname+'.hess')
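The Hessian entries assembled above are finite-difference second partials of chi2, halved at the end because the Hessian here is defined as (1/2) d2chi2/dx_i dx_j:

    off-diagonal: H[I,J] = (chi2(+,+) + chi2(-,-) - chi2(+,-) - chi2(-,+)) / (4*hi*hj)
    diagonal:     H[I,I] = (chi2(+,0) + chi2(-,0) - 2*chi2(central)) / hi^2

with the step sizes hi, hj taken from self.par[p]['err'].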