Example no. 1
def just_confusion(saveinfo, runinfo, verbose):
    # setup
    resultdir = saveinfo["resultdir"]
    datadir = os.path.expanduser(os.path.join("~", "workdir", "data", "openfMRI"))
    cv_dir = runinfo["cv_dir"]
    infiles = [os.path.split(ss)[-1] for ss in glob(cv_dir+"/*.npz")]
    run_models = np.unique(np.array([ss[:ss.find('_cv')] for ss in infiles]))
    hamming_models = sorted([mod for mod in run_models if "hamming" in mod])
    ranking_models = sorted([mod for mod in run_models if "rankloss" in mod])
    
    # get label ordering
    zdatafile = os.path.join(datadir, 'concept.npz')
    Z = np.load(zdatafile)['Z_class']
    P = Z.sum(axis=0)/float(len(Z)) # popularity for each label
    ordering = P.argsort()[::-1] # max to min
    P = P[ordering]
    
    # print re-ordered list of concepts
    concepts  = load_concepts()
    with open("data/rconcepts", 'w') as outfile:
        for idx in ordering:
            outfile.write(concepts[idx])
            outfile.write("\n")
        
    # plot histogram of label ordering
    plot_list(P, "data/popularity", "Fraction")
    
    # get/plot confusion matrices
    for model in it.chain(hamming_models, ranking_models):
        runinfo["model"]  = clean_model(model)
        runinfo["scorer"] = clean_scorer(model) 
        sub_just_confusion(saveinfo, runinfo, verbose, ordering)
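Example 1 delegates run-name parsing to clean_model and clean_scorer, which are defined elsewhere in the project. A minimal sketch, assuming each run name is simply the model name followed by an underscore and the scorer token ("hamming" or "rankloss"); this is an assumption, not the original helpers:

# Hypothetical versions of the helpers used above (assumption, not the
# project's actual implementation): split a run name such as
# "logreg_l1_hamming" at its last underscore.
def clean_model(run_name):
    # everything before the trailing scorer token
    return run_name.rsplit('_', 1)[0]

def clean_scorer(run_name):
    # the trailing scorer token, e.g. "hamming" or "rankloss"
    return run_name.rsplit('_', 1)[-1]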
Example no. 2
def get_all(saveinfo, runinfo, verbose):
    '''
    For all models / scorers:
    print:
        mean / std score, flagged as not sig / sig / very sig
    save:
        mean map, sig_map, very_sig_map, sig_levels
    '''
    
    # setup resultfile
    resultdir = saveinfo["resultdir"]
    resultfile = os.path.join(resultdir, "resultfile.txt")
    fh = open(resultfile, 'wb')
    
    # check which models have completed training
    cv_dir = runinfo["cv_dir"]
    infiles = [os.path.split(ss)[-1] for ss in glob(cv_dir+"/*.npz")]
    run_models = np.unique(np.array([ss[:ss.find('_cv')] for ss in infiles]))
    hamming_models = sorted([mod for mod in run_models if "hamming" in mod])
    ranking_models = sorted([mod for mod in run_models if "rankloss" in mod])
    
    # start writing
    writestring = "MODELS | "
    writestring += " | ".join(ss for ss in scores)
    writestring += " | "
    writestring += " | ".join(ss for ss in losses)
    #writestring += " | "
    #writestring += " | ".join(ss for ss in ("sparse", "sparse_s", "sparse_ss", "sparse_max"))
    
    if verbose:
        print writestring
    fh.write(writestring)
    #verbose = False
    for model in it.chain(hamming_models, ranking_models):
        runinfo["model"]  = clean_model(model)
        runinfo["scorer"] = clean_scorer(model)
        
        single_results = get_single(saveinfo, runinfo, False)  # verbose deliberately off here
        mod_scores, mod_losses, shuf_scores, shuf_losses, sparsity = single_results
        
        # print scores to file
        writestring ="\n%s | "%model
        writestring +=" | ".join("%.2f (%.2f) %s"%(mloss[0], mloss[1], ("***" if mloss[0] > sloss[2] else ("**" if mloss[0] > sloss[1] else ("*" if mloss[0] > sloss[0] else "")))) \
                                                  for (mloss, sloss) in it.izip(mod_scores.T, shuf_scores.T))
        writestring += " | "
        writestring +=" | ".join("%.2f (%.2f) %s"%(mloss[0], mloss[1], ("***" if mloss[0] < sloss[2] else ("**" if mloss[0] < sloss[1] else ("*" if mloss[0] < sloss[0] else "")))) \
                                                  for (mloss, sloss) in it.izip(mod_losses.T, shuf_losses.T))
        #writestring += " | "
        #writestring +=" | ".join("%.2f"%(ss,) for ss in sparsity)
        if verbose:
            print writestring
        fh.write(writestring)
    fh.close()
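The nested ternaries that build the star annotations in Example 2 assume that each column of shuf_scores / shuf_losses carries three thresholds from the shuffled baseline (indices 0, 1, 2, ordered from least to most strict), and a model earns one star per threshold it beats: higher is better for scores, lower for losses. A hedged sketch of that logic as a standalone helper (not part of the original code):

def sig_stars(value, thresholds, higher_is_better=True):
    # thresholds: three baseline cutoffs, assumed ordered from least to
    # most strict; returns "", "*", "**" or "***".
    if higher_is_better:
        passed = sum(value > t for t in thresholds)
    else:
        passed = sum(value < t for t in thresholds)
    return "*" * passed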
Example no. 3
def plot_all_brains(mainfile=mainfile):
    ''' Look at the CV results and plot each model's mean weight maps. '''
    
    # parse to get general skeleton
    #saveinfo, runinfo, verbose = parser()
    datadir = os.path.expanduser(os.path.join("~", "workdir", "data", "openfMRI", "preprocessed"))
    
    # setup resultfile
    #cv_dir = os.path.expanduser(os.path.join("~", "workdir", "multilabel", "results", "doublecv"))
    cv_dir = os.path.expanduser(os.path.join("~", "workdir", "multilabel", "results", "wardcv"))
    resultdir = os.path.join(cv_dir, "processed")
    
    # check which models have completed training
    infiles = [os.path.split(ss)[-1] for ss in glob(resultdir+"/*.npz")]
    run_models = [ss.split('.')[0] for ss in infiles if ("majority" not in ss and "results" not in ss)] # all models, without the .npz extension
    
    # get mask
    mainimg = nb.load(mainfile)
    img_affine = mainimg.get_affine()
    
    templatefile = os.path.expanduser(os.path.join("~", "workdir", "data", "standard", "MNI152lin_T1_2mm.nii.gz"))
    templateimg = nb.load(templatefile)
    template = templateimg.get_data()
    template_affine = templateimg.get_affine()
    
    # get concept list
    targetfile = os.path.join(datadir, "targets_run1.npz")
    concept_list = [concept.rstrip("\r") for concept in np.load(targetfile)["concepts"] ]
    
    for all_model in run_models:
        print all_model
        model  = clean_model(all_model)
        scorer = clean_scorer(all_model)
        
        # load data
        datafile = os.path.join(resultdir, all_model+".npz")
        data = np.load(datafile)
        brain = data["mean_W"] # (Dx, Dy, Dz, L)
            
        # create weight matrix image
        print "plotting %s"%(all_model,)
        imgfile = os.path.join(resultdir, all_model+"_brain")
        try:
            os.makedirs(imgfile)
        except OSError:
            pass
        
        nbrain = brain.shape[-1]
        #print brain.shape
        for idx in range(nbrain):
            brainfile = os.path.join(imgfile, concept_list[idx])
            plot_brain(brain[:,:, :, idx], img_affine, template, template_affine, brainfile)
            #plot_brain(W, img_affine, template, template_affine, imgfile, alphabet=concept_list)
            
    print "done!"
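None of the examples include their imports; the aliases they use (os, glob, np, it, nb) suggest a preamble along these lines. This is an inference from usage, since the original module header is not shown; project-level helpers such as plot_brain, plot_list, load_concepts and get_single remain undefined here.

# Assumed import preamble, inferred from how the modules are used above.
import os
import itertools as it
from glob import glob

import numpy as np
import nibabel as nb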