Example #1
# USF prior
#usfnet, usfitems = rw.read_csv('./snet/USF_animal_subset.snet')
#priordict = rw.genGraphPrior([usfnet], [usfitems]) 

for subj in subs:
    print subj
    category="animals"
    Xs, items, irts.data, numnodes=rw.readX(subj,category,'./Spring2015/results_cleaned.csv',ignorePerseverations=True)

    # uinvite prior
    priorgraphs=[]
    prioritems=[]
    for osub in subs:
        if osub != subj:
            g, i = rw.read_csv('Sdirected2015.csv',cols=("node1","node2"),header=True,filters={"subj": osub, "uinvite": "1"},undirected=False)
            priorgraphs.append(g)
            prioritems.append(i)
    priordict = rw.genGraphPrior(priorgraphs, prioritems)

    toydata.numx = len(Xs)
    prior = (priordict, items)

    # u-invite
    directed_prior_graph, bestval=rw.uinvite(Xs, toydata, numnodes, fitinfo=fitinfo, prior=prior)

    # rw
    rw_graph=rw.noHidden(Xs, numnodes)
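    # Hedged sketch of the idea behind the naive baseline above, assuming
    # rw.noHidden simply links items that appear adjacently in any list
    # (an assumption about the library, not its source):
    naive_edges = set()
    for x in Xs:
        for a, b in zip(x, x[1:]):
            naive_edges.add((min(a, b), max(a, b)))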


    g=nx.DiGraph(directed_prior_graph)
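    # Hedged sketch (not part of the original script): one way the fitted
    # directed graph could be written back out as an edge list in the same
    # node1/node2/subj/uinvite format that the prior loop above reads from
    # Sdirected2015.csv. The output filename is hypothetical.
    with open('Sdirected2015_refit.csv', 'a') as outfile:
        for n1, n2 in g.edges():
            outfile.write(",".join([items[n1], items[n2], subj, "1"]) + "\n")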
Example #2
    'numnodes': 280,
    'numlinks': 6,
    'prob_rewire': .3
})

irts = rw.Irts({
    'data': [],
    'irttype': "exgauss",
    'exgauss_lambda': 0.721386887,
    'exgauss_sigma': 6.58655566,
    'irt_weight': 0.95,
    'rcutoff': 20
})

# USF prior
usfnet, usfitems = rw.read_csv('./snet/USF_animal_subset.snet')
priordict = rw.genGraphPrior([usfnet], [usfitems])

for subj in subs:
    print subj
    category = "animals"
    Xs, items, irts.data, numnodes = rw.readX(
        subj,
        category,
        './Spring2016/results_cleaned.csv',
        ignorePerseverations=True)

    # uinvite prior
    #priorgraphs=[]
    #prioritems=[]
    #for osub in subs:
Example #3
# Results of the previous simulation were hard to analyze since the graph changed size as the number of lists increased
# This file estimates the number of misses for each numlists/listlength combo to impute into the data

import numpy as np
import rw
import math
import random
import networkx as nx
import pygraphviz

usf,items=rw.read_csv('USF_animal_subset.csv')
usfg=nx.to_networkx_graph(usf)
numnodes=len(items)
usfsize=160

misses=[]

f=open('usf_sims_misses.csv','a', 0)                # write/append to file with no buffering

for numlists in range(2,18):
    for listlength in [15,30,50,70]:
        misses=0
        crs=0
        for gnum in range(100):  # how many samples for each numlists/listlength combo
            print numlists, listlength, gnum
            # generate toy lists
            Xs=[rw.genX(usfg)[0:listlength] for i in range(numlists)]

            itemsinXs=np.unique(rw.flatten_list(Xs))    # list of items that appear in toy data

            notinx=[]       # nodes not in trimmed X
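            # Hedged sketch of the continuation (the original is not shown):
            # tally USF nodes that never appear in any of the toy lists for
            # this numlists/listlength combination.
            for node in range(numnodes):
                if node not in itemsinXs:
                    notinx.append(node)
            misses += len(notinx)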
Example #4
# script to generate the USF' network; same as the USF network except that it creates a direct edge between any
# two animals where an indirect connection exists (a path passing exclusively through non-animals)
# constraint: maximum path length of 3

import sys
sys.path.append('./rw')
import rw
import numpy as np
import networkx as nx

usf,items=rw.read_csv('USF_full.csv')
usfprime=np.copy(usf) 

# animal list
with open('USFanimals.csv','r') as f:
    animals = f.read()
animals=animals.split('\n')
animals.pop()

animalidx=[items.values().index(i) for i in animals]
nonanimalidx=sorted([i for i in range(5018) if i not in animalidx],reverse=True)

G = nx.to_networkx_graph(usf)

cutoff=3

for node1 in items:
    print node1
    for node2 in items:
        if node2 > node1:       # loop through all pairs (could be optimized)
            if (items[node1] in animals) and (items[node2] in animals): # if both nodes are animals
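                # Hedged sketch of the indirect-path test (the original body is
                # not shown): add a direct animal-animal edge to usfprime when
                # some path of length <= cutoff runs exclusively through
                # non-animal nodes.
                for path in nx.all_simple_paths(G, node1, node2, cutoff=cutoff):
                    if all(items[n] not in animals for n in path[1:-1]):
                        usfprime[node1, node2] = 1
                        usfprime[node2, node1] = 1
                        break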
Example #5
#!/usr/bin/python

import pickle
import numpy as np
import rw
import networkx as nx
import sys

filename = sys.argv[1]

usf_graph, usf_items = rw.read_csv("./snet/USF_animal_subset.snet")
usf_graph_nx = nx.from_numpy_matrix(usf_graph)
usf_numnodes = len(usf_items)

td = rw.Data({
    'numx': 3,
    'trim': 35})


fh = open(filename,'r')
alldata = pickle.load(fh)
Xs = alldata['datab'][0:10]
graphs = alldata['uinvite_graphs']
items = alldata['items'][0:10]
priordict = alldata['priordict']

# for old pickles (saved before a DEFAULTPRIOR entry was added)
if 'DEFAULTPRIOR' not in priordict.keys():
    priordict['DEFAULTPRIOR'] = 0.5

# recompute for half-alpha; should be identical for a=1
Example #6
import numpy as np
import rw

subs2015=['S101','S102','S103','S104','S105','S106','S107','S108','S109','S110',
      'S111','S112','S113','S114','S115','S116','S117','S118','S119','S120']
subs2016=['S1','S2','S3','S4','S5','S7','S8','S9','S10','S11','S12','S13']

fi='s2015.csv'
subs=subs2015

fo=open('pairs.csv','w')
subpairs=open('subpairs.csv','w')
finalpairs=[]

for sub in subs:
    # read in graphs
    g1, items = rw.read_csv(fi,cols=('node1','node2'),header=1,filters={'subj': sub, 'irt5': '1'},undirected=1)
    g2, items2 = rw.read_csv(fi,cols=('node1','node2'),header=1,filters={'subj': sub, 'irt9': '1'},undirected=1)
    g3, items3 = rw.read_csv(fi,cols=('node1','node2'),header=1,filters={'subj': sub, 'irt95': '1'},undirected=1)
    
    # re-arrange graphs to have same indices
    idx=[items2.values().index(i) for i in items.values()]
    g2=g2[idx][:,idx]
    idx=[items3.values().index(i) for i in items.values()]
    g3=g3[idx][:,idx]
    
    gtotal=(g1+g2+g3)
    
    yes=zip(*np.where(gtotal==3))                # all three models agree there is an edge
    no=zip(*np.where(gtotal==0))                 # all three models agree there is no edge
    some=zip(*np.where(gtotal>0))                # at least one model places an edge
    some=[i for i in some if i not in yes]       # keep only pairs the models disagree on
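    # Hedged sketch (the original write-out is not shown): record the pairs the
    # three IRT-weight solutions disagree on, using the item labels in `items`.
    # The output format here is hypothetical.
    for i, j in some:
        if j > i:                                # write each undirected pair once
            fo.write(",".join([sub, items[i], items[j]]) + "\n")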
Example #7
# temporary script to take full USF network and generate USF' network
# USF' network includes edges between nodes that are indirectly connected in USF,
# even if nodes are not directly connected

import sys
sys.path.append('./rw')
import rw

usf_full, allitems = rw.read_csv('USF_full.csv')
usf_animal, animals = rw.read_csv('USF_animal_subset.csv')
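# Hedged sketch of the overall idea (the original continuation is not shown):
# USF' starts as a copy of the full network, and a direct animal-animal edge is
# added wherever an indirect connection exists; the USF' script earlier in this
# file shows one way to test those paths.
import numpy as np
usf_prime = np.copy(usf_full)
animal_names = set(animals.values())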
Example #8
real_data = {}
real_irts = {}
listlengths = {}
for sub in subs:
    data, items, irts, numnodes = rw.readX(sub, "animals", real_lists)
    listlengths[sub] = [len(x) for x in data]
    data = numToAnimal(data, items)
    real_data[sub] = data
    real_irts[sub] = irts

## generate fake_data and irts yoked to real_data
numsets = 100  # number of sets of fake data per SS
fake_data = {}
fake_irts = {}

USFnet, USFanimals = rw.read_csv('./snet/USF_animal_subset.snet')
USFnet = nx.to_networkx_graph(USFnet)


# methods: genX, frequency, cbdfs, sa
def genFakeData(td, method="genX", decay=0):
    for sub in subs:
        fake_data[sub] = []
        fake_irts[sub] = []
        for setnum in range(numsets):
            dataset = []
            irtset = []
            for listnum, trimval in enumerate(listlengths[sub]):
                td.trim = trimval

                ## SEED START
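                # Hedged sketch of the default genX branch (the original body
                # is not shown; this mirrors the yoked-generation loop in
                # Example #10): generate one censored walk trimmed to the same
                # length as the corresponding real list.
                if method == "genX":
                    data, genirts, alter_graph = rw.genX(USFnet, td)
                    dataset.append(numToAnimal(data, USFanimals)[0])
                    irtset.append(genirts)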
Example #9
    'S1', 'S2', 'S3', 'S4', 'S5', 'S7', 'S8', 'S9', 'S10', 'S11', 'S12', 'S13'
]

fi = 's2016.csv'
subs = subs2016

fo = open('pairs.csv', 'w')
subpairs = open('subpairs.csv', 'w')
finalpairs = []

for sub in subs:
    # read in graphs
    g1, items1 = rw.read_csv(fi,
                             cols=('node1', 'node2'),
                             header=1,
                             filters={
                                 'subj': sub,
                                 'uinvite': '1'
                             },
                             undirected=1)
    g2, items2 = rw.read_csv(fi,
                             cols=('node1', 'node2'),
                             header=1,
                             filters={
                                 'subj': sub,
                                 'priming': '1'
                             },
                             undirected=1)
    g3, items3 = rw.read_csv(fi,
                             cols=('node1', 'node2'),
                             header=1,
                             filters={
Example #10
    data, items, irts, numnodes = rw.readX(subj, "animals", real_lists)
    listlengths[subj] = [len(x) for x in data]
    data = numToAnimal(data, items)
    real_data[subj] = data
    real_irts[subj] = irts

## generate fake_data and irts yoked to real_data
numsets = 100  # number of sets of fake data per SS
fake_data = {}
fake_irts = {}

for subj in subs:
    graph, items = rw.read_csv(real_graphs,
                               cols=('node1', 'node2'),
                               header=1,
                               filters={
                                   'subj': str(subj),
                                   'uinvite': '1'
                               })
    graph = nx.from_numpy_matrix(graph)

    fake_data[subj] = []
    fake_irts[subj] = []
    for setnum in range(numsets):
        dataset = []
        irtset = []
        for trimval in listlengths[subj]:
            toydata.trim = trimval
            print str(subj), str(toydata.trim)
            [data, irts, alter_graph] = rw.genX(graph, toydata)
            data = numToAnimal(data, items)[0]
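            # Hedged sketch of the continuation (the original is not shown):
            # collect this generated list and its IRTs, then store the whole
            # yoked set for the subject.
            dataset.append(data)
            irtset.append(irts)
        fake_data[subj].append(dataset)
        fake_irts[subj].append(irtset)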