def wordshow(word):
    # Return the word column (second tab-separated field) of every line
    # in wnidsfile whose raw text contains `word`.  NOTE(review): this is
    # a substring match on the whole line, so "cat" also matches
    # "catfish" and any wnid string that happens to contain "cat".
    filedata = u.readfilenn(wnidsfile)
    return [x.split("\t")[1] for x in filedata if word in x]
def downloadurls(word):
    wnid = wordstownid(word)
    filename = imagenetdir+word.replace(" ","_")+".urls"
    urllib.urlretrieve("http://www.image-net.org/api/text/imagenet.synset.geturls?wnid="+wnid,filename)
    data = u.readfilenn(filename)
    if data[0].startswith("http://") == False:
        os.remove(filename)
        print "file contains no urls, removed"
    else:
        print "saved"
def comments():
    # Collect the '#' comment lines of every .py file in ~/pythonfiles.
    # Returns a list of lists, one per file: [filename, line, line, ...]
    # (only lines that start with '#'; a file with no comments yields
    # just [filename]).
    srcdir = "/home/umar/pythonfiles/"
    files = [x for x in os.listdir(srcdir) if x.endswith(".py")]
    data = []
    for thefile in files:
        filedata = [thefile]
        for line in u.readfilenn(srcdir+thefile):
            # startswith already returns a bool; no need to compare to True
            if line.startswith("#"):
                filedata.append(line)
        data.append(filedata)
    return data
def html():
    # Render the "my bookmarks" page.  Each section is built up front
    # under a descriptive name, then assembled top-to-bottom.
    csv_bookmarks = [[h.newtabopen(x[1],x[0]),h.space()] for x in bookmarks()]
    added = [[br,h.newtabopen(x,x)]
             for x in u.readfilenn("/home/umar/addedbookmarks")]
    reddit_links = h.tabularize(
        [h.newtabopen("http://reddit.com/r/"+"+".join(x[1]), x[0])
         for x in reddits()], 2)
    return h.h("my bookmarks",[
        h.returntohome(),
        csv_bookmarks, br,
        added, br,
        "reddits", br,
        reddit_links, br,
        "google news searches", br,
        h.tabularize(googlenewsurls(),2),
        "google video searches", br,
        googlevideosearches(),
        h.returntohome()
        ])
def wordstownid(words):
    # Reverse lookup in wnidsfile: find the line whose word column
    # (second tab field) equals `words` exactly, and return its wnid
    # (first tab field).  Falls through to an implicit None when no
    # line matches.
    for entry in u.readfilenn(wnidsfile):
        fields = entry.split("\t")
        if fields[1] == words:
            return fields[0]
def googlenewsurls():
    # Build one open-in-new-tab link per saved Google News search term.
    links = []
    for term in u.readfilenn("/home/umar/googlenewssearches"):
        links.append(h.newtabopen(googlenewsurl(term), term))
    return links
def bookmarks():
    # Each line of the bookmarks file is comma-separated; return the
    # list of split fields for every line.
    entries = u.readfilenn("/home/umar/bookmarks")
    result = []
    for entry in entries:
        result.append(entry.split(","))
    return result