def update_recent(alldata, docs):
    """Update the recent-files list, removing duplicates and trimming it
    back to MINDOCLENGTH entries once it exceeds MAXDOCLENGTH."""
    # remove duplicates: drop any existing entry that also appears in docs
    data2 = [a['fullpath'] for a in alldata]
    data2.reverse()  # most recent item is now last again
    junk = []
    for f in data2:
        if f not in docs:
            junk.append(f)
    junk = junk + docs
    # remove old items if the list is too long
    if len(junk) > MAXDOCLENGTH:
        idx = len(junk) - MINDOCLENGTH
        junk = junk[idx:]  # keep only the newest MINDOCLENGTH entries
    header.write_database(junk, RECENTDATA, mode='w')
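# A toy walk-through of the trimming above, with assumed values
# (say MAXDOCLENGTH = 5 and MINDOCLENGTH = 3; the real constants are
# defined elsewhere in this script):
#
#     junk = ['a', 'b', 'c', 'd', 'e'] + ['f', 'g']  # deduped old + new docs
#     # len(junk) == 7 > MAXDOCLENGTH, so keep the last MINDOCLENGTH items:
#     junk = junk[len(junk) - 3:]                    # ['e', 'f', 'g']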
def open_files(mimetypes, docs):
    """Open files given their mimetypes."""
    if len(mimetypes) != len(docs):
        print "Number of mimetypes and docs differ!"
        sys.exit()
    if os.path.exists(MIMEDATA):
        fp = open(MIMEDATA, "r")
        # read the file, ignoring anything after a comment character, #,
        # and stripping whitespace and \n chars
        data = [line.split('#')[0].strip() for line in fp]
        # drop empty lines (where # was the first char on the line) and
        # split on whitespace
        data = [line.split() for line in data if len(line) > 0]
        data2 = [line[0] for line in data]  # list of the mimetypes alone
        fp.close()
    else:
        print "No MIME types have been defined!"
        sys.exit()
    for mime, doc in zip(mimetypes, docs):
        if mime is None:
            continue
        try:
            idx = data2.index(mime)
        except ValueError:
            # try again, but use the generic mimetype this time
            junk = mime.split('/')[0]  # base part of the mime type, e.g. text
            try:
                idx = data2.index(junk)
                print '### Warning! "%s" has unknown mimetype "%s". Using default for "%s"' % (doc, mime, junk)
            except ValueError:
                print '### Warning! "%s" has unknown mimetype "%s". No default found for "%s"' % (doc, mime, junk)
                continue
        app = ' '.join(data[idx][1:])
        if app == 'nc':  # console apps run in the foreground
            os.system('%s "%s"' % (app, doc))
        else:
            os.system('%s "%s" &' % (app, doc))
    header.write_database(docs, RECENTDATA)
    # alldata was undefined in this scope; re-read the recent list so
    # duplicates can be pruned (header.read_database is used this way
    # elsewhere in this script)
    alldata = header.read_database(RECENTDATA)
    update_recent(alldata, docs)
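# The parser above implies a MIMEDATA file of whitespace-separated lines,
# "mimetype command...", with '#' starting a comment. A hypothetical example
# (the entries are illustrative only, not shipped with this script):
#
#     # mimetype        application to launch
#     text              vim       # generic fallback for text/*
#     text/html         firefox
#     application/pdf   evince
#     audio/mpeg        nc        # 'nc' apps run in the foreground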
    else:  # no arguments given
        header.print_database(data)
        if choice not in ["", "h"]:  # unless we just wanted the list, get args
            line = raw_input("Choose: ")
            if line != "":
                itemlist = header.read_range(line.split(), len(data))
                if choice in ('cp', 'mv', 'ck', 'ln'):
                    newitemlist = []  # items that actually exist
                    errcode = []  # return codes from the move/copy/link calls
                    for idx in itemlist:
                        if os.path.exists(data[idx]['fullpath']):
                            newitemlist.append(idx)
                            errcode.append(dostuff(data[idx], choice))
                        else:
                            print 'Item "%s" does not exist. Skipping' % data[idx]['fullpath']
                    itemlist = newitemlist
                elif choice == 'rm':
                    errcode = [0] * len(itemlist)
                if choice in ('mv', 'rm'):
                    header.remove_database(itemlist, SHELFDATA, errcode)
else:  # add file to shelf
    data = header.read_database(SHELFDATA)
    data2 = [a['fullpath'] for a in data]
    docs = header.read_cmdline(sys.argv[1:])
    junk = list(set(docs) - set(data2))  # only files not already shelved
    junk.sort(header.smart_sort)
    header.write_database(junk, SHELFDATA)
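# Sketch of the set arithmetic in the add branch above (toy values):
#
#     data2 = ['/home/a.txt', '/home/b.txt']   # already shelved
#     docs  = ['/home/b.txt', '/home/c.txt']   # given on the command line
#     junk  = list(set(docs) - set(data2))     # ['/home/c.txt'] only
#
# so re-adding an already-shelved file is a no-op; the sort restores the
# ordering that the set difference discards.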
elif flag == 'a':  # add
    namelist = [s['name'] for s in data]
    location = sys.argv[-1]
    docs = header.read_cmdline(sys.argv[2:-1])
    newlist = []
    asource = {'name': '', 'path': ''}
    for f in docs:
        junk, name = os.path.split(f)
        if name in namelist:
            print '"%s" already defined. Skipping' % name
        else:
            asource['name'] = name
            asource['path'] = location
            newlist.append(asource.copy())  # copy, so entries don't share one dict
    header.write_database(newlist, syncFile)
elif flag in ('d', 'u', 'x'):
    if len(sys.argv) < 3:
        docs = list_syncs(data)
    else:
        tmp = [header.to_lower(f) for f in sys.argv[2:]]
        # strip off any trailing forward slash
        tmp = [f[:-1] if f.endswith('/') else f for f in tmp]
        docs = grep_data(tmp, data)
    if flag == 'x':  # delete
        deletelist = [s['name'] for s in docs]  # names of documents to delete
        namelist = [s['name'] for s in data]    # names of all documents
        idxlist = [namelist.index(a) for a in deletelist]  # indexes to delete
        if len(idxlist) == 0:
            print "No locations deleted"
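# Sketch of the delete-index lookup above (toy values; grep_data and the
# real record layout live elsewhere in this script):
#
#     namelist   = ['docs', 'music', 'photos']  # all defined names
#     deletelist = ['music']                    # names matched for 'x'
#     idxlist    = [namelist.index(a) for a in deletelist]  # [1]
#
# list.index raises ValueError on a missing name, but every entry in
# deletelist is drawn from data, so each lookup is guaranteed to succeed.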