def main(): uri = "http://localhost:5985/" Perscon_utils.init_url (uri) configfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "perscon", "perscon.conf") config.parse(configfile) base = config.get('photofiles', 'base') print "base dir is %s" % base for root, dirs, files in os.walk(base): for f in files: skip = False fname = os.path.join(root, f) meta = {} root_name,ext = os.path.splitext(fname) fin = open(fname, 'rb') try: print "reading %s" % fname data = fin.read() fin.seek(0) exif_tags = EXIF.process_file(fin) except: print >> sys.stderr, "error reading: %s" % fname skip = True finally: fin.close() if skip or (exif_tags == {}): print "skipping" continue if exif_tags.has_key('EXIF DateTimeOriginal'): raw = str(exif_tags['EXIF DateTimeOriginal']) tm = dateutil.parser.parse(raw) tt = tm.timetuple() else: tt = datetime.fromtimestamp(os.path.getmtime(fname)).timetuple() tstamp = time.mktime(tt) guid = hashlib.md5(file(fname).read()).hexdigest() uid = guid + ext m = { 'type':'org.perscon.photofiles', 'mtime':tstamp, 'att': [uid], 'uid': guid, 'frm': [], 'to':[] } # rpath = relpath(root,base) print base print fname m['caption'] = os.path.join(base, os.path.basename(fname)) mime,mime_enc = mimetypes.guess_type(fname) Perscon_utils.rpc('att/'+uid, headers={'content-type': mime,'content-length': len(data)}, data=data) meta['file_path'] = fname m['meta'] = meta mj = simplejson.dumps(m, indent=2) print mj Perscon_utils.rpc('thing/' + uid, data=mj)
def main(argv = None):
    """Fetch every photo from the authenticated user's Picasa albums and push
    each one into the local Perscon store (image bytes as an attachment,
    then per-photo metadata).

    NOTE(review): this block appears truncated in the source seen here -- it
    ends immediately after creating the empty `meta` dict, before the
    metadata record is completed and stored.
    """
    # perscon.conf lives two directories above this script.
    configfile = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "..", "..", "perscon", "perscon.conf")
    config.parse(configfile)
    service = "google.com"
    username = config.user(service)
    # The password comes from the OS keyring, not from the config file.
    password = keyring.get_password(service, username)
    gd_client = gdata.photos.service.PhotosService()
    gd_client.email = username
    gd_client.password = password
    gd_client.source = 'py-perscon-v01'
    gd_client.ProgrammaticLogin()
    uri = "http://localhost:5985/"
    Perscon_utils.init_url(uri)
    #####
    # Get pictures from Picasa
    #####
    albums = gd_client.GetUserFeed(user=username)
    # iterate over albums
    for album in albums.entry:
        print 'title: %s, number of photos: %s, id: %s' % (album.title.text, album.numphotos.text, album.gphoto_id.text)
        album_id = album.gphoto_id.text
        # iterate over pictures
        photos = gd_client.GetFeed('/data/feed/api/user/%s/albumid/%s?kind=photo' % (username, album_id))
        for photo in photos.entry:
            print 'Photo title:', photo.title.text
            image_url = photo.content.src
            # Picasa's photo id doubles as the attachment uid.
            uid = photo.gphoto_id.text
            mime,mime_enc = mimetypes.guess_type(photo.content.src)
            # guess_type() returns None for unknown extensions; the HTTP
            # header needs a concrete value.
            if not mime: mime = 'application/octet-stream'
            fin = urllib2.urlopen(image_url)
            data = fin.read()
            fin.close()
            # Upload the bytes first so the 'thing' can reference them.
            Perscon_utils.rpc('att/'+uid, headers={'Content-type':mime,'Content-length':len(data)}, data=data)
            tstamp = photo.timestamp.text
            m = {'origin':'com.google.picasa', 'mtime':tstamp, 'att': [uid], 'uid': uid, 'tags':[] }
            meta={}
def parseObject(entry, client): """Parses a Google Docs entry (document) and stores it.""" m = { 'origin':'com.google.docs' } # Parse the date stamp returned by the GDocs API # in the format 2010-01-31T17:07:39.183Z d = datetime.strptime(entry.updated.text, "%Y-%m-%dT%H:%M:%S.%fZ") m['mtime'] = time.mktime(d.timetuple()) info = { 'type': entry.GetDocumentType(), 'uri': entry.id.text } acl_feed = client.GetDocumentListAclFeed(entry.GetAclLink().href) readers = [] writers = [] for acl_entry in acl_feed.entry: # Set 'from' to be the document owner if (acl_entry.role.value == 'owner'): m['frm'] = [{ 'ty' : entry.GetDocumentType(), 'id': acl_entry.scope.value }] # Gather readers and writers elif (acl_entry.role.value == 'writer'): writers.append(acl_entry.scope.value) elif (acl_entry.role.value == 'reader'): readers.append(acl_entry.scope.value) else: print "ERROR: unrecognised ACL detected" print '%s - %s (%s)' % (acl_entry.role.value, acl_entry.scope.value, acl_entry.scope.type) # Map writers to 'to' field m['to'] = map(lambda x: { 'ty': entry.GetDocumentType(), 'id' : x }, writers) # Map readers to 'cc' field #m['cc'] = map(lambda x: { 'ty': entry.GetDocumentType(), 'id' : x }, readers) meta={} meta.update(info) m['meta'] = meta h = hashlib.sha1() h.update(entry.title.text) h.update(entry.resourceId.text) uid = h.hexdigest() m['uid'] = uid mj = simplejson.dumps(m,indent=2) # print mj Perscon_utils.rpc('thing/' + uid, data=mj)
def stash_tweets(service, account, tweets):
    """Store a batch of tweets for (service, account) in Perscon.

    Each tweet becomes a 'thing' keyed by sha1(service+account+tweet id).
    The meta type starts as a plain tweet and is refined to direct message,
    reply or retweet when the corresponding fields are present.
    """
    common = { 'origin': 'com.twitter', 'account': account, }
    for tweet in tweets:
        if Verbose: print >>sys.stderr, "raw:", sj.dumps(tweet, indent=2)
        data = { 'meta': common.copy(), }
        data['meta']['type'] = TWTY.tweet
        data['meta']['text'] = tweet['text']
        posted = dateutil.parser.parse(tweet['created_at'])
        data['meta']['mtime'] = time.mktime(posted.timetuple())
        uid = hashlib.sha1(service+account+str(tweet['id'])).hexdigest()
        data['uid'] = uid
        if 'sender' in tweet and tweet['sender']:
            # Direct message: explicit sender/recipient fields.
            data['meta']['type'] = TWTY.direct
            data['frm'] = [ addr(service, tweet['sender_screen_name']) ]
            data['to'] = [ addr(service, tweet['recipient_screen_name']) ]
        else:
            # Search-API results carry from_user/to_user; REST results nest
            # the author under 'user'.
            try:
                data['frm'] = [addr(service, tweet['from_user'])]
            except KeyError:
                data['frm'] = [addr(service, tweet['user']['screen_name'])]
            try:
                data['to'] = [addr(service, tweet['to_user'])]
            except KeyError:
                data['to'] = [addr(service, None)]
            if 'in_reply_to_screen_name' in tweet and tweet['in_reply_to_screen_name']:
                data['meta']['type'] = TWTY.reply
                data['to'] = [addr(service, tweet['in_reply_to_screen_name'])]
            if 'retweeted_status' in tweet and tweet['retweeted_status']:
                original = tweet['retweeted_status']
                data['meta']['type'] = TWTY.retweet
                data['meta']['source'] = original['user']['screen_name']
                created = dateutil.parser.parse(original['created_at'])
                data['meta']['ctime'] = time.mktime(created.timetuple())
        payload = sj.dumps(data, indent=2)
        if Verbose: print >>sys.stderr, payload
        Perscon_utils.rpc("thing/%s" % (uid, ), data=payload)
def register_credential(svc, usr):
    """Look up the password for (svc, usr) in the OS keyring and store the
    credential record in Perscon.

    NOTE(review): the uid hashes the service name only, so two users on the
    same service would share a uid -- confirm that is intended before
    changing it, since existing records are keyed this way.
    """
    secret = keyring.get_password(svc, usr)
    uid = hashlib.sha1("%s" % (svc,)).hexdigest()
    record = {
        'uid': uid,
        'svc': svc,
        'usr': usr,
        'pwd': secret,
    }
    print >>sys.stderr, "register_credential:", svc, usr, uid
    Perscon_utils.rpc("credential/%s" % (uid, ), data=sj.dumps(record, indent=2))
# NOTE(review): this chunk starts mid option-parsing -- the `if`/earlier
# `elif` arms of this chain are not visible here.
elif a== 'call': mode = 'Call'
else: usage()

if len(args) != 1 or not mode: usage()

# Single positional argument: path to the sqlite database to import.
conn = sqlite3.connect(args[0])
c = conn.cursor()
if mode == 'SMS': res = parseSMS(c, uid_prefix)
elif mode == 'Call': res = parseCall(c, uid_prefix)

# Upload each parsed record; on HTTP failure dump the server response and
# the offending payload, then abort.
for uid in res:
    mj = simplejson.dumps(res[uid], indent=2)
    try:
        Perscon_utils.rpc ('thing/' + uid, data=mj)
    except urllib2.HTTPError as e:
        print e.read ()
        print mj
        sys.exit(1)

def normalize_phone(p):
    # Strip a phone number down to digits/'+' and normalise the
    # international prefix.
    # NOTE(review): `pn[0:1] == "00"` compares a ONE-character slice against
    # a two-character string, so it can never be true -- almost certainly
    # meant `pn[0:2] == "00"`; left unchanged here, flagged for a fix.
    # NOTE(review): the function is truncated in this chunk -- the body of
    # the final `elif` is not visible.
    import re
    if len(p) < 1: return p
    pn = re.sub('[^0-9|\+]','',p)
    if len(pn) < 1: return pn
    if pn[0:1] == "00" and len(pn) > 2: pn = "+%s" % pn[2:]
    elif pn[0] == '0':
# NOTE(review): the following lines are the tail of an enclosing function
# whose `def` is outside this chunk (presumably parseLog, which main()
# below calls) -- the names body/data/service/sender/account/participants/tm
# are bound earlier in that function.
    # Strip the HTML markup from the message body, keeping only the text.
    body = lxml.html.fromstring(body).text_content()
    data['meta']['text'] = body
    # this message originated from the current user, so its from us
    # and to the participants
    data['frm'] = [{ 'ty' : service, 'id': sender }]
    if sender == account:
        data['to'] = map(lambda x: { 'ty': service, 'id' : x }, participants)
    else:
        data['to'] = [{ 'ty' :service, 'id': account }]
    # Deterministic uid from the conversation coordinates plus the text.
    uid = hashlib.sha1(service+account+sender+tm+body).hexdigest()
    data['uid'] = uid
    dataj = simplejson.dumps(data, indent=2)
    Perscon_utils.rpc('thing/%s' % (uid,), data=dataj)

def main():
    """Walk the default Adium log directory and parse every log file into
    the local Perscon store."""
    logdir = "%s/Library/Application Support/Adium 2.0/Users/Default/Logs/" % os.getenv("HOME")
    if not os.path.isdir(logdir):
        print >> sys.stderr, "Unable to find Adium log dir in: %s" % logdir
        sys.exit(1)
    uri = "http://localhost:5985/"
    Perscon_utils.init_url (uri)
    for root, dirs, files in os.walk(logdir):
        for f in files:
            logfile = os.path.join(root, f)
            parseLog(logfile)

if __name__ == "__main__": main()
def parse_photos():
    """Import the local iPhoto 8.1 library into Perscon.

    Combines three data sources: the macOS Address Book (to identify the
    current user as the 'from' address), iPhoto's sqlite databases
    (iPhotoMain.db + face.db, joined to map detected faces to names/emails),
    and AlbumData.xml (the master image list, rolls, keywords and captions).
    Each image is uploaded as an attachment and described by a JSON 'thing'
    whose 'to' list holds the people recognised in the picture.
    """
    home = os.getenv("HOME") or exit(1)
    # Identify the current user via Address Book: primary email + name.
    book = AddressBook.ABAddressBook.sharedAddressBook()
    addrs = book.me().valueForProperty_(AddressBook.kABEmailProperty)
    myemail = addrs.valueAtIndex_(addrs.indexForIdentifier_(addrs.primaryIdentifier()))
    fname = book.me().valueForProperty_(AddressBook.kABFirstNameProperty)
    lname = book.me().valueForProperty_(AddressBook.kABLastNameProperty)
    name = "%s %s" % (fname, lname)
    from_info = { 'ty': 'email', 'id' : myemail }
    base = os.path.join(home, "Pictures/iPhoto Library")
    idb = os.path.join(base, 'iPhotoMain.db')
    fdb = os.path.join(base, 'face.db')
    # Open an in-memory-style connection and ATTACH both iPhoto databases so
    # the face-name query below can join across them.
    conn = sqlite3.connect('')
    c = conn.cursor()
    c.execute("attach database '%s' as i" % idb)
    c.execute("attach database '%s' as f" % fdb)
    # For a given relative path, return (face name, email) for every face
    # detected in that image.
    sql = "select f.face_name.name,f.face_name.email,relativePath from i.SqFileInfo inner join i.SqFileImage on (i.SqFileImage.primaryKey = i.SqFileInfo.primaryKey) inner join i.SqPhotoInfo on (i.SqFileImage.photoKey = i.SqPhotoInfo.primaryKey) inner join f.detected_face on (f.detected_face.image_key = i.SqFileImage.photoKey) inner join f.face_name on (f.detected_face.face_key = f.face_name.face_key) where f.face_name.name != '' and relativePath=?"
    fname = "%s/Pictures/iPhoto Library/AlbumData.xml" % os.getenv("HOME")
    pl = plistlib.readPlist(fname)
    version="%s.%s" % (pl['Major Version'], pl['Minor Version'])
    app_version = pl['Application Version']
    # The plist schema is version-specific; bail out on anything but 8.1.
    if not (app_version.startswith('8.1')):
        print >> sys.stderr, "This script only works with iPhoto 8.1, found version %s" % app_version
        exit(1)
    images = pl['Master Image List']
    keywords = pl['List of Keywords']
    rolls = pl['List of Rolls']
    for roll in rolls:
        roll_id = roll['RollID']
        for img_id in roll['KeyList']:
            img = images[img_id]
            # Prefer the untouched original over the edited copy.
            if 'OriginalPath' in img:
                img_path = img['OriginalPath']
            else:
                img_path = img['ImagePath']
            # 1-tuple: parameter list for the face-name query above.
            rel_path = (relpath(img_path, base),)
            root,ext = os.path.splitext(img_path)
            uid = img['GUID'] + ext
            mime,mime_enc = mimetypes.guess_type(img_path)
            # guess_type() returns None for unknown extensions; the HTTP
            # header needs a concrete value.
            if not mime: mime = 'application/octet-stream'
            fin = open(img_path, 'rb')
            data = fin.read()
            fin.close()
            # Upload the bytes first so the 'thing' can reference them.
            Perscon_utils.rpc('att/'+uid, headers={'Content-type':mime,'Content-length':len(data)}, data=data)
            # ti_to_tt is defined elsewhere in this project; presumably it
            # converts iPhoto's DateAsTimerInterval into (unix stamp,
            # struct_time) -- confirm against its definition.
            tstamp,tt = ti_to_tt(img['DateAsTimerInterval'])
            m = {'origin':'com.apple.iphoto', 'mtime':tstamp, 'att': [uid], 'uid': uid, 'tags':[] }
            meta={}
            if 'Rating' in img: meta['rating'] = img['Rating']
            if 'Comment' in img and img['Comment'] != '': meta['comment'] = img['Comment']
            # Keywords are stored as indexes into the plist keyword table.
            if 'Keywords' in img:
                kw = map(lambda x: keywords[x], img['Keywords'])
                m['tags'] = kw
            if 'Caption' in img: meta['caption'] = img['Caption']
            meta['file_path'] = relpath(img_path, base)
            c.execute(sql, rel_path)
            m['frm'] = [from_info]
            m['to'] = []
            # fin = open(img_path, 'rb')
            # try:
            #     mtags = EXIF.process_file(fin)
            # except:
            #     pass
            # fin.close()
            # m['tags'].extend(mtags)
            # Everyone recognised in the photo (with an email) goes in 'to'.
            for row in c:
                fname=row[0]
                email=row[1]
                if email:
                    m['to'].append({'ty':'email', 'id':email})
            print m
            m['meta'] = meta
            mj = simplejson.dumps(m, indent=2)
            #print mj
            Perscon_utils.rpc('thing/' + uid, data=mj)