class DataBase(object):
    """Read-only view over a buzhug word database, keyed by Hindi field names."""

    def __init__(self):
        # NOTE(review): `mypath` must be defined at module level -- confirm.
        self.db = Base(mypath)
        # Maps the Hindi display name of each field to its column name in the DB.
        self.sfield = {'शब्द': 'Words', 'पद': 'Speech', 'अर्थ': 'Meaning',
                       'पर्यायवाचि': 'Synonym', 'विपरीतार्थक': 'Antonym',
                       'अंग्रेजी': 'English'}
        self.db.open()

    def _get_attribute(self, args):
        """Return {hindi_name: value} for every non-None field of record `args`."""
        # Build the dict directly instead of insert-then-pop, and compare
        # with `is None` instead of `== None`.
        ret_val = {}
        for hindi, column in self.sfield.items():
            value = getattr(args, column)
            if value is not None:
                ret_val[hindi] = value
        return ret_val

    def select(self, value):
        """Select records whose Words column equals `value`.

        Returns the attribute dict of the last matching record (empty dict if
        nothing matched); also cached on self.g for get_field_length().
        """
        self.record = self.db.select(list(self.sfield.values()), Words=value)
        # BUG FIX: self.g was previously left unset (AttributeError) or stale
        # from an earlier call when no record matched.
        self.g = {}
        for v in self.record:
            # NOTE(review): only the last matching record is kept, as before.
            self.g = self._get_attribute(v)
        return self.g

    def fields(self):
        """Return the underlying base's field names."""
        return self.db.field_names

    def get_field_length(self, value):
        """Return the number of non-empty fields for the record matching `value`."""
        return len(self.select(value))
class RepDB:
    """Tracks the stored version of each (oid, block_id) pair in a buzhug base."""

    def __init__(self, path):
        self.path = path
        self.dbh_stored_blocks = Base(self.path)
        try:
            self.dbh_stored_blocks.create(('key', str), ('version', str))
        except IOError:
            # Base already exists on disk; caller is expected to open() it.
            pass

    def open(self):
        """Open the underlying base."""
        self.dbh_stored_blocks.open()

    def add(self, oid, block_id, version):
        """Insert or refresh the version stored for (oid, block_id)."""
        key = str((oid, block_id))
        # Let's see if we already have a key stored.
        rows = self.dbh_stored_blocks.select_for_update(['key', 'version'],
                                                        key=key)
        if rows == []:
            self.dbh_stored_blocks.insert(key, str(version))
        else:
            # BUG FIX: update() was called with no arguments, so a re-added
            # block silently kept its stale version; store the new one.
            rows[0].update(version=str(version))

    def get(self, oid, block_id):
        """Return the stored version string for (oid, block_id).

        Raises IndexError if the key is unknown (pre-existing behavior).
        """
        key = str((oid, block_id))
        result = self.dbh_stored_blocks.select(['key', 'version'], key=key)
        return result[0].version

    def update(self, oid, block_id, version):
        """Alias for add(): insert-or-refresh semantics."""
        self.add(oid, block_id, version)

    def delete(self, oid, block_id):
        """Remove the record for (oid, block_id)."""
        key = str((oid, block_id))
        rows = self.dbh_stored_blocks.select_for_update(['key', 'version'],
                                                        key=key)
        self.dbh_stored_blocks.delete(rows[0])

    def close(self):
        """Close the underlying base."""
        self.dbh_stored_blocks.close()

    def getIterator(self):
        """Return a RepDBIterator over a snapshot of all records."""
        return RepDBIterator([record for record in self.dbh_stored_blocks])
class RepDB:
    """Version bookkeeping for (oid, block_id) pairs, backed by a buzhug base."""

    def __init__(self, path):
        self.path = path
        self.dbh_stored_blocks = Base(self.path)
        try:
            self.dbh_stored_blocks.create(('key', str), ('version', str))
        except IOError:
            # The base already exists on disk; open() must be called by the user.
            pass

    def open(self):
        """Open the underlying base."""
        self.dbh_stored_blocks.open()

    def add(self, oid, block_id, version):
        """Insert or refresh the version stored for (oid, block_id)."""
        key = str((oid, block_id))
        # Check whether we already have this key stored.
        rows = self.dbh_stored_blocks.select_for_update(['key', 'version'],
                                                        key=key)
        if rows == []:
            self.dbh_stored_blocks.insert(key, str(version))
        else:
            # BUG FIX: the original called update() with no arguments, so an
            # existing row never received the new version; write it through.
            rows[0].update(version=str(version))

    def get(self, oid, block_id):
        """Return the stored version for (oid, block_id).

        Raises IndexError when the key is missing (pre-existing behavior).
        """
        key = str((oid, block_id))
        result = self.dbh_stored_blocks.select(['key', 'version'], key=key)
        return result[0].version

    def update(self, oid, block_id, version):
        """Alias for add(): insert-or-refresh semantics."""
        self.add(oid, block_id, version)

    def delete(self, oid, block_id):
        """Remove the record for (oid, block_id)."""
        key = str((oid, block_id))
        rows = self.dbh_stored_blocks.select_for_update(['key', 'version'],
                                                        key=key)
        self.dbh_stored_blocks.delete(rows[0])

    def close(self):
        """Close the underlying base."""
        self.dbh_stored_blocks.close()

    def getIterator(self):
        """Return a RepDBIterator over a snapshot of all records."""
        return RepDBIterator([record for record in self.dbh_stored_blocks])
class DB: def __init__(self, storage_path): self.dbh_objects = Base(os.path.join(storage_path, 'objects')) self.dbh_blocks = Base(os.path.join(storage_path, 'blocks')) self.dbh_replicas = Base(os.path.join(storage_path, 'replicas')) self.dbh_tree = Base(os.path.join(storage_path, 'tree')) self.dbh_paths = Base(os.path.join(storage_path, 'paths')) self.dbh_id = Base(os.path.join(storage_path, 'id')) self.dbh_tags = Base(os.path.join(storage_path, 'tags')) self.storage_path = storage_path def __create_root(self): """ Check if the filesystem has a / and if not create it""" print "Initializing filesystem..." if self.get_file(path='/'): return print "Creating root..." f = FSObject(1,1,'/',0,0,0,0) # lets see if we already have a key stored set = self.dbh_objects.select(['oid'],oid=str(f.oid)) if set == []: # we have create tree and paths first self.dbh_tree.insert(str(f.oid), str(f.parent)) self.dbh_paths.insert(str((f.parent, f.path))) self.dbh_objects.insert(str(f.oid), dumps(f), self.dbh_tree[len(self.dbh_tree)-1], self.dbh_paths[len(self.dbh_paths)-1]) #set the current oid for the id increment sequence set = self.dbh_id.select(['curr_oid']) if set == []: self.dbh_id.insert('1') def setup_fs_db(self): try: self.dbh_blocks.create(('key', str), ('blocks', str)) except IOError: self.dbh_blocks.open() try: self.dbh_replicas.create(('key', str), ('replicas', str)) except IOError: self.dbh_replicas.open() try: self.dbh_tree.create(('oid', str), ('parent', str)) except IOError: self.dbh_tree.open() try: self.dbh_tags.create(('oid', str), ('tag', str)) except IOError: self.dbh_tags.open() try: self.dbh_paths.create(('key', str)) except IOError: self.dbh_paths.open() try: self.dbh_id.create(('curr_oid', str)) except IOError: self.dbh_id.open() try: self.dbh_objects.create(('oid', str), ('fsobj', str), ('tree', self.dbh_tree), ('paths', self.dbh_paths)) except IOError: self.dbh_objects.open() self.__create_root() def get_path_oid(self, path): """Gets the parent filenode for 
path""" nodes = [] parent_path = path while 1: (parent_path,node) = os.path.split(parent_path) if node == '': nodes.insert(0,'/') break nodes.insert(0,node) parent_oid = 0 for node_name in nodes: key = str((parent_oid, node_name)) print "looking up: %s" % key # search for a match f = None for record in [record for record in self.dbh_objects]: if record.paths.key == key: f = loads(record.fsobj) break print "found it!" if not f: return 0 parent_oid = f.oid return parent_oid def insert_file(self, path, fsobj): #check first if there is a parent directory to store this file f = self.get_file(path=path) print "inserting file with path: "+path print fsobj if not f: print "ERR: [%s]" % os.path.split(fsobj.path)[0] raise FileSystemError('No parent directory to store: %s' % fsobj.path) #the parent of this object is the path fsobj.parent = f.oid set = self.dbh_id.select_for_update(['curr_oid']) curr_oid = int(set[0].curr_oid) + 1 fsobj.oid = curr_oid print "Inserting OID: %s" % fsobj # lets see if we already have a key stored result = self.dbh_objects.select(['oid','fsobj'],oid=str(fsobj.oid)) if result != []: raise FileSystemError('File already exists') else: # we have create tree and paths first self.dbh_tree.insert(str(fsobj.oid), str(fsobj.parent)) self.dbh_paths.insert(str((fsobj.parent, fsobj.path))) self.dbh_objects.insert(str(fsobj.oid), dumps(fsobj), self.dbh_tree[len(self.dbh_tree)-1], self.dbh_paths[len(self.dbh_paths)-1]) set[0].update(curr_oid=str(curr_oid)) return curr_oid def get_file(self, oid='', path=''): if oid: set = self.dbh_objects.select(['oid', 'fsobj'], oid=str(oid)) if set == []: f = None else: f = set[0].fsobj elif path: if path == '/': key = str((0,'/')) else: parent_oid = self.get_path_oid(os.path.split(path)[0]) node_name = os.path.split(path)[1] key = str((parent_oid, node_name)) # search for a match f = None for record in [record for record in self.dbh_objects]: print record.paths.key if record.paths.key == key: f = record.fsobj break else: f = 
None if f: f = loads(f) return f def get_children(self, oid): # lookup FSOBJECT with given oid set = self.dbh_objects.select(['oid', 'fsobj'], oid=str(oid)) if set == []: return [] file_array = [] # lookup objects with parent oid set = self.dbh_tree.select(['oid', 'parent'], parent=str(oid)) for i in set: obj = self.dbh_objects.select(['oid', 'fsobj'], oid=str(i.oid)) if obj != []: file_array.append(loads(obj[0].fsobj)) return file_array def debug_print_db(self, db): pass def print_object_db(self): self.debug_print_db(self.dbh_objects) def delete_dir(self,oid): pass def delete_file(self, oid): pass def rename_file(self,src,dest): pass def update_file(self, fsobj): set = self.dbh_objects.select_for_update(['oid', 'fsobj'], oid=str(fsobj.oid)) if set != []: set[0].update(fsobj=dumps(fsobj)) def add_block(self, block, serverid): f = self.get_file(oid=str(block.oid)) if not f: raise FileSystemError('add_block: Object %s does not exist' % block.oid) key = str((long(block.oid),long(block.block_id))) #the key is both the oid and the block_id set1 = self.dbh_replicas.select_for_update(['key', 'replicas'], key=key) if set1 == []: replicas = FSReplicas(block.oid, block.block_id) else: replicas = loads(set1[0].replicas) f.blocks[block.block_id] = block.version set2 = self.dbh_blocks.select_for_update(['key', 'blocks'], key=key) if set2 == []: b = None else: b = set2[0].block if b: b = loads(b) diff = block.size - b.size else: diff = block.size f.size += diff # update or insert? if set1 == []: self.dbh_blocks.insert(key, dumps(block)) else: set1[0].update(blocks=dumps(block)) self.update_file(f) replicas.add(serverid, block.version) # update or insert? 
if set2 == []: self.dbh_replicas.insert(key,dumps(replicas)) else: set2[0].update(replicas=dumps(replicas)) def add_block_replica(self, block, serverid): f = self.get_file(str(block.oid)) if not f: raise FileSystemError('add_block_replica: Object %s does not exist' % block.oid) key = str((block.oid, block.block_id)) set = self.dbh_replicas.select_for_update(['key', 'replicas'], key=key) if set == []: replicas = FSReplicas(block.oid, block.block_id) else: replicas = loads(set[0].replicas) replicas.add(serverid, block.version) # update or insert? if set == []: self.dbh_replicas.insert(key,dumps(replicas)) else: set[0].update(replicas=dumps(replicas)) def get_block_replicas(self, oid, block_id): key = str((long(oid), long(block_id))) set = self.dbh_replicas.select(['key', 'replicas'], key=key) if set == []: return None return loads(set[0].replicas) def get_block(self, oid, block_id): key = str((long(oid), long(block_id))) set = self.dbh_blocks.select(['key', 'blocks'], key=key) if set == []: return None return loads(set[0].blocks) def print_replicas_db(self): self.debug_print_db(self.dbh_replicas) def close_fs_db(self): self.dbh_blocks.close() self.dbh_replicas.close() self.dbh_tree.close() self.dbh_id.close() self.dbh_paths.close() self.dbh_objects.close()
import sys import os sys.path.append(os.getcwd()+"\\lib") from bs4 import BeautifulSoup, SoupStrainer import urllib2 from urllib2 import Request, urlopen, URLError, HTTPError from buzhug import Base import re import unicodedata num_imgs=0 imagesDB = Base(os.getcwd()+"\\Databases\\training_images.db") try: imagesDB.open() except IOError: print "creaitng imabegase" imagesDB.create(('title',str),("url",str),("score",float),("fileloc",str )) ##imagesDB.create(('title',str),("url",str),("score",float),("fileloc",str ), mode="override") challenges = {(1005,110),(1154,44),(430,82),(296,52), (1412,11),(980,52),(616,70),(536,67),(707,64),(431,61),(565,61)} ##challenges = {(430,1),(1154,1)} for challenge in challenges: print "challenge #",challenge[0] for i in range(1,challenge[1]+1):#pages on the website print "ripping page " + str(i) page = urllib2.urlopen("<http://www.dpchallenge.com/challenge_results.php?CHALLENGE_ID="+str(challenge[0])+"&page="+str(i)+">") rawHtml = page.read() anchors = SoupStrainer("a", {'class':'i'}) soup = BeautifulSoup(rawHtml, parse_only=anchors)
#!/usr/bin/python2.7 #this is a commandline utility to quickly get book text for testing purposes from buzhug import Base import os import sys import json db = Base(os.getcwd() + '/db/bookDB') db.open() c=0 l = len(sys.argv) while c < l-1: c+=1 inp = sys.argv[c] dupProtect = True bookID = int(inp) records = db.select(ID = bookID) bookText = '' slug = '' for r in records: j = json.loads(r.json) if j.get('ID') == bookID: text = j.get('pages') for p in text: bookText += p['text'] + '\n' slug = r.slug if dupProtect: break print bookText db.close()
class DB: def __init__(self, storage_path): self.dbh_objects = Base(os.path.join(storage_path, 'objects')) self.dbh_blocks = Base(os.path.join(storage_path, 'blocks')) self.dbh_replicas = Base(os.path.join(storage_path, 'replicas')) self.dbh_tree = Base(os.path.join(storage_path, 'tree')) self.dbh_paths = Base(os.path.join(storage_path, 'paths')) self.dbh_id = Base(os.path.join(storage_path, 'id')) self.dbh_tags = Base(os.path.join(storage_path, 'tags')) self.storage_path = storage_path def __create_root(self): """ Check if the filesystem has a / and if not create it""" print "Initializing filesystem..." if self.get_file(path='/'): return print "Creating root..." f = FSObject(1, 1, '/', 0, 0, 0, 0) # lets see if we already have a key stored set = self.dbh_objects.select(['oid'], oid=str(f.oid)) if set == []: # we have create tree and paths first self.dbh_tree.insert(str(f.oid), str(f.parent)) self.dbh_paths.insert(str((f.parent, f.path))) self.dbh_objects.insert(str(f.oid), dumps(f), self.dbh_tree[len(self.dbh_tree) - 1], self.dbh_paths[len(self.dbh_paths) - 1]) #set the current oid for the id increment sequence set = self.dbh_id.select(['curr_oid']) if set == []: self.dbh_id.insert('1') def setup_fs_db(self): try: self.dbh_blocks.create(('key', str), ('blocks', str)) except IOError: self.dbh_blocks.open() try: self.dbh_replicas.create(('key', str), ('replicas', str)) except IOError: self.dbh_replicas.open() try: self.dbh_tree.create(('oid', str), ('parent', str)) except IOError: self.dbh_tree.open() try: self.dbh_tags.create(('oid', str), ('tag', str)) except IOError: self.dbh_tags.open() try: self.dbh_paths.create(('key', str)) except IOError: self.dbh_paths.open() try: self.dbh_id.create(('curr_oid', str)) except IOError: self.dbh_id.open() try: self.dbh_objects.create(('oid', str), ('fsobj', str), ('tree', self.dbh_tree), ('paths', self.dbh_paths)) except IOError: self.dbh_objects.open() self.__create_root() def get_path_oid(self, path): """Gets the parent 
filenode for path""" nodes = [] parent_path = path while 1: (parent_path, node) = os.path.split(parent_path) if node == '': nodes.insert(0, '/') break nodes.insert(0, node) parent_oid = 0 for node_name in nodes: key = str((parent_oid, node_name)) print "looking up: %s" % key # search for a match f = None for record in [record for record in self.dbh_objects]: if record.paths.key == key: f = loads(record.fsobj) break print "found it!" if not f: return 0 parent_oid = f.oid return parent_oid def insert_file(self, path, fsobj): #check first if there is a parent directory to store this file f = self.get_file(path=path) print "inserting file with path: " + path print fsobj if not f: print "ERR: [%s]" % os.path.split(fsobj.path)[0] raise FileSystemError('No parent directory to store: %s' % fsobj.path) #the parent of this object is the path fsobj.parent = f.oid set = self.dbh_id.select_for_update(['curr_oid']) curr_oid = int(set[0].curr_oid) + 1 fsobj.oid = curr_oid print "Inserting OID: %s" % fsobj # lets see if we already have a key stored result = self.dbh_objects.select(['oid', 'fsobj'], oid=str(fsobj.oid)) if result != []: raise FileSystemError('File already exists') else: # we have create tree and paths first self.dbh_tree.insert(str(fsobj.oid), str(fsobj.parent)) self.dbh_paths.insert(str((fsobj.parent, fsobj.path))) self.dbh_objects.insert(str(fsobj.oid), dumps(fsobj), self.dbh_tree[len(self.dbh_tree) - 1], self.dbh_paths[len(self.dbh_paths) - 1]) set[0].update(curr_oid=str(curr_oid)) return curr_oid def get_file(self, oid='', path=''): if oid: set = self.dbh_objects.select(['oid', 'fsobj'], oid=str(oid)) if set == []: f = None else: f = set[0].fsobj elif path: if path == '/': key = str((0, '/')) else: parent_oid = self.get_path_oid(os.path.split(path)[0]) node_name = os.path.split(path)[1] key = str((parent_oid, node_name)) # search for a match f = None for record in [record for record in self.dbh_objects]: print record.paths.key if record.paths.key == key: f = 
record.fsobj break else: f = None if f: f = loads(f) return f def get_children(self, oid): # lookup FSOBJECT with given oid set = self.dbh_objects.select(['oid', 'fsobj'], oid=str(oid)) if set == []: return [] file_array = [] # lookup objects with parent oid set = self.dbh_tree.select(['oid', 'parent'], parent=str(oid)) for i in set: obj = self.dbh_objects.select(['oid', 'fsobj'], oid=str(i.oid)) if obj != []: file_array.append(loads(obj[0].fsobj)) return file_array def debug_print_db(self, db): pass def print_object_db(self): self.debug_print_db(self.dbh_objects) def delete_dir(self, oid): pass def delete_file(self, oid): pass def rename_file(self, src, dest): pass def update_file(self, fsobj): set = self.dbh_objects.select_for_update(['oid', 'fsobj'], oid=str(fsobj.oid)) if set != []: set[0].update(fsobj=dumps(fsobj)) def add_block(self, block, serverid): f = self.get_file(oid=str(block.oid)) if not f: raise FileSystemError('add_block: Object %s does not exist' % block.oid) key = str( (long(block.oid), long(block.block_id))) #the key is both the oid and the block_id set1 = self.dbh_replicas.select_for_update(['key', 'replicas'], key=key) if set1 == []: replicas = FSReplicas(block.oid, block.block_id) else: replicas = loads(set1[0].replicas) f.blocks[block.block_id] = block.version set2 = self.dbh_blocks.select_for_update(['key', 'blocks'], key=key) if set2 == []: b = None else: b = set2[0].block if b: b = loads(b) diff = block.size - b.size else: diff = block.size f.size += diff # update or insert? if set1 == []: self.dbh_blocks.insert(key, dumps(block)) else: set1[0].update(blocks=dumps(block)) self.update_file(f) replicas.add(serverid, block.version) # update or insert? 
if set2 == []: self.dbh_replicas.insert(key, dumps(replicas)) else: set2[0].update(replicas=dumps(replicas)) def add_block_replica(self, block, serverid): f = self.get_file(str(block.oid)) if not f: raise FileSystemError( 'add_block_replica: Object %s does not exist' % block.oid) key = str((block.oid, block.block_id)) set = self.dbh_replicas.select_for_update(['key', 'replicas'], key=key) if set == []: replicas = FSReplicas(block.oid, block.block_id) else: replicas = loads(set[0].replicas) replicas.add(serverid, block.version) # update or insert? if set == []: self.dbh_replicas.insert(key, dumps(replicas)) else: set[0].update(replicas=dumps(replicas)) def get_block_replicas(self, oid, block_id): key = str((long(oid), long(block_id))) set = self.dbh_replicas.select(['key', 'replicas'], key=key) if set == []: return None return loads(set[0].replicas) def get_block(self, oid, block_id): key = str((long(oid), long(block_id))) set = self.dbh_blocks.select(['key', 'blocks'], key=key) if set == []: return None return loads(set[0].blocks) def print_replicas_db(self): self.debug_print_db(self.dbh_replicas) def close_fs_db(self): self.dbh_blocks.close() self.dbh_replicas.close() self.dbh_tree.close() self.dbh_id.close() self.dbh_paths.close() self.dbh_objects.close()
class TransactionsDB(object):
    """Bank-transaction store on a buzhug base, with label assignment,
    duplicate-aware feeding, and cached per-session query aggregation."""

    BASE = 'banan/transactions'

    def __init__(self, conf):
        self.config = conf
        self._sessions = {}  # per-session cache for assemble_data()
        self.open()

    # Management

    def open(self):
        """Open the base, creating it on first use."""
        self.db = Base(TransactionsDB.BASE)
        try:
            self.db.open()
        except IOError:
            self.db.create(('amount', float),
                           ('amount_local', float),
                           ('date', date),
                           ('account', str),
                           ('label', str),
                           ('currency', str))

    def close(self):
        self.db.close()

    def clearall(self):
        """Destroy and recreate the base."""
        self.db.destroy()
        self.open()

    def insert(self, entry):
        """Insert one transaction dict into the base."""
        self.db.insert(amount=entry['amount'],
                       amount_local=entry['amount_local'],
                       date=entry['date'],
                       account=entry['account'],
                       label=entry['label'],
                       currency=entry['currency'])

    def feed(self, fpath, parser, skip_duplicates=True, overwrite=False,
             delete=False, dry_run=False):
        """Parse `fpath` with `parser` and insert (or delete/overwrite) entries.

        dry_run only prints what would be inserted.
        """
        deleted = added = 0
        for entry in parser.parse(fpath):
            if dry_run:
                print('%s %-40s\t%12.2f %s' % (entry['date'].isoformat(),
                                               entry['account'][:40],
                                               entry['amount'],
                                               entry['currency']))
                continue
            if skip_duplicates or overwrite or delete:
                # a duplicate = same date, account and amount
                _dup = self.db(date=entry['date'], account=entry['account'],
                               amount=entry['amount'])
                if _dup:
                    if overwrite or delete:
                        deleted += len(_dup)
                        self.db.delete(_dup)
                    else:
                        continue
            if delete:
                continue
            entry['label'] = self.config.assign_label(entry)
            self.insert(entry)
            added += 1
        if not dry_run:
            INFO(' added %i transactions' % added)
            INFO(' deleted %i transactions' % deleted)
            parser.post_process(self.db, added)

    def update_labels(self):
        """Re-run label assignment over every stored record."""
        # Load all records into memory. File will get corrupt if using the iterator.
        records = [rec for rec in self.db]
        for record in records:
            as_dict = dict((field, getattr(record, field))
                           for field in record.fields)
            label = self.config.assign_label(as_dict)
            if label != record.label:
                self.db.update(record, label=label)
        self.db.cleanup()

    # Queries

    get_amount = lambda rec: rec.amount_local

    def results_as_text(self, results):
        """Render `results` (sorted by date) as a list of aligned text lines."""
        results.sort_by('date')
        text_list = []
        # BUG FIX: guard the empty case -- results[0] raised IndexError
        if len(results) == 0:
            return text_list
        idx = 0
        record = results[idx]
        while True:
            text_list.append('%s %-40s\t%12.2f %s' % (
                record.date.isoformat(),
                unicode(record.account[:40], 'utf-8'),
                record.amount,
                record.currency))
            try:
                idx += 1
                record = results[idx]
            except IndexError:
                return text_list

    def assemble_data(self, sid, datatype, foreach, show, select):
        """Aggregate transactions for session `sid`.

        Returns (True, payload) where payload is flot plot data or text lines
        depending on `datatype`, or (False, error_message) on failure.
        """
        try:
            session = self._sessions.get(sid, {})
            if session:
                if session['raw_query'] == (foreach, show, select):
                    # Same query, return cached result
                    return True, \
                        self._sessions[sid]['flot_' + show] if datatype == 'plot' else \
                        self._sessions[sid]['text']
            # Helpers
            get_amount = lambda rec: rec.amount_local
            M = range(1, 13)
            total = strlen = 0
            data = {}
            text = {}
            query = 'date1 <= date < date2 and label == l'
            if foreach == 'label':
                if session:
                    if session['raw_query'][0] == 'label' and \
                            session['raw_query'][2] == select:
                        # Same query, but different presentation (sum or average)
                        return True, \
                            self._sessions[sid]['flot_' + show] if datatype == 'plot' else \
                            self._sessions[sid]['text']
                # New query: select holds one or two MMYYYY date stamps
                dates = re.findall('[0-9]{6}', unquote_plus(select))
                date1 = date2 = date(int(dates[0][2:]), int(dates[0][:2]), 1)
                if len(dates) == 2:
                    date2 = date(int(dates[1][2:]), int(dates[1][:2]), 1)
                # advance date2 to the first day of the following month
                date2 = date(date2.year + (date2.month == 12),
                             M[date2.month - 12], 1)
                for label in self.config.labels.iterkeys():
                    results = self.db.select(None, query, l=label,
                                             date1=date1, date2=date2)
                    value = sum(map(get_amount, results))
                    if abs(value) > 1:
                        data[label] = value
                        if label not in self.config.cash_flow_ignore:
                            total += value
                        else:
                            label += '*'
                        text[label] = self.results_as_text(results)
                        strlen = len(text[label][-1])
                        sumstr = '%12.2f %s' % (value, self.config.local_currency)
                        text[label].append('-' * strlen)
                        text[label].append(' ' * (strlen - len(sumstr)) + sumstr)
                ydelta = date2.year - date1.year
                mdelta = date2.month - date1.month
                delta = 12 * ydelta + mdelta
                session['flot_average'] = {}
                for key, val in data.iteritems():
                    session['flot_average'][key] = val / delta
            elif foreach in ('month', 'year'):
                # New query: walk backwards one month/year at a time
                date1 = date2 = first = datetime.now()
                if foreach == 'month':
                    first = date(date1.year - 1, date1.month, 1)
                    date1 = date(date1.year - (date1.month == 1),
                                 M[date1.month - 2], 1)
                    date2 = date(date2.year, date2.month, 1)
                else:
                    first = date(date1.year - 9, 1, 1)
                    date1 = date(date1.year, 1, 1)
                    date2 = date(date2.year + 1, 1, 1)
                select = unquote_plus(select)
                while date1 >= first:
                    results = self.db.select(None, query, l=select,
                                             date1=date1, date2=date2)
                    value = sum(map(get_amount, results))
                    date2 = date1
                    if foreach == 'month':
                        key = date1.strftime('%Y.%m')
                        date1 = date(date2.year - (date2.month == 1),
                                     M[date2.month - 2], 1)
                    else:
                        key = str(date1.year)
                        date1 = date(date2.year - 1, 1, 1)
                    data[key] = value
                    total += value
                    if results:
                        text[key] = self.results_as_text(results)
                        strlen = len(text[key][-1])
                        sumstr = '%12.2f %s' % (value, self.config.local_currency)
                        text[key].append('-' * strlen)
                        text[key].append(' ' * (strlen - len(sumstr)) + sumstr)
            # All good, set new session attributes
            session['raw_query'] = (foreach, show, select)
            session['flot_sum'] = data
            session['text'] = text
            if session['text']:
                session['text']['***'] = [
                    '-' * strlen,
                    'SUM: %12.2f %s' % (total, self.config.local_currency),
                    '-' * strlen]
            self._sessions[sid] = session
            return True, session['flot_' + show] if datatype == 'plot' \
                else session['text']
        except Exception as e:
            # top-level boundary: report the error to the caller
            return False, str(e)
print print houses[0] print houses[0].resident.name print '\nhouses with jean' for h in houses: if h.resident.name == 'jean': print h.address, print h.resident.age print '\n select with resident.name = jean' recs = houses.select([],'resident == v',v='jean') print recs h1 = Base('houses') h1.open() print '\nh1[0]' print h1[0] class DictRecord(Record): def __getitem__(self, k): item = self names = k.split('.') for name in names: item = getattr(item, name) return item h1.set_record_class(DictRecord) print '\nrecord_class = DictRecord, h1[0]' print h1[0] print "\nResident name: %(resident.name)s\nAddress: %(address)s" % h1[0]
def run_test(thread_safe=False): if not thread_safe: db = Base(r'dummy') else: db = TS_Base('dummy') db.create(('name',str), ('fr_name',unicode), ('age',int), ('size',int,300), ('birth',date,date(1994,1,14)), ('afloat',float,1.0), ('birth_hour', dtime,dtime(10,10,10)), mode='override') # test float conversions if thread_safe is False: f = db._file["afloat"] def all(v): return [ord(c) for c in v] for i in range(10): afloat = random.uniform(-10**random.randint(-307,307), 10**random.randint(-307,307)) try: assert cmp(afloat,0.0) == cmp(f.to_block(afloat),f.to_block(0.0)) except: print afloat print "afloat > 0.0 ?",afloat>0.0 print "blocks ?",f.to_block(afloat)>f.to_block(0.0) print all(f.to_block(afloat)),all(f.to_block(0.0)) raise assert db.defaults["age"] == None assert db.defaults["size"] == 300 assert db.defaults["afloat"] == 1.0 assert db.defaults["birth_hour"] == dtime(10,10,10) assert db.defaults["birth"] == date(1994,1,14) for i in range(100): db.insert(name=random.choice(names), fr_name = unicode(random.choice(fr_names),'latin-1'), age=random.randint(7,47),size=random.randint(110,175), birth=date(random.randint(1858,1999),random.randint(1,12),10), afloat = random.uniform(-10**random.randint(-307,307), 10**random.randint(-307,307)), birth_hour = dtime(random.randint(0, 23), random.randint(0, 59), random.randint(0, 59))) assert len(db)==100 assert isinstance(db[50].fr_name,unicode) print db[50].fr_name.encode('latin-1') db.open() # test if default values have not been modified after open() assert db.defaults["age"] == None assert db.defaults["size"] == 300 assert db.defaults["afloat"] == 1.0 assert db.defaults["birth_hour"] == dtime(10,10,10) assert db.defaults["birth"] == date(1994,1,14) for i in range(5): # insert a list db.insert(random.choice(names), unicode(random.choice(fr_names),'latin-1'), random.randint(7,47),random.randint(110,175), date(random.randint(1958,1999),random.randint(1,12),10), random.uniform(-10**random.randint(-307,307), 
10**random.randint(-307,307)), dtime(random.randint(0, 23), random.randint(0, 59), random.randint(0, 59))) db.insert(name=random.choice(names)) # missing fields for field in db.field_names[2:]: if field == "name": continue try: assert getattr(db[-1],field) == db.defaults[field] except: print "attribute %s not set to default value %s" %(field,db[-1]) raise # insert as string db.set_string_format(unicode,'latin-1') db.set_string_format(date,'%d-%m-%y') db.set_string_format(dtime,'%H-%M-%S') db.insert_as_strings(name="testname",fr_name=random.choice(fr_names), age=10,size=123,birth="07-10-95", birth_hour="20-53-3") assert db[-1].birth == date(1995,10,7) assert db[-1].name == "testname" assert db[-1].age == 10 assert db[-1].afloat == db.defaults["afloat"] db.insert_as_strings("testname",random.choice(fr_names), 11,134,"09-12-94",1.0, "5-6-13") assert db[len(db)-1].birth == date(1994,12,9) assert db[-1].name == "testname" assert db[-1].age == 11 assert db[-1].size == 134 assert db[-1].afloat == 1.0 # search between 2 dates print '\nBirth between 1960 and 1970' for r in db.select(None,birth=[date(1960,1,1),date(1970,12,13)]): print r.name,r.birth print "sorted" for r in db.select(None,birth=[date(1960,1,1),date(1970,12,13)]).sort_by('+name-birth'): print r.name,r.birth f = buzhug_files.FloatFile().to_block def all(v): return [ord(c) for c in f(v)] # search between 2 floats # selection by list comprehension s1 = [ r for r in db if 0.0 <= r.afloat <= 1e50 ] # selection by select s2 = db.select(None,'x<=afloat<=y',x=0.0,y=1e50) # selction by select with interval s3 = db.select(None,afloat=[0.0,1e50]) try: assert len(s1) == len(s2) == len(s3) except: print "%s records by list comprehension, " %len(s1) print "%s by select by formula," %len(s2) print "%s by select by interval" %len(s3) for r in s1: try: assert r in s2 except: print all(r.afloat) for r in s2: try: assert r in s1 except: print "in select but not in list comprehension",r raise r = db[0] assert r.__class__.db is 
db fr=random.choice(fr_names) s1 = [ r for r in db if r.age == 30 and r.fr_name == unicode(fr,'latin-1')] s2 = db.select(['name','fr_name'],age=30,fr_name = unicode(fr,'latin-1')) assert len(s1)==len(s2) # different ways to count the number of items assert len(db) == sum([1 for r in db]) == len(db.select(['name'])) # check if version number is correctly incremented for i in range(5): recs = db.select_for_update(['name'],'True') version = recs[0].__version__ recs[0].update() assert db[0].__version__ == version + 1 # check if cleanup doesn't change db length length_before = len(db) db.cleanup() assert len(db) == length_before # check if selection by select on __id__ returns the same as direct # access by id recs = db.select([],'__id__ == c',c=20) assert recs[0] == db[20] # check that has_key returns False for invalid hey assert not db.has_key(1000) # drop field db.drop_field('name') # check if field was actually removed from base definition and rows assert not "name" in db.fields assert not hasattr(db[20],"name") # add field db.add_field('name',str,default="marcel") # check if field was added with the correct default value assert "name" in db.fields assert hasattr(db[20],"name") assert db[20].name == "marcel" # change default value db.set_default("name","julie") db.insert(age=20) assert db[-1].name == "julie" # delete a record db.delete([db[10]]) # check if record has been deleted try: print db[10] raise Exception,"Row 10 should have been deleted" except IndexError: pass assert 10 not in db assert len(db) == length_before # selections # selection by generator expression # age between 30 et 32 d_ids = [] for r in [r for r in db if 33> r.age >= 30]: d_ids.append(r.__id__) length = len(db) # remove these items db.delete([r for r in db if 33> r.age >= 30]) # check if correct number of records removed assert len(db) == length - len(d_ids) # check if all records have been removed assert not [r for r in db if 33> r.age >= 30] # updates # select name = pierre s1 = 
db.select(['__id__','name','age','birth'],name='pierre') # make 'pierre' uppercase for record in db.select_for_update(None,'name == x',x='pierre'): db.update(record,name = record.name.upper()) # check if attribute was correctly updated for rec in s1: assert db[rec.__id__] == "Pierre" # increment ages for record in db.select_for_update([],'True'): age = record.age if not record.age is None: db.update(record,age = record.age+1) # check assert db[record.__id__].age == age + 1 for record in [r for r in db]: age = record.age if not record.age is None: db.update(record,age = record.age+1) # check assert db[record.__id__].age == age + 1 # change dates for record in db.select_for_update([],'age>v',v=35): db.update(record,birth = date(random.randint(1958,1999), random.randint(1,12),10)) db.commit() # check length after commit assert sum([1 for r in db]) == len(db) # insert new records for i in range(50): db.insert(name=random.choice(names), age=random.randint(7,47),size=random.randint(110,175)) # check that record 10 is still deleted try: print db[10] raise Exception,"Row 10 should have been deleted" except IndexError: pass print db.keys() print "has key 10 ?",db.has_key(10) assert 10 not in db #raw_input() # check that deleted_lines was cleared by commit() assert not db._pos.deleted_lines print db._del_rows.deleted_rows length = len(db) # before cleanup # physically remove the deleted items db.cleanup() # check that deleted_lines and deleted_rows are clean assert not db._pos.deleted_lines assert not db._del_rows.deleted_rows # check that record 10 is still deleted try: print db[10] raise Exception,"Row 10 should have been deleted" except IndexError: pass assert 10 not in db # check that length was not changed by cleanup assert len(db) == length assert len([ r for r in db]) == length # age > 30 for r in db.select(['__id__','name','age'], 'name == c1 and age > c2', c1 = 'pierre',c2 = 30): assert r.name == "pierre" assert r.age > 30 # name =="PIERRE" and age > 30 for r in 
db.select(['__id__','name','age','birth'], 'name == c1 and age > c2', c1 = 'PIERRE',c2 = 30): assert r.name == 'PIERRE' assert r.age > 30 # test with != for r in db.select(['__id__'],'name != c1',c1='claire'): assert r.name != 'claire' # age > id # with select s1 = db.select(['name','__id__','age'],'age > __id__') for r in s1: assert r.age > r.__id__ # with iter s2 = [ r for r in db if r.age > r.__id__ ] for r in s2: assert r.age > r.__id__ assert len(s1) == len(s2) # birth > date(1978,1,1) # with select s1 = db.select(['name','__id__','age'],'birth > v',v=date(1978,1,1)) for r in s1: assert r.birth > date(1978,1,1) # with iter s2 = [ r for r in db if r.birth and r.birth > date(1978,1,1) ] for r in s2: assert r.birth > date(1978,1,1) assert len(s1) == len(s2) # test with floats for i in range(10): x = random.uniform(-10**random.randint(-307,307), 10**random.randint(-307,307)) s1 = [ r for r in db if r.afloat > x ] s2 = db.select(['name'],'afloat > v',v=x) assert len(s1)==len(s2) # base with external link houses = Base('houses') houses.create(('address',str),('flag',bool),('resident',db,db[0]),mode="override") addresses = ['Giono','Proust','Mauriac','Gide','Bernanos','Racine', 'La Fontaine'] ks = db.keys() for i in range(50): x = random.choice(ks) address = random.choice(addresses) houses.insert(address=address,flag = address[0]>"H",resident=db[x]) # houses with jean s1 = [] for h in houses: if h.resident.name == 'jean': s1.append(h) # by select : ??? #s2 = houses.select([],'resident.name == v',v='jean') # assert len(s1) == len(s2) h1 = Base('houses') h1.open() l1 = len(h1.select([],flag=True)) l2 = len(h1.select([],flag=False)) assert l1 + l2 == len(h1) class DictRecord(Record): def __getitem__(self, k): item = self names = k.split('.') for name in names: item = getattr(item, name) return item h1.set_record_class(DictRecord) print '\nrecord_class = DictRecord, h1[0]' print h1[0] print "\nResident name: %(resident.name)s\nAddress: %(address)s" % h1[0]
def main(): X =[] Y=[] featuresDB = Base(os.getcwd()+"\\Databases\\features.db") featuresDB.open() print "features open" for rec in featuresDB: vec = [] vec.append(rec.f1) vec.append(rec.f3) vec.append(rec.f4) vec.append(rec.f5) vec.append(rec.f6) vec.append(rec.f7) vec.append(rec.f10) vec.append(rec.f11) vec.append(rec.f12) vec.append(rec.f13) vec.append(rec.f14) vec.append(rec.f15) vec.append(rec.f16) vec.append(rec.f17) vec.append(rec.f18) vec.append(rec.f19) vec.append(rec.f20) vec.append(rec.f21) vec.append(rec.f22) vec.append(rec.f23) X.append(vec) Y.append(rec.score) print "building classifier" Y = np.array(Y) ybar = Y.mean() for i in range(len(Y)): if Y[i]<ybar: Y[i]=1 else: Y[i]=2 scaler = Scaler().fit(X) X = scaler.transform(X) X= np.array(X) Y=np.array(Y) skf = cross_validation.StratifiedKFold(Y,k=2) for train, test in skf: X_train, X_test = X[train], X[test] y_train, y_test = Y[train], Y[test] clf = ExtraTreesClassifier(n_estimators=8,max_depth=None,min_split=1,random_state=0,compute_importances=True) scores = cross_validation.cross_val_score(clf,X_train,y_train,cv=5) clf.fit_transform(X_train,y_train) print "Accuracy: %0.4f (+/- %0.2f)" % (scores.mean(), scores.std() / 2) print clf.feature_importances_ y_pred =clf.predict(X_test) print classification_report(y_test,y_pred) model=(scaler,clf) joblib.dump(model,'AestheticModel\\aestheticModel.pkl') print "Done"
def run_test(thread_safe=False):
    """Exercise a buzhug Base end to end (Python 2 test script).

    Creates a 'dummy' base (thread-safe variant if *thread_safe* is true),
    then walks through inserts, defaults, string-format inserts, selects,
    updates, field add/drop, delete/commit/cleanup, and external links.
    Failures surface as AssertionError; progress is printed to stdout.
    """
    if not thread_safe:
        db = Base(r'dummy')
    else:
        db = TS_Base('dummy')
    # Schema: fields with and without default values; mode='override'
    # discards any existing base of the same name.
    db.create(('name', str), ('fr_name', unicode), ('age', int),
              ('size', int, 300), ('birth', date, date(1994, 1, 14)),
              ('afloat', float, 1.0),
              ('birth_hour', dtime, dtime(10, 10, 10)), mode='override')
    # test float conversions: the on-disk block encoding must preserve
    # ordering relative to 0.0 (only the non-thread-safe Base exposes _file)
    if thread_safe is False:
        f = db._file["afloat"]

        def all(v):
            # byte values of an encoded block, for failure diagnostics
            return [ord(c) for c in v]

        for i in range(10):
            afloat = random.uniform(-10**random.randint(-307, 307),
                                    10**random.randint(-307, 307))
            try:
                assert cmp(afloat, 0.0) == cmp(f.to_block(afloat),
                                               f.to_block(0.0))
            except:
                print afloat
                print "afloat > 0.0 ?", afloat > 0.0
                print "blocks ?", f.to_block(afloat) > f.to_block(0.0)
                print all(f.to_block(afloat)), all(f.to_block(0.0))
                raise
    # defaults as declared in create()
    assert db.defaults["age"] == None
    assert db.defaults["size"] == 300
    assert db.defaults["afloat"] == 1.0
    assert db.defaults["birth_hour"] == dtime(10, 10, 10)
    assert db.defaults["birth"] == date(1994, 1, 14)
    # populate with 100 random records (keyword form of insert)
    for i in range(100):
        db.insert(name=random.choice(names),
                  fr_name=unicode(random.choice(fr_names), 'latin-1'),
                  age=random.randint(7, 47), size=random.randint(110, 175),
                  birth=date(random.randint(1858, 1999),
                             random.randint(1, 12), 10),
                  afloat=random.uniform(-10**random.randint(-307, 307),
                                        10**random.randint(-307, 307)),
                  birth_hour=dtime(random.randint(0, 23),
                                   random.randint(0, 59),
                                   random.randint(0, 59)))
    assert len(db) == 100
    assert isinstance(db[50].fr_name, unicode)
    print db[50].fr_name.encode('latin-1')
    db.open()
    # test if default values have not been modified after open()
    assert db.defaults["age"] == None
    assert db.defaults["size"] == 300
    assert db.defaults["afloat"] == 1.0
    assert db.defaults["birth_hour"] == dtime(10, 10, 10)
    assert db.defaults["birth"] == date(1994, 1, 14)
    for i in range(5):
        # insert a list (positional form, field order from create())
        db.insert(random.choice(names),
                  unicode(random.choice(fr_names), 'latin-1'),
                  random.randint(7, 47), random.randint(110, 175),
                  date(random.randint(1958, 1999), random.randint(1, 12), 10),
                  random.uniform(-10**random.randint(-307, 307),
                                 10**random.randint(-307, 307)),
                  dtime(random.randint(0, 23), random.randint(0, 59),
                        random.randint(0, 59)))
    db.insert(name=random.choice(names))
    # missing fields: every unspecified field must take its default
    # (field_names[2:] skips the internal __id__/__version__ fields)
    for field in db.field_names[2:]:
        if field == "name":
            continue
        try:
            assert getattr(db[-1], field) == db.defaults[field]
        except:
            print "attribute %s not set to default value %s" % (field, db[-1])
            raise
    # insert as string: register per-type string formats first
    db.set_string_format(unicode, 'latin-1')
    db.set_string_format(date, '%d-%m-%y')
    db.set_string_format(dtime, '%H-%M-%S')
    db.insert_as_strings(name="testname", fr_name=random.choice(fr_names),
                         age=10, size=123, birth="07-10-95",
                         birth_hour="20-53-3")
    assert db[-1].birth == date(1995, 10, 7)
    assert db[-1].name == "testname"
    assert db[-1].age == 10
    assert db[-1].afloat == db.defaults["afloat"]
    db.insert_as_strings("testname", random.choice(fr_names), 11, 134,
                         "09-12-94", 1.0, "5-6-13")
    assert db[len(db) - 1].birth == date(1994, 12, 9)
    assert db[-1].name == "testname"
    assert db[-1].age == 11
    assert db[-1].size == 134
    assert db[-1].afloat == 1.0
    # search between 2 dates (interval given as a 2-element list)
    print '\nBirth between 1960 and 1970'
    for r in db.select(None, birth=[date(1960, 1, 1), date(1970, 12, 13)]):
        print r.name, r.birth
    print "sorted"
    for r in db.select(None,
                       birth=[date(1960, 1, 1),
                              date(1970, 12, 13)]).sort_by('+name-birth'):
        print r.name, r.birth
    f = buzhug_files.FloatFile().to_block

    def all(v):
        # byte values of a float's encoded block (diagnostics)
        return [ord(c) for c in f(v)]

    # search between 2 floats
    # selection by list comprehension
    s1 = [r for r in db if 0.0 <= r.afloat <= 1e50]
    # selection by select with a formula
    s2 = db.select(None, 'x<=afloat<=y', x=0.0, y=1e50)
    # selection by select with interval
    s3 = db.select(None, afloat=[0.0, 1e50])
    try:
        # all three selection styles must agree
        assert len(s1) == len(s2) == len(s3)
    except:
        print "%s records by list comprehension, " % len(s1)
        print "%s by select by formula," % len(s2)
        print "%s by select by interval" % len(s3)
    for r in s1:
        try:
            assert r in s2
        except:
            print all(r.afloat)
    for r in s2:
        try:
            assert r in s1
        except:
            print "in select but not in list comprehension", r
            raise
    r = db[0]
    # records know their base through their class
    assert r.__class__.db is db
    fr = random.choice(fr_names)
    s1 = [r for r in db if r.age == 30
          and r.fr_name == unicode(fr, 'latin-1')]
    s2 = db.select(['name', 'fr_name'], age=30,
                   fr_name=unicode(fr, 'latin-1'))
    assert len(s1) == len(s2)
    # different ways to count the number of items
    assert len(db) == sum([1 for r in db]) == len(db.select(['name']))
    # check if version number is correctly incremented
    for i in range(5):
        recs = db.select_for_update(['name'], 'True')
        version = recs[0].__version__
        recs[0].update()
        assert db[0].__version__ == version + 1
    # check if cleanup doesn't change db length
    length_before = len(db)
    db.cleanup()
    assert len(db) == length_before
    # check if selection by select on __id__ returns the same as direct
    # access by id
    recs = db.select([], '__id__ == c', c=20)
    assert recs[0] == db[20]
    # check that has_key returns False for an invalid key
    assert not db.has_key(1000)
    # drop field
    db.drop_field('name')
    # check if field was actually removed from base definition and rows
    assert not "name" in db.fields
    assert not hasattr(db[20], "name")
    # add field
    db.add_field('name', str, default="marcel")
    # check if field was added with the correct default value
    assert "name" in db.fields
    assert hasattr(db[20], "name")
    assert db[20].name == "marcel"
    # change default value
    db.set_default("name", "julie")
    db.insert(age=20)
    assert db[-1].name == "julie"
    # delete a record
    db.delete([db[10]])
    # check if record has been deleted
    try:
        print db[10]
        raise Exception, "Row 10 should have been deleted"
    except IndexError:
        pass
    assert 10 not in db
    assert len(db) == length_before
    # selections
    # selection by generator expression
    # age between 30 and 32
    d_ids = []
    for r in [r for r in db if 33 > r.age >= 30]:
        d_ids.append(r.__id__)
    length = len(db)
    # remove these items
    db.delete([r for r in db if 33 > r.age >= 30])
    # check if correct number of records removed
    assert len(db) == length - len(d_ids)
    # check if all records have been removed
    assert not [r for r in db if 33 > r.age >= 30]
    # updates
    # select name = pierre
    s1 = db.select(['__id__', 'name', 'age', 'birth'], name='pierre')
    # make 'pierre' uppercase
    for record in db.select_for_update(None, 'name == x', x='pierre'):
        db.update(record, name=record.name.upper())
    # check if attribute was correctly updated
    # NOTE(review): this compares the whole record to "Pierre" and the
    # updated value is "PIERRE" — looks wrong, but behavior is kept as-is
    for rec in s1:
        assert db[rec.__id__] == "Pierre"
    # increment ages
    for record in db.select_for_update([], 'True'):
        age = record.age
        if not record.age is None:
            db.update(record, age=record.age + 1)
            # check
            assert db[record.__id__].age == age + 1
    # same increment again, this time iterating the base directly
    for record in [r for r in db]:
        age = record.age
        if not record.age is None:
            db.update(record, age=record.age + 1)
            # check
            assert db[record.__id__].age == age + 1
    # change dates
    for record in db.select_for_update([], 'age>v', v=35):
        db.update(record, birth=date(random.randint(1958, 1999),
                                     random.randint(1, 12), 10))
    db.commit()
    # check length after commit
    assert sum([1 for r in db]) == len(db)
    # insert new records
    for i in range(50):
        db.insert(name=random.choice(names), age=random.randint(7, 47),
                  size=random.randint(110, 175))
    # check that record 10 is still deleted
    try:
        print db[10]
        raise Exception, "Row 10 should have been deleted"
    except IndexError:
        pass
    print db.keys()
    print "has key 10 ?", db.has_key(10)
    assert 10 not in db
    #raw_input()
    # check that deleted_lines was cleared by commit()
    assert not db._pos.deleted_lines
    print db._del_rows.deleted_rows
    length = len(db)  # before cleanup
    # physically remove the deleted items
    db.cleanup()
    # check that deleted_lines and deleted_rows are clean
    assert not db._pos.deleted_lines
    assert not db._del_rows.deleted_rows
    # check that record 10 is still deleted
    try:
        print db[10]
        raise Exception, "Row 10 should have been deleted"
    except IndexError:
        pass
    assert 10 not in db
    # check that length was not changed by cleanup
    assert len(db) == length
    assert len([r for r in db]) == length
    # age > 30
    for r in db.select(['__id__', 'name', 'age'],
                       'name == c1 and age > c2', c1='pierre', c2=30):
        assert r.name == "pierre"
        assert r.age > 30
    # name =="PIERRE" and age > 30
    for r in db.select(['__id__', 'name', 'age', 'birth'],
                       'name == c1 and age > c2', c1='PIERRE', c2=30):
        assert r.name == 'PIERRE'
        assert r.age > 30
    # test with !=
    for r in db.select(['__id__'], 'name != c1', c1='claire'):
        assert r.name != 'claire'
    # age > id
    # with select
    s1 = db.select(['name', '__id__', 'age'], 'age > __id__')
    for r in s1:
        assert r.age > r.__id__
    # with iter
    s2 = [r for r in db if r.age > r.__id__]
    for r in s2:
        assert r.age > r.__id__
    assert len(s1) == len(s2)
    # birth > date(1978,1,1)
    # with select
    s1 = db.select(['name', '__id__', 'age'], 'birth > v',
                   v=date(1978, 1, 1))
    for r in s1:
        assert r.birth > date(1978, 1, 1)
    # with iter (guard against records whose birth is None)
    s2 = [r for r in db if r.birth and r.birth > date(1978, 1, 1)]
    for r in s2:
        assert r.birth > date(1978, 1, 1)
    assert len(s1) == len(s2)
    # test with floats
    for i in range(10):
        x = random.uniform(-10**random.randint(-307, 307),
                           10**random.randint(-307, 307))
        s1 = [r for r in db if r.afloat > x]
        s2 = db.select(['name'], 'afloat > v', v=x)
        assert len(s1) == len(s2)
    # base with external link: 'resident' references records of db
    houses = Base('houses')
    houses.create(('address', str), ('flag', bool),
                  ('resident', db, db[0]), mode="override")
    addresses = [
        'Giono', 'Proust', 'Mauriac', 'Gide', 'Bernanos', 'Racine',
        'La Fontaine'
    ]
    ks = db.keys()
    for i in range(50):
        x = random.choice(ks)
        address = random.choice(addresses)
        houses.insert(address=address, flag=address[0] > "H",
                      resident=db[x])
    # houses with jean
    s1 = []
    for h in houses:
        if h.resident.name == 'jean':
            s1.append(h)
    # by select : ???
    #s2 = houses.select([],'resident.name == v',v='jean')
    # assert len(s1) == len(s2)
    h1 = Base('houses')
    h1.open()
    l1 = len(h1.select([], flag=True))
    l2 = len(h1.select([], flag=False))
    assert l1 + l2 == len(h1)

    class DictRecord(Record):
        # record subclass allowing dotted-path lookup: rec['a.b'] follows
        # attributes a then b
        def __getitem__(self, k):
            item = self
            names = k.split('.')
            for name in names:
                item = getattr(item, name)
            return item

    h1.set_record_class(DictRecord)
    print '\nrecord_class = DictRecord, h1[0]'
    print h1[0]
    print "\nResident name: %(resident.name)s\nAddress: %(address)s" % h1[0]
assert db.defaults["afloat"] == 1.0 assert db.defaults["birth_hour"] == dtime(10,10,10) assert db.defaults["birth"] == date(1994,1,14) for i in range(100): db.insert(name=random.choice(names), fr_name = unicode(random.choice(fr_names),'latin-1'), age=random.randint(7,47),size=random.randint(110,175), birth=date(random.randint(1858,1999),random.randint(1,12),10), afloat = random.uniform(-10**random.randint(-307,307), 10**random.randint(-307,307)), birth_hour = dtime(random.randint(0, 23), random.randint(0, 59), random.randint(0, 59))) assert len(db)==100 db.open() # test if default values have not been modified after open() assert db.defaults["age"] == None assert db.defaults["size"] == 300 assert db.defaults["afloat"] == 1.0 assert db.defaults["birth_hour"] == dtime(10,10,10) assert db.defaults["birth"] == date(1994,1,14) for i in range(5): # insert a list db.insert(random.choice(names), unicode(random.choice(fr_names),'latin-1'), random.randint(7,47),random.randint(110,175), date(random.randint(1958,1999),random.randint(1,12),10), random.uniform(-10**random.randint(-307,307), 10**random.randint(-307,307)),
print print houses[0] print houses[0].resident.name print '\nhouses with jean' for h in houses: if h.resident.name == 'jean': print h.address, print h.resident.age print '\n select with resident.name = jean' recs = houses.select([], 'resident == v', v='jean') print recs h1 = Base('houses') h1.open() print '\nh1[0]' print h1[0] class DictRecord(Record): def __getitem__(self, k): item = self names = k.split('.') for name in names: item = getattr(item, name) return item h1.set_record_class(DictRecord) print '\nrecord_class = DictRecord, h1[0]'
def main(): events = Base(os.getcwd()+"\\Databases\\events.db") locations = Base(os.getcwd()+"\\Databases\\locations.db") photos = Base(os.getcwd()+"\\Databases\\photos.db") people = Base(os.getcwd()+"\\Databases\\people.db") faces = Base(os.getcwd()+"\\Databases\\faces.db") training = Base(os.getcwd()+"\\Databases\\training_images.db") features = Base(os.getcwd()+"\\Databases\\features.db") try: print "============ events ================" events.open() for field in events.fields: print field,events.fields[field] print "len",len(events),"\n\n" for record in events.select().sort_by("+firsttime"): print record elist =[ None for i in range(len(events))] print elist print "============ locations ================" locations.open() for field in locations.fields: print field,locations.fields[field] print "len",len(locations),"\n\n" #for record in locations: # print record print "============ photos ================" photos.open() for field in photos.fields: print field,photos.fields[field] print "len",len(photos),"\n\n" for record in photos: print record print "============ people ================" people.open() for field in people.fields: print field,people.fields[field] print "len",len(people),"\n\n" #for record in people: # print record print "=========== faces ==============" faces.open() for field in faces.fields: print field,faces.fields[field] print "len",len(faces),"\n\n" #for record in faces: # print record print "============ training ========" training.open() for field in training.fields: print field, training.fields[field] print "len",len(training),"\n\n" print "============ features ===========" features.open() for field in features.fields: print field, features.fields[field] print "len",len(features),"\n\n" except IOError as err: print "no database there:",err