def LoadPersonsExpos(): persontab = open(os.path.join(settings.EXPOWEB, "noinfo", "folk.csv")) personreader = csv.reader(persontab) headers = personreader.next() header = dict(zip(headers, range(len(headers)))) # make expeditions print "Loading expeditions" years = headers[5:] for year in years: lookupAttribs = {'year': year} nonLookupAttribs = {'name': "CUCC expo %s" % year} save_carefully(models.Expedition, lookupAttribs, nonLookupAttribs) # make persons print "Loading personexpeditions" #expoers2008 = """Edvin Deadman,Kathryn Hopkins,Djuke Veldhuis,Becka Lawson,Julian Todd,Natalie Uomini,Aaron Curtis,Tony Rooke,Ollie Stevens,Frank Tully,Martin Jahnke,Mark Shinwell,Jess Stirrups,Nial Peters,Serena Povia,Olly Madge,Steve Jones,Pete Harley,Eeva Makiranta,Keith Curtis""".split(",") #expomissing = set(expoers2008) for personline in personreader: name = personline[header["Name"]] name = re.sub("<.*?>", "", name) mname = re.match("(\w+)(?:\s((?:van |ten )?\w+))?(?:\s\(([^)]*)\))?", name) nickname = mname.group(3) or "" lookupAttribs = { 'first_name': mname.group(1), 'last_name': (mname.group(2) or "") } nonLookupAttribs = { 'is_vfho': personline[header["VfHO member"]], } person, created = save_carefully(models.Person, lookupAttribs, nonLookupAttribs) parseMugShotAndBlurb(personline=personline, header=header, person=person) # make person expedition from table for year, attended in zip(headers, personline)[5:]: expedition = models.Expedition.objects.get(year=year) if attended == "1" or attended == "-1": lookupAttribs = {'person': person, 'expedition': expedition} nonLookupAttribs = { 'nickname': nickname, 'is_guest': (personline[header["Guest"]] == "1") } save_carefully(models.PersonExpedition, lookupAttribs, nonLookupAttribs)
def saveMugShot(mugShotPath, mugShotFilename, person):
    """Store a mugshot image file as a Photo record linked to *person*.

    mugShotPath is the source file on disk; mugShotFilename is the value
    from the folk.csv cell, which normally carries an "i/" directory prefix.
    """
    # If filename in cell has the directory attached (I think they all do),
    # remove it.  (The original code had a no-op else branch here; removed.)
    if mugShotFilename.startswith(r'i/'):
        mugShotFilename = mugShotFilename[2:]

    # Photo.file.path is resolved by the django filesystemstorage specified in
    # models.py, so build a throwaway instance purely to learn the target path.
    dummyObj = models.Photo(file=mugShotFilename)

    # Put a copy of the file in the right place, unless one is already there.
    if not os.path.exists(dummyObj.file.path):
        shutil.copy(mugShotPath, dummyObj.file.path)

    mugShotObj, created = save_carefully(
        models.Photo,
        lookupAttribs={
            'is_mugshot': True,
            'file': mugShotFilename
        },
        nonLookupAttribs={
            'caption': "Mugshot for " + person.first_name + " " + person.last_name
        })

    if created:
        # Only link the person the first time; reruns leave the photo alone.
        mugShotObj.contains_person.add(person)
        mugShotObj.save()
def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground): """ saves a logbook entry and related persontrips """ trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground) if not author: print "skipping logentry", title return # tripCave = GetTripCave(place) # lplace = place.lower() if lplace not in noncaveplaces: cave = GetCaveLookup().get(lplace) #Check for an existing copy of the current entry, and save expeditionday = expedition.get_expedition_day(date) lookupAttribs = {'date': date, 'title': title} nonLookupAttribs = { 'place': place, 'text': text, 'author': author, 'expedition': expedition, 'expeditionday': expeditionday, 'cave': cave, 'slug': slugify(title)[:50] } lbo, created = save_carefully(models.LogbookEntry, lookupAttribs, nonLookupAttribs) for tripperson, time_underground in trippersons: lookupAttribs = {'personexpedition': tripperson, 'logbook_entry': lbo} nonLookupAttribs = { 'time_underground': time_underground, 'date': date, 'expeditionday': expeditionday, 'is_logbook_entry_author': (tripperson == author) } #print nonLookupAttribs save_carefully(models.PersonTrip, lookupAttribs, nonLookupAttribs)
def parse_KH_QMs(kh, inputFile):
    """import QMs from the 1623-161 (Kaninchenhöhle) html pages

    kh is the cave object the QMs belong to; inputFile is a path relative
    to settings.EXPOWEB.
    """
    # Compile once instead of per line; each QM line looks like:
    #   name="C<year>-<cave>-<number>..."</a> <grade><dd><description>[<nearest_station>]
    qm_re = re.compile('name=\"[CB](?P<year>\d*)-(?P<cave>\d*)-(?P<number>\d*).*</a> (?P<grade>[ABDCV])<dd>(?P<description>.*)\[(?P<nearest_station>.*)\]')
    khQMfile = open(settings.EXPOWEB + inputFile, 'r')
    try:
        # Iterate the file directly rather than slurping it with readlines().
        for line in khQMfile:
            res = qm_re.search(line)
            if not res:
                continue
            res = res.groupdict()
            year = int(res['year'])
            # check if placeholder exists for given year, create it if not
            placeholder, hadToCreate = LogbookEntry.objects.get_or_create(
                date__year=year,
                title="placeholder for QMs in 161",
                text="QMs temporarily attached to this should be re-attached to their actual trips",
                defaults={"date": date(year, 1, 1), "cave": kh})
            lookupArgs = {
                'found_by': placeholder,
                'number': res['number']
            }
            nonLookupArgs = {
                'grade': res['grade'],
                'nearest_station': res['nearest_station'],
                'location_description': res['description']
            }
            save_carefully(QM, lookupArgs, nonLookupArgs)
    finally:
        # Fix: the original never closed the file handle.
        khQMfile.close()
def get_or_create_placeholder(year):
    """ All surveys must be related to a logbookentry. We don't have a way to
    automatically figure out which survey went with which logbookentry, so we
    create a survey placeholder logbook entry for each year. This function
    always returns such a placeholder, and creates it if it doesn't exist yet.
    """
    yr = int(year)
    # Looked up by year+title; text and date are only set on first creation.
    placeholder_logbook_entry, newly_created = save_carefully(
        LogbookEntry,
        {'date__year': yr, 'title': "placeholder for surveys", },
        {'text': "surveys temporarily attached to this should be re-attached to their actual trips",
         'date': datetime.date(yr, 1, 1)})
    return placeholder_logbook_entry
def importSubcaves(cave):
    """Create a Subcave record for every link found in *cave*'s description page.

    Each linked file is read from disk (iso-8859-1) and stored re-encoded as
    utf-8.  Files that cannot be opened are logged and skipped.
    """
    for link in getLinksInCaveDescription(cave):
        # Built outside the try so the IOError handler can always name the path.
        subcaveFilePath = os.path.join(
            settings.EXPOWEB, os.path.dirname(cave.description_file), link[0])
        try:
            subcaveFile = open(subcaveFilePath, 'r')
            try:
                description = subcaveFile.read().decode('iso-8859-1').encode('utf-8')
            finally:
                # Fix: the original leaked the file handle.
                subcaveFile.close()
            lookupAttribs = {'title': link[1], 'cave': cave}
            nonLookupAttribs = {'description': description}
            # Fix: save_carefully returns (object, created); the original bound
            # the whole tuple to newSubcave, so the log line printed a tuple.
            newSubcave, created = save_carefully(Subcave,
                                                 lookupAttribs=lookupAttribs,
                                                 nonLookupAttribs=nonLookupAttribs)
            logging.info("Added " + unicode(newSubcave) + " to " + unicode(cave))
        except IOError:
            logging.info("Subcave import couldn't open " + subcaveFilePath)
def LoadCaveTab():
    """Import caves and entrances from noinfo/CAVETAB2.CSV.

    Each CSV row is either a cave (MultipleEntrances == 'yes' or ''), an
    extra entrance belonging to the most recently imported cave
    ('entrance' / 'last entrance'), or both at once (a single-entrance
    cave has MultipleEntrances == '').  The bare names used as row
    indices (Area, KatasterNumber, Name, ...) are column-index constants
    defined elsewhere in this module.  Caves are stored through
    save_carefully so re-running the import does not duplicate them.
    """
    cavetab = open(os.path.join(settings.EXPOWEB, "noinfo", "CAVETAB2.CSV"), 'rU')
    caveReader = csv.reader(cavetab)
    caveReader.next()  # Strip out column headers
    logging.info("Beginning to import caves from " + str(cavetab) + "\n" + "-" * 60 + "\n")

    # Make sure the two top-level kataster areas exist before any cave refers to them.
    for katArea in ['1623', '1626']:
        if not models.Area.objects.filter(short_name=katArea):
            newArea = models.Area(short_name=katArea)
            newArea.save()
            logging.info("Added area " + str(newArea.short_name) + "\n")
    area1626 = models.Area.objects.filter(short_name='1626')[0]
    area1623 = models.Area.objects.filter(short_name='1623')[0]

    counter = 0  # NOTE(review): never incremented or read anywhere below
    for line in caveReader:
        if line[Area] == 'nonexistent':
            continue
        entranceLetters = [ ]  # Used in caves that have mulitlple entrances, which are not described on seperate lines

        # When true, this line contains an actual cave, otherwise it is an extra entrance.
        if line[MultipleEntrances] == 'yes' or line[MultipleEntrances] == '':
            args = {}         # attributes used to LOOK UP an existing cave
            defaultArgs = {}  # attributes only applied when creating / updating

            # Closures over the current row: copy a (wiki-converted) cell into
            # the relevant dict if it is non-empty.
            def addToArgs(CSVname, modelName):
                if line[CSVname]:
                    args[modelName] = html_to_wiki(line[CSVname])

            # This has to do with the non-destructive import. These arguments will be
            # passed as the "default" dictionary in a get_or_create
            def addToDefaultArgs(CSVname, modelName):
                if line[CSVname]:
                    defaultArgs[modelName] = html_to_wiki(line[CSVname])

            # The attributes added using "addToArgs" will be used to look up an
            # existing cave. Those added using "addToDefaultArgs" will not.
            addToArgs(KatasterNumber, "kataster_number")
            addToDefaultArgs(KatStatusCode, "kataster_code")
            addToArgs(UnofficialNumber, "unofficial_number")
            addToArgs(Name, "official_name")
            addToDefaultArgs(Comment, "notes")
            addToDefaultArgs(Explorers, "explorers")
            addToDefaultArgs(UndergroundDescription, "underground_description")
            addToDefaultArgs(Equipment, "equipment")
            addToDefaultArgs(KatasterStatus, "kataster_status")
            addToDefaultArgs(References, "references")
            addToDefaultArgs(UndergroundCentreLine, "underground_centre_line")
            addToDefaultArgs(UndergroundDrawnSurvey, "survey")
            addToDefaultArgs(Length, "length")
            addToDefaultArgs(Depth, "depth")
            addToDefaultArgs(Extent, "extent")
            addToDefaultArgs(SurvexFile, "survex_file")
            # NOTE(review): "notes" was already filled from Comment above; this
            # second call overwrites it whenever the Notes cell is non-empty.
            addToDefaultArgs(Notes, "notes")

            # The following adds the legacy_file_path. This is always in either
            # Autogen file or Link file
            for header in (AutogenFile, LinkFile):
                if line[header]:
                    addToDefaultArgs(header, "description_file")
                    break

            # The following checks if this cave is non-public i.e. we don't have
            # rights to display it online. Noinfo was the name of the old password
            # protected directory, so if it has that then we will set the
            # non_public field of the model instance to true.
            defaultArgs["non_public"] = line[AutogenFile].startswith(
                'noinfo') or line[LinkFile].startswith('noinfo')

            newCave, created = save_carefully(models.Cave,
                                              lookupAttribs=args,
                                              nonLookupAttribs=defaultArgs)
            logging.info("Added cave " + str(newCave) + "\n")

            # If we created a new cave, add the area to it. This does mean that if a
            # cave's identifying features have not changed, areas will not be
            # updated from csv.
            if created and line[Area]:
                if line[Area] == "1626":
                    newCave.area.add(area1626)
                else:
                    # Sub-areas other than 1626 hang off 1623; create on demand.
                    area = models.Area.objects.filter(short_name=line[Area])
                    if area:
                        newArea = area[0]
                    else:
                        newArea = models.Area(short_name=line[Area], parent=area1623)
                        newArea.save()
                    newCave.area.add(newArea)
            elif created:
                # Rows with no Area cell default to the 1623 kataster area.
                newCave.area.add(area1623)
            newCave.save()
            logging.info("Added area " + line[Area] + " to cave " + str(newCave) + "\n")

            if created and line[UnofficialName]:
                newUnofficialName = models.OtherCaveName(
                    cave=newCave, name=line[UnofficialName])
                newUnofficialName.save()
                logging.info("Added unofficial name " + str(newUnofficialName) +
                             " to cave " + str(newCave) + "\n")

        # NOTE(review): python precedence makes this parse as
        # "(created and line[...] == '') or (... == 'entrance') or (... == 'last entrance')",
        # i.e. 'created' does not guard the last two tests.  For a pure-entrance
        # row, 'created' and 'newCave' are whatever the previous cave row left
        # behind — which is how extra entrances attach to the preceding cave.
        if created and line[MultipleEntrances] == '' or \
           line[MultipleEntrances] == 'entrance' or \
           line[MultipleEntrances] == 'last entrance':
            args = {}

            # Same row-closure helpers as above, but filling the Entrance kwargs.
            def addToArgs(CSVname, modelName):
                if line[CSVname]:
                    args[modelName] = html_to_wiki(line[CSVname])

            # Translate a free-text CSV cell into a model choice code via a dict.
            def addToArgsViaDict(CSVname, modelName, dictionary):
                if line[CSVname]:
                    args[modelName] = dictionary[html_to_wiki(line[CSVname])]

            addToArgs(EntranceName, 'name')
            addToArgs(Explorers, 'explorers')
            addToArgs(Map, 'map_description')
            addToArgs(Location, 'location_description')
            addToArgs(Approach, 'approach')
            addToArgs(EntranceDescription, 'entrance_description')
            addToArgs(UndergroundDescription, 'underground_description')
            addToArgs(PhotoOfLocation, 'photo')
            addToArgsViaDict(
                Marking, 'marking', {
                    "Paint": "P",
                    "Paint (?)": "P?",
                    "Tag": "T",
                    "Tag (?)": "T?",
                    "Retagged": "R",
                    "Retag": "R",
                    "Spit": "S",
                    "Spit (?)": "S?",
                    "Unmarked": "U",
                    "": "?",
                })
            addToArgs(MarkingComment, 'marking_comment')
            addToArgsViaDict(
                Findability, 'findability', {
                    "Surveyed": "S",
                    "Lost": "L",
                    "Refindable": "R",
                    "": "?",
                    "?": "?",
                })
            addToArgs(FindabilityComment, 'findability_description')
            addToArgs(Easting, 'easting')
            addToArgs(Northing, 'northing')
            addToArgs(Altitude, 'alt')
            addToArgs(DescriptionOfOtherPoint, 'other_description')

            # Create-and-save a SurveyStation from a station-name cell.
            def addToArgsSurveyStation(CSVname, modelName):
                if line[CSVname]:
                    surveyPoint = models.SurveyStation(name=line[CSVname])
                    surveyPoint.save()
                    args[modelName] = surveyPoint

            addToArgsSurveyStation(TagPoint, 'tag_station')
            addToArgsSurveyStation(ExactEntrance, 'exact_station')
            addToArgsSurveyStation(OtherPoint, 'other_station')
            addToArgs(OtherPoint, 'other_description')
            # GPS fixes override the generic "other" station/description.
            if line[GPSpreSA]:
                addToArgsSurveyStation(GPSpreSA, 'other_station')
                args['other_description'] = 'pre selective availability GPS'
            if line[GPSpostSA]:
                addToArgsSurveyStation(GPSpostSA, 'other_station')
                args['other_description'] = 'post selective availability GPS'
            addToArgs(Bearings, 'bearings')

            newEntrance = models.Entrance(**args)
            newEntrance.save()
            logging.info("Added entrance " + str(newEntrance) + "\n")

            if line[Entrances]:
                entrance_letter = line[Entrances]
            else:
                entrance_letter = ''

            # Join table row linking the entrance to the current cave.
            newCaveAndEntrance = models.CaveAndEntrance(
                cave=newCave,
                entrance=newEntrance,
                entrance_letter=entrance_letter)
            newCaveAndEntrance.save()
            logging.info("Added CaveAndEntrance " + str(newCaveAndEntrance) + "\n")