def merge(self, mergeState): # Merge in yseult data for json_file in [x for x in os.listdir(filepaths.gentmpdir) if x.endswith('.json') and x.startswith('yseult_')]: print " file %s" % json_file data = json.load(file("%s/%s" % (filepaths.gentmpdir,json_file))) id = json_file[len('yseult_'):-len('.json')] print >>sys.stderr,"Yseult file for %s" % id filepaths.saveDetailFile(data, id) mergeState.courseIds.remove(id)
def merge(self, mergeState):
    """Generate placeholder ("fake") detail files.

    For every course id still unhandled in mergeState.courseIds, writes a
    detail file populated with example data — one example row for each
    (term, session-kind) combination — so downstream consumers always
    find a file for every course.
    """
    for course_id in mergeState.courseIds:
        print >>sys.stderr, "MISSING %s" % course_id
        course_name = mergeState.names[course_id]
        placeholder = details.Details(
            course_id, course_name, "Example organiser", "Example location",
            {"notes": "", "course-description": ""})
        for term_name in ('Michaelmas', 'Lent', 'Easter'):
            for session_kind in ('Lecture', 'Practical'):
                # Pattern uses the two-letter term abbreviation plus a fake time.
                pattern = FullPattern(term_name[:2] + ' ' + self.fake_time())
                placeholder.addRow(element.Element(
                    "Example person", course_name, "Example location",
                    pattern, False, session_kind, course_name))
        filepaths.saveDetailFile(placeholder.to_json(), course_id)
def merge(self, mergeState): # Merge in yseult data for json_file in [ x for x in os.listdir(filepaths.gentmpdir) if x.endswith('.json') and x.startswith('yseult_') ]: print " file %s" % json_file data = json.load(file("%s/%s" % (filepaths.gentmpdir, json_file))) id = json_file[len('yseult_'):-len('.json')] print >> sys.stderr, "Yseult file for %s" % id filepaths.saveDetailFile(data, id) mergeState.courseIds.remove(id)
def merge(self, mergeState): # Go through the spreadsheets one by one if os.path.isdir(filepaths.newCourseSheetDir): for csv_file in [x for x in os.listdir(filepaths.newCourseSheetDir) if x.endswith('.csv')]: print >>sys.stderr,"Reading %s" % csv_file ds = details.Details.from_csv(open("%s/%s" % (filepaths.newCourseSheetDir,csv_file)),verifier = csvverifier.Verifier()) if not ds.id in courseIds: print >>sys.stderr," already handled, skipping" continue filepaths.saveDetailFile(ds.to_json(),ds.id) mergeState.courseIds.remove(ds.id) else: print "No manual new course sheet files in %s skipping" % (filepaths.newCourseSheetDir)
def merge(self, mergeState):
    """Merge in explicit PDF data.

    Reads the PDF mapping CSV (rows of at least id, _, name, pdf), groups
    PDF urls by course id, marks those courses handled, and writes one
    detail file per course containing its static PDF urls. Courses absent
    from mergeState.names are ignored.
    """
    pdfs = collections.defaultdict(list)
    # `with` ensures the CSV handle is closed; the original leaked the
    # file() object. `id` renamed to avoid shadowing the builtin.
    with open(filepaths.pdfsDataFilePath) as fp:
        for row in csv.reader(fp):
            if len(row) < 4:
                continue
            (course_id, _, name, pdf) = row[0:4]
            pdfs[course_id].append({'pdf': pdf, 'name': name})
            print >> sys.stderr, "Using PDF for %s (%s)" % (course_id, name)
            if course_id in mergeState.courseIds:
                mergeState.courseIds.remove(course_id)
    for (cid, datas) in pdfs.iteritems():
        # Only emit detail files for courses we know the display name of.
        if not cid in mergeState.names:
            continue
        out = {'id': cid,
               'name': mergeState.names[cid],
               'staticurls': [data['pdf'] for data in datas]}
        filepaths.saveDetailFile(out, cid)
def merge(self, mergeState):
    """Write fake detail files for all still-unhandled courses.

    Every id remaining in mergeState.courseIds gets a placeholder detail
    file with example organiser/location data and one example row per
    term and session kind, so no course ends up without a file.
    """
    terms = ('Michaelmas', 'Lent', 'Easter')
    kinds = ('Lecture', 'Practical')
    for cid in mergeState.courseIds:
        print >> sys.stderr, "MISSING %s" % cid
        name = mergeState.names[cid]
        fake = details.Details(cid, name, "Example organiser",
                               "Example location",
                               {"notes": "", "course-description": ""})
        for term in terms:
            # Two-letter term prefix plus a generated example time.
            when = FullPattern(term[:2] + ' ' + self.fake_time())
            for kind in kinds:
                fake.addRow(element.Element("Example person", name,
                                            "Example location", when,
                                            False, kind, name))
        filepaths.saveDetailFile(fake.to_json(), cid)
def merge(self, mergeState): # Go through the spreadsheets one by one if os.path.isdir(filepaths.newCourseSheetDir): for csv_file in [ x for x in os.listdir(filepaths.newCourseSheetDir) if x.endswith('.csv') ]: print >> sys.stderr, "Reading %s" % csv_file ds = details.Details.from_csv(open( "%s/%s" % (filepaths.newCourseSheetDir, csv_file)), verifier=csvverifier.Verifier()) if not ds.id in courseIds: print >> sys.stderr, " already handled, skipping" continue filepaths.saveDetailFile(ds.to_json(), ds.id) mergeState.courseIds.remove(ds.id) else: print "No manual new course sheet files in %s skipping" % ( filepaths.newCourseSheetDir)
def merge(self, mergeState):
    """Merge in explicit PDF data.

    Groups the rows of the PDF mapping CSV by course id, removes each
    referenced course from mergeState.courseIds, then saves one detail
    file per known course listing its static PDF urls.
    """
    pdfs = collections.defaultdict(list)
    # Close the CSV when done — the original leaked the file() handle.
    # `id` renamed to `course_id` to stop shadowing the builtin.
    with open(filepaths.pdfsDataFilePath) as src:
        for row in csv.reader(src):
            if len(row) < 4:
                continue
            (course_id, _, name, pdf) = row[0:4]
            pdfs[course_id].append({'pdf': pdf, 'name': name})
            print >>sys.stderr, "Using PDF for %s (%s)" % (course_id, name)
            if course_id in mergeState.courseIds:
                mergeState.courseIds.remove(course_id)
    for (cid, datas) in pdfs.iteritems():
        if not cid in mergeState.names:
            # No display name known for this id; skip it.
            continue
        out = {'id': cid,
               'name': mergeState.names[cid],
               'staticurls': [d['pdf'] for d in datas]}
        filepaths.saveDetailFile(out, cid)
def merge(self, mergeState): # Merge in pdn-originated data if os.path.isfile(filepaths.pdnSrcFilePath): pdn_idx = {} for json_file in [x for x in os.listdir(filepaths.gentmpdir) if x.endswith('.json') and x.startswith('pdnout-')]: data = json.load(file("%s/%s" % (filepaths.gentmpdir,json_file))) (part,course) = data['id'] pdn_idx[(part,course)] = data for row in csv.reader(file(filepaths.pdnSrcFilePath)): if row[0] == 'ID' or len(row) < 4: continue (id,part,course,organiser) = row[0:4] if not id in mergeState.courseIds: continue if (part,course) in pdn_idx: data = pdn_idx[(part,course)] print >>sys.stderr,"Using PDN source for %s %s" % (part,course) data['organiser'] = organiser data['id'] = id filepaths.saveDetailFile(data,id) mergeState.courseIds.remove(id) else: print "No PDN source files present (%s), skipping" % (filepaths.pdnSrcFilePath)