def holidays(year):
    """Scrape the public holiday list for ``year`` from calendar-365.com.

    Args:
        year: Year to fetch (int or str); interpolated into the URL.

    Returns:
        dict: ``{"status": "ok", "days": [...]}`` where each entry holds the
        parsed ``date`` (datetime), ``title``, ``link`` and bookkeeping
        fields (``year``, ``country``, ``source``, ``_updated``, ``_created``).
    """
    # Parsed timestamps are interpreted in Danish local time.
    os.environ['TZ'] = 'Europe/Copenhagen'
    time.tzset()
    url = "http://www.calendar-365.com/holidays/%s.html" % (year)

    # NOTE(review): a plain page fetch is normally a GET; POST is preserved
    # here to avoid a behaviour change -- confirm the site accepts it.
    response = requests.post(url)

    html = response.text

    soup = Soup(html)

    # Drop the header row of the holiday table.
    rows = soup.find("table", attrs={"class": "table1"}).findAll("tr")
    del rows[0]

    # Map English month names to zero-padded month numbers.
    # (The original comment claimed Danish names; this page is English.)
    months = {
        "january": "01",
        "february": "02",
        "march": "03",
        "april": "04",
        "may": "05",
        "june": "06",
        "july": "07",
        "august": "08",
        "september": "09",
        "october": "10",
        "november": "11",
        "december": "12",
    }

    days = []

    # Matches strings like "1. january"; compiled once, outside the loop.
    dayProg = re.compile(r"(?P<day>[0-9]*)\. (?P<month>[a-zA-Z]*)")

    # Loop over the rows
    for row in rows:
        elements = row.findAll("td")

        # Locate the day (integer) and month (string) in the first cell.
        groups = dayProg.search(unicode(elements[0].find("span").text))

        days.append({
            "date": datetime.fromtimestamp(mktime(time.strptime("%s %s %s" % (
                functions.zeroPadding(groups.group("day")), months[groups.group("month")], year), "%d %m %Y"))),
            "title": unicode(elements[2].find("a").text).replace(" %s" % (year), ""),
            "link": elements[2].find("a")["href"],
            "year": year,
            "country": "en-US",
            "source": "http://www.calendar-365.com/holidays/",
            "_updated": datetime.now(),
            "_created": datetime.now()
        })

    return {
        "status": "ok",
        "days": days
    }
def flag_days():
    """Scrape the Danish flag days from hvorforflagerbussen.dk.

    Returns:
        dict: ``{"status": "ok", "days": [...]}``; each entry holds the
        flag day ``name``, its ``date`` (datetime, in the current year),
        plus fixed ``region``/``country`` fields.
    """
    url = "http://hvorforflagerbussen.dk/flagdage/"

    response = requests.get(url)

    html = response.text

    soup = Soup(html)

    # Drop the header row.
    rows = soup.findAll("tr")
    rows.pop(0)

    # Map Danish month names to zero-padded month numbers.
    months = {
        "januar": "01",
        "februar": "02",
        "marts": "03",
        "april": "04",
        "maj": "05",
        "juni": "06",
        "juli": "07",
        "august": "08",
        "september": "09",
        "oktober": "10",
        "november": "11",
        "december": "12",
    }

    # Matches strings like "5. juni"; compiled once, outside the loop.
    dayProg = re.compile(r"(?P<day>[0-9]*)\. (?P<month>[a-zA-Z]*)")

    # Renamed from "flag_days" to stop shadowing the function name.
    days = []

    for row in rows:
        elements = row.findAll("td")

        # Locate the day (integer) and month (string) in the first cell.
        groups = dayProg.search(elements[0].text)

        days.append({
            "name": unicode(elements[1].text),
            "date": datetime.fromtimestamp(mktime(time.strptime("%s %s %s" % (
                functions.zeroPadding(groups.group("day")), months[groups.group("month")], date.today().year), "%d %m %Y"))),
            "region": "copenhagen",
            "country": "da_DK"
        })

    return {
        "status": "ok",
        "days": days
    }
# Beispiel #3
# 0
def leave_reasons ( config, session = False ):
	url = "https://www.lectio.dk/lectio/%s/subnav/fravaerelev.aspx?elevid=%s&lectab=aarsager" % ( str(config["school_id"]), str(config["student_id"]) )

	if session is False:
		session = authenticate.authenticate(config)

	if session == False:
		return {"status" : "error", "type" : "authenticate"}

	cookies = {
		"lecmobile" : "0",
		"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
		"LastLoginUserName" : session["LastLoginUserName"],
		"lectiogsc" : session["lectiogsc"],
		"LectioTicket" : session["LectioTicket"]
	}

	# Insert User-agent headers and the cookie information
	headers = {
		"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
		"Content-Type" : "application/x-www-form-urlencoded",
		"Host" : "www.lectio.dk",
		"Origin" : "https://www.lectio.dk",
		"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
	}

	response = proxy.session.get(url, headers=headers)

	html = response.text

	soup = Soup(html)

	if soup.find("table", attrs={"id" : "s_m_Content_Content_FatabAbsenceFravaerGV"}) is None:
		return {
			"status" : False,
			"error" : "Data not found"
		}

	missing = []
	reasons = []

	reasonKeys = {
		u"Andet" : "other",
		u"Kom for sent" : "too_late",
		u"Skolerelaterede aktiviteter" : "school_related",
		u"Private forhold" : "private",
		u"Sygdom" : "sick"
	}

	# TODO: Add Missing
	if soup.find(attrs={"id" : "s_m_Content_Content_FatabMissingAarsagerGV"}).find(attrs={"class" : "noRecord"}) is None:
		print "missing"

	if soup.find(attrs={"id" : "s_m_Content_Content_FatabAbsenceFravaerGV"}).find(attrs={"class" : "noRecord"}) is None:
		rows = soup.find(attrs={"id" : "s_m_Content_Content_FatabAbsenceFravaerGV"}).findAll("tr")
		rows.pop(0)

		activityProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/aktivitet\/aktivitetinfo.aspx\?id=(?P<activity_id>.*)&prevurl=(?P<prev_url>.*)")
		datetimeProg = re.compile(r"(?P<day>.*)\/(?P<month>.*)-(?P<year>.*) (?P<time>.*)")

		for row in rows:
			elements = row.findAll("td")
			activityGroups = activityProg.match(elements[2].find("a")["href"])
			dateGroups = datetimeProg.match(elements[5].find("span").text.strip().replace("\r\n", "").replace("\t", ""))
			reasons.append({
				"type" : "lesson" if elements[0].text.strip().replace("\r\n", "").replace("\t", "") == "Lektion" else "other",
				"week" : elements[1].text.strip().replace("\r\n", "").replace("\t", ""),
				"activity_id" : activityGroups.group("activity_id") if not activityGroups is None else "",
				"leave" : elements[3].text.strip().replace("\r\n", "").replace("\t", "").replace("%", ""),
				"creadited" :True if elements[4].text.strip().replace("\r\n", "").replace("\t", "") == "Ja" else False,
				"registred" : datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("time")), "%d/%m-%Y %H:%M"),
				"teacher" : {
					"abbrevation" : unicode(elements[6].text.strip().replace("\r\n", "").replace("\t", ""))
				},
				"team" : {
					"name" : unicode(elements[7].text.strip().replace("\r\n", "").replace("\t", ""))
				},
				"comment" : unicode(elements[8].text.strip().replace("\r\n", "").replace("\t", "")),
				"reason" : {
					"value" : unicode(elements[9].text.strip().replace("\r\n", "").replace("\t", "")),
					"key" : reasonKeys[unicode(elements[9].text.strip().replace("\r\n", "").replace("\t", ""))] if unicode(elements[9].text.strip().replace("\r\n", "").replace("\t", "")) in reasonKeys else "other",
					"note": unicode(elements[10].text.strip().replace("\r\n", "").replace("\t", ""))
				},

			})

	return {
		"status" : "ok",
		"reasons" : reasons,
		"missing" : missing
	}
# Beispiel #4
# 0
def assignment_info(config, session=False):
    """Fetch detailed information about a single assignment from Lectio.

    Args:
        config: dict with "school_id", "assignment_id" and "student_id".
        session: authenticated session dict, or False to authenticate now.

    Returns:
        dict: {"status": "ok", "information": {...}} on success;
        {"status": "error", "type": "authenticate"} when login fails;
        {"status": False, "error": "Data not found"} when the page
        structure is missing.
    """
    url = urls.assignment_info.replace("{{SCHOOL_ID}}", str(
        config["school_id"])).replace("{{ASSIGNMENT_ID}}",
                                      str(config["assignment_id"])).replace(
                                          "{{STUDENT_ID}}",
                                          str(config["student_id"]))

    if session is False:
        session = authenticate.authenticate(config)

    if session == False:
        return {"status": "error", "type": "authenticate"}

    # Insert the session information from the auth function
    cookies = {
        "lecmobile": "0",
        "ASP.NET_SessionId": session["ASP.NET_SessionId"],
        "LastLoginUserName": session["LastLoginUserName"],
        "lectiogsc": session["lectiogsc"],
        "LectioTicket": session["LectioTicket"]
    }

    # Insert User-agent headers and the cookie information
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
        "Content-Type": "application/x-www-form-urlencoded",
        "Host": "www.lectio.dk",
        "Origin": "https://www.lectio.dk",
        "Cookie": functions.implode(cookies, "{{index}}={{value}}", "; ")
    }

    response = proxy.session.get(url, headers=headers)

    html = response.text

    soup = Soup(html)

    # Matches "dd/mm-yyyy hh:mm" timestamps used throughout the page.
    dateTime = re.compile(
        r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")

    if soup.find("div", attrs={"id": "m_Content_registerAfl_pa"}) is None:
        return {"status": False, "error": "Data not found"}

    teacherProg = re.compile(r"(?P<name>.*) \((?P<abbrevation>.*)\)")
    documentProg = re.compile(r"(?P<name>.*) \((?P<upload_date>.*)\)")
    teamProg = re.compile(r"(?P<class_name>.*) (?P<subject_name>.*)")

    # Pair every <th> label with its <td> cell of the assignment table.
    # "headerCells" renamed from "headers" so the HTTP headers dict above
    # is no longer shadowed.
    rows = soup.find("div", attrs={
        "id": "m_Content_registerAfl_pa"
    }).find("table").findAll("td")
    headerCells = soup.find("div", attrs={
        "id": "m_Content_registerAfl_pa"
    }).find("table").findAll("th")
    rowMap = functions.mapRows(headerCells, rows)

    # Assignment deadline ("Afleveringsfrist").
    dateTimeGroups = dateTime.match(rowMap["Afleveringsfrist"].text)

    date = datetime.strptime(
        "%s/%s-%s %s:%s" %
        (functions.zeroPadding(dateTimeGroups.group("day")),
         functions.zeroPadding(dateTimeGroups.group("month")),
         dateTimeGroups.group("year"), dateTimeGroups.group("hour"),
         dateTimeGroups.group("minute")), "%d/%m-%Y %H:%M")

    group_assignment = False
    members = []
    teachers = []
    teams = []
    documents = []
    comments = []

    # Upload/comment log rows; first row is the table header.
    uploadRows = soup.find("table", attrs={
        "id": "m_Content_RecipientGV"
    }).findAll("tr")
    uploadRows.pop(0)
    uploadProg = re.compile(
        r"\/lectio/(?P<school_id>.*)\/ExerciseFileGet.aspx\?type=(?P<type>.*)&entryid=(?P<entry_id>.*)"
    )

    for row in uploadRows:
        elements = row.findAll("td")
        # Context card ids start with "T" for teachers, "S" for students.
        context_card_id = elements[1].find("span")["lectiocontextcard"]
        dateTimeGroups = dateTime.match(elements[0].find("span").text)
        upload_type = ""
        entry_id = ""
        if not elements[3].find("a") is None:
            uploadGroups = uploadProg.match(elements[3].find("a")["href"])
            entry_id = uploadGroups.group("entry_id")
            upload_type = "student_assignment" if uploadGroups.group("type") == "elevopgave" else "other"

        uploadDate = datetime.strptime(
            "%s/%s-%s %s:%s" %
            (functions.zeroPadding(dateTimeGroups.group("day")),
             functions.zeroPadding(dateTimeGroups.group("month")),
             dateTimeGroups.group("year"), dateTimeGroups.group("hour"),
             dateTimeGroups.group("minute")), "%d/%m-%Y %H:%M")

        comments.append({
            "file": {
                "name": elements[3].find("a").text.encode("utf8") if not elements[3].find("a") is None else "",
                "entry_id": entry_id,
                "type": upload_type
            },
            "comment": functions.cleanText(elements[2].text).encode("utf8"),
            "uploader": {
                # Teachers carry their full name in the span title and the
                # abbreviation in the span text; students only have a name.
                "name": elements[1].find("span")["title"].encode("utf8") if context_card_id[0] == "T" else elements[1].find("span").text.encode("utf8"),
                "type": "teacher" if context_card_id[0] == "T" else "student",
                "person_id": context_card_id.replace("T", "") if context_card_id[0] == "T" else context_card_id.replace("S", ""),
                "context_card_id": context_card_id,
                "abbrevation": elements[1].find("span").text.encode("utf8") if context_card_id[0] == "T" else ""
            },
            "date": uploadDate
        })

    documentIdProg = re.compile(
        r"\/lectio\/(?P<school_id>.*)\/ExerciseFileGet.aspx\?type=(?P<type>.*)&exercisefileid=(?P<exercise_file_id>.*)"
    )

    # The status cell reads like "<status>/ <label>: <leave>%".
    statusProg = re.compile(r"(?P<status>.*)\/ (.*): (?P<leave>.*)%")
    studentDataElements = soup.find("table", attrs={
        "id": "m_Content_StudentGV"
    }).findAll("tr")[1].findAll("td")
    statusGroups = statusProg.match(
        functions.cleanText(studentDataElements[3].text).encode("utf8"))
    status = functions.cleanText(
        statusGroups.group("status")) if not statusGroups is None else ""
    studentData = {
        "student": {
            "context_card_id": studentDataElements[0].find("img")["lectiocontextcard"],
            "student_id": studentDataElements[0].find("img")["lectiocontextcard"].replace("S", ""),
        },
        "status": "handed" if status.strip() == "Afleveret" else "missing",
        "waiting_for": "student" if functions.cleanText(studentDataElements[2].text) == "Elev" else "teacher" if unicode(functions.cleanText(studentDataElements[2].text)) == u"Lærer" else "none",
        "leave": functions.cleanText(statusGroups.group("leave")) if not statusGroups is None else 0,
        "finished": True if soup.find("input", attrs={
            "id": "m_Content_StudentGV_ctl02_CompletedCB"
        }).has_attr("checked") and soup.find("input", attrs={
            "id": "m_Content_StudentGV_ctl02_CompletedCB"
        })["checked"] == "checked" else False,
        "grade": functions.cleanText(studentDataElements[5].text).encode("utf8"),
        "grade_note": functions.cleanText(studentDataElements[6].text).encode("utf8"),
        "student_note": functions.cleanText(studentDataElements[7].text).encode("utf8")
    }

    if u"Opgavebeskrivelse" in rowMap:
        for row in rowMap[u"Opgavebeskrivelse"].findAll("a"):
            fileNameGroups = documentProg.match(
                functions.cleanText(row.text.strip()))
            fileIdGroups = documentIdProg.match(row["href"])
            # BUG FIX: the original assigned an unused "documentType" with a
            # trailing comma, producing an accidental 1-tuple; removed.
            documents.append({
                "name": fileNameGroups.group("name") if not fileNameGroups is None else "",
                "exercise_file_id": fileIdGroups.group("exercise_file_id") if not fileIdGroups is None else "",
                "uploaded_date_string": fileNameGroups.group("upload_date") if not fileNameGroups is None else "",
                "type": "exercise_description",
                "school_id": fileIdGroups.group("school_id") if not fileIdGroups is None else ""
            })

    for row in rowMap["Hold"].findAll("span"):
        #teamGroups = teamProg.match(row.text)
        teams.append({
            #"class_name" : unicode(teamGroups.group("class_name")) if not teamGroups is None else "",
            #"subject_name" : unicode(teamGroups.group("subject_name")) if not teamGroups is None else "",
            "team_element_name": row.text,
            "team_element_id": rowMap["Hold"].find("span")["lectiocontextcard"].replace("HE", ""),
            "context_card_id": rowMap["Hold"].find("span")["lectiocontextcard"]
        })

    for row in rowMap["Ansvarlig"].findAll("span"):
        teacherGroups = teacherProg.match(row.text)
        teachers.append({
            "teacher_id": row["lectiocontextcard"].replace("T", ""),
            "name": teacherGroups.group("name").encode("utf8") if not teacherGroups is None else "",
            "context_card_id": row["lectiocontextcard"],
            "abbrevation": teacherGroups.group("abbrevation").encode("utf8") if not teacherGroups is None else ""
        })

    if soup.find("div", attrs={"id": "m_Content_groupIsland_pa"}):
        # Group assignment: list every member from the members table.
        group_assignment = True
        memberRows = soup.find("table", attrs={
            "id": "m_Content_groupMembersGV"
        }).findAll("tr")
        memberRows.pop(0)
        memberProg = re.compile(r"(?P<name>.*), (?P<code>.*)")

        for row in memberRows:
            elements = row.findAll("td")
            memberGroups = memberProg.match(elements[0].find("span").text)
            members.append({
                "name": memberGroups.group("name") if not memberGroups is None else "",
                "student_id": elements[0].find("span")["lectiocontextcard"].replace("S", ""),
                "context_card_id": elements[0].find("span")["lectiocontextcard"],
                "student_class_code": memberGroups.group("code") if not memberGroups is None else ""
            })
    else:
        # Individual assignment: the only member is the current student,
        # parsed out of the page header ("Eleven <name> (<code>) - ...").
        memberProg = re.compile(
            r"Eleven (?P<name>.*) \((?P<code>.*)\) - Opgaveaflevering")
        memberGroups = memberProg.match(
            soup.find(attrs={
                "id": "m_HeaderContent_pageHeader"
            }).find("div").text)
        members.append({
            "student_id": config["student_id"],
            "context_card_id": soup.find(attrs={
                "id": "m_HeaderContent_pageHeader"
            }).find("div")["lectiocontextcard"],
            "student_class_code": memberGroups.group("code") if not memberGroups is None else "",
            "name": memberGroups.group("name") if not memberGroups is None else "",
        })

    # Students who can still be added to the hand-in group, if the page
    # offers the drop-down at all.
    availableStudents = []
    availableStudentProg = re.compile(r"(?P<name>.*) \((?P<code>.*)\)")

    if not soup.find("select", attrs={"id": "m_Content_groupStudentAddDD"}) is None:

        for row in soup.find("select", attrs={
            "id": "m_Content_groupStudentAddDD"
        }).findAll("option"):
            progGroups = availableStudentProg.match(row.text)
            availableStudents.append({
                "name": str(progGroups.group("name")).decode("utf8"),
                "student_id": row["value"],
                "student_class_code": progGroups.group("code"),
            })

    # Renamed from "infomation" (typo); the returned key stays "information".
    information = {
        "documents": documents,
        "title": rowMap[u"Opgavetitel"].find("span").text.encode("utf8"),
        "group_assignment": group_assignment,
        "members": members,
        "note": rowMap[u"Opgavenote"].text.encode("utf8"),
        "team": teams,
        "grading_scale": "7-step" if rowMap[u"Karakterskala"].text == "7-trinsskala" else "13-step",
        "teachers": teachers,
        "student_time": rowMap[u"Elevtid"].text.replace(",", ".").replace("timer", ""),
        "date": date,
        "in_instruction_detail": True if rowMap[u"Iundervisningsbeskrivelse"].text == "Ja" else False,
        "comments": comments,
        "group": {
            "available_students": availableStudents
        },
        "student": studentData
    }

    #Delivered by, grade, grade_note, student_note, ended, awaiting, uploaded-documents

    return {"status": "ok", "information": information}
# Beispiel #5
# 0
def message ( config, session = False ):
	"""Fetch a full message thread from Lectio.

	Args:
		config: dict with "school_id", "student_id" and "thread_id".
		session: authenticated session dict, or False to authenticate now.

	Returns:
		dict: {"status": "ok", "message": {...}} with the original subject,
		flagged state, original sender, recipients and the message list; or
		an error dict when authentication or scraping fails.
	"""
	url = "https://www.lectio.dk/lectio/%s/beskeder2.aspx?type=liste&elevid=%s" % ( str(config["school_id"]), str(config["student_id"]) )

	if session is False:
		session = authenticate.authenticate(config)

	if session == False:
		return {"status" : "error", "type" : "authenticate"}

	# Insert the session information from the auth function
	cookies = {
		"lecmobile" : "0",
		"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
		"LastLoginUserName" : session["LastLoginUserName"],
		"lectiogsc" : session["lectiogsc"],
		"LectioTicket" : session["LectioTicket"]
	}

	# Insert User-agent headers and the cookie information
	headers = {
		"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
		"Content-Type" : "application/x-www-form-urlencoded",
		"Host" : "www.lectio.dk",
		"Origin" : "https://www.lectio.dk",
		"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
	}

	# First GET fetches the message list page so we can lift the ASP.NET
	# view state needed for the postback below.
	response = proxy.session.get(url, headers=headers)

	html = response.text

	soup = Soup(html)

	viewStateX = soup.find("input", attrs={"id" : "__VIEWSTATEX"})["value"]

	# Simulate the postback that opens the requested thread.
	settings = {
		"__EVENTTARGET" : "__Page",
		"__EVENTARGUMENT" : "$LB2$_MC_$_%s" % ( str(config["thread_id"]) ),
		"__VIEWSTATEX" : viewStateX,
	}

	response = proxy.session.post(url, data=settings, headers=headers)

	html = response.text

	soup = Soup(html)

	if soup.find("div", attrs={"id" : "s_m_Content_Content_ViewThreadPagePanel"}) is None:
		return {
			"status" : False,
			"error" : "Data not found"
		}

	# The flag toggle image tells us whether the thread is flagged.
	flagged = False if soup.find("input", attrs={"id" : "s_m_Content_Content_FlagThisThreadBox"})["src"] == "/lectio/img/flagoff.gif" else True

	originalElements = soup.find("table", attrs={"class" : "ShowMessageRecipients"}).findAll("td")

	# Resolve the original sender via their context card (cell index 8).
	originalSenderUser = context_card.user({
		"context_card_id" : originalElements[8].find("span")["lectiocontextcard"],
		"school_id" : config["school_id"]
	}, session)

	originalSenderUser["user"]["user_context_card_id"] = originalElements[8].find("span")["lectiocontextcard"]
	originalSenderUser["user"]["person_id"] = originalElements[8].find("span")["lectiocontextcard"].replace("U", "")

	originalSubject = unicode(functions.cleanText(originalElements[2].text))

	recipients = []

	studentRecipientProg = re.compile(r"(?P<name>.*) \((?P<student_class_id>.*)\)")
	teacherRecipientProg = re.compile(r"(?P<name>.*) \((?P<abbrevation>.*)\)")

	# Fill in the single users, added as recipients
	for row in originalElements[11].findAll("span"):
		context_card_id = row["lectiocontextcard"]
		userType = ""
		data = {
			"context_card_id" : context_card_id
		}

		# NOTE(review): substring checks ("S" in ..., "T" in ...) are used
		# rather than prefix checks -- presumably the cards start with S/T;
		# confirm against real context card ids.
		if "S" in context_card_id:
			userType = "student"
			studentGroups = studentRecipientProg.match(row.text)
			data["person_id"] = context_card_id.replace("S", "")
			data["student_id"] = context_card_id.replace("S", "")
			data["name"] = unicode(studentGroups.group("name")) if not studentGroups is None else ""
			data["student_class_id"] = studentGroups.group("student_class_id") if not studentGroups is None else ""

		elif "T" in context_card_id:
			userType = "teacher"
			teacherGroups = teacherRecipientProg.match(row.text)
			data["person_id"] = context_card_id.replace("T", "")
			data["teacher_id"] = context_card_id.replace("T", "")
			data["abbrevation"] = unicode(teacherGroups.group("abbrevation")) if not teacherGroups is None else ""
			data["name"] = unicode(teacherGroups.group("name")) if not teacherGroups is None else ""

		data["type"] = userType

		recipients.append(data)

		# Remove the span so only team/group names remain in the cell text.
		row.decompose()

	# What is left after decomposing the spans is a comma-separated list
	# of team ("Holdet ...") and group ("Gruppen ...") recipients.
	recipientRows = originalElements[11].text.split(", ")

	for row in recipientRows:
		text = row.replace("\n", "").replace("\r", "").replace("\t", "")

		if "Holdet" in text:
			text = text.replace("Holdet ", "")

			recipients.append({
				"type" : "team",
				"name" : unicode(text)
			})
		elif "Gruppen" in text:
			text = text.replace("Gruppen ", "")
			recipients.append({
				"type" : "group",
				"name" : unicode(text)
			})

	messages = []

	answerProg = re.compile(r"javascript:__doPostBack\('__Page','ANSWERMESSAGE_(?P<message_id>.*)'\);")
	dateTimeProg = re.compile(r"(?P<day>.*)\/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")
	# Maps nesting level (colspan value) -> message_id of the last message
	# seen at that level, used to reconstruct the reply hierarchy.
	messageLevels = {}

	for row in soup.find("table", attrs={"id" : "s_m_Content_Content_ThreadTable"}).findAll("tr"):
		if not row.find("table") is None:
			# Deeper replies get a smaller colspan; "has_colspan" is a
			# helper defined elsewhere in this module.
			level = row.findAll(has_colspan)[0]["colspan"]
			data = {}
			messageDetailElements = row.find("table").findAll("td")

			# Subject
			data["subject"] = unicode(messageDetailElements[0].find("h4").text)
			messageDetailElements[0].find("h4").decompose()

			# Sender
			messageSender = context_card.user({
				"context_card_id" : messageDetailElements[0].find("span")["lectiocontextcard"],
				"school_id" : config["school_id"]
			}, session)

			# NOTE(review): these two lines reuse originalElements[8] (the
			# thread's original sender) instead of this message's own span
			# -- looks like a copy/paste bug that stamps every message with
			# the original sender's card/person id; confirm intent.
			messageSender["user"]["user_context_card_id"] = originalElements[8].find("span")["lectiocontextcard"]
			messageSender["user"]["person_id"] = originalElements[8].find("span")["lectiocontextcard"].replace("U", "")
			data["sender"] = messageSender["user"]

			messageDetailElements[0].find("span").decompose()

			# Time
			timeText = messageDetailElements[0].text.replace("Af , ", "").strip().replace("\n", "").replace("\t", "")
			dateGroups = dateTimeProg.match(timeText)
			data["date"] = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""

			# Message id
			answerGroups = answerProg.match(messageDetailElements[1].find("button")["onclick"])
			message_id = answerGroups.group("message_id") if not answerGroups is None else ""
			data["message_id"] = message_id

			row.find("table").decompose()

			# Get message text
			data["message"] = unicode(row.text.strip())

			# Get parent
			if str(int(level)+1) in messageLevels:
				data["parrent_id"] = messageLevels[str(int(level)+1)]

			messageLevels[level] = message_id

			messages.append(data)

	messageInfo = {
		"original_subject" : originalSubject,
		"flagged" : flagged,
		"original_sender" : originalSenderUser["user"],
		"recipients" : recipients,
		"messages" : messages
	}

	return {
		"status" : "ok",
		"message" : messageInfo,
	}
# Beispiel #6
# 0
def document(config, session=False):
    """Fetch the edit page of a Lectio document and parse its metadata.

    Args:
        config: dict with "school_id" and "document_id".
        session: authenticated session dict, or False to authenticate now.

    Returns:
        dict: {"status": "ok", "document": {...}} with name, extension,
        size, creator/changer, comment, public flag, affiliations and term;
        or an error dict when authentication or scraping fails.
    """
    url = "https://www.lectio.dk/lectio/%s/dokumentrediger.aspx?dokumentid=%s" % (
        str(config["school_id"]), str(config["document_id"]))

    if session is False:
        session = authenticate.authenticate(config)

    if session == False:
        return {"status": "error", "type": "authenticate"}

    cookies = {
        "lecmobile": "0",
        "ASP.NET_SessionId": session["ASP.NET_SessionId"],
        "LastLoginUserName": session["LastLoginUserName"],
        "lectiogsc": session["lectiogsc"],
        "LectioTicket": session["LectioTicket"]
    }

    # Insert User-agent headers and the cookie information
    headers = {
        "User-Agent":
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
        "Content-Type": "application/x-www-form-urlencoded",
        "Host": "www.lectio.dk",
        "Origin": "https://www.lectio.dk",
        "Cookie": functions.implode(cookies, "{{index}}={{value}}", "; ")
    }

    response = proxy.session.get(url, headers=headers)

    html = response.text

    soup = Soup(html)

    if soup.find("div", attrs={"id": "m_Content_Dokument_pa"}) is None:
        return {"status": False, "error": "Data not found"}

    # When the table has fewer than 7 cells the page variant lacks one
    # leading cell, so all fixed indices below shift down by one.
    offset = 0

    elements = soup.find("div", attrs={
        "id": "m_Content_Dokument_pa"
    }).findAll("td")

    if len(elements) < 7:
        offset = 1

    # Cell [3 - offset] holds the creator's context card.
    creator = context_card.user(
        {
            "context_card_id":
            elements[3 - offset].find("span")["lectiocontextcard"],
            "school_id":
            config["school_id"]
        }, session)["user"]

    # Cell [4 - offset] holds the last-changer card plus a "dd/mm-yyyy
    # hh:mm af <name>" text; the span is removed before parsing the date.
    changer = elements[4 - offset].find("span")["lectiocontextcard"]
    elements[4 - offset].find("span").decompose()
    dateText = elements[4 - offset].text.replace(" af ", "").strip()
    dateTimeProg = re.compile(
        r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")
    dateGroups = dateTimeProg.match(dateText)
    date = datetime.strptime(
        "%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")),
                            functions.zeroPadding(dateGroups.group("month")),
                            dateGroups.group("year"), dateGroups.group("hour"),
                            dateGroups.group("minute")),
        "%d/%m-%Y %H:%M") if not dateGroups is None else ""

    # Affiliation rows: who/what the document is shared with.
    connectionRows = soup.find("table",
                               attrs={
                                   "id": "m_Content_AffiliationsGV"
                               }).findAll("tr")
    connectionRows.pop(0)

    connections = []

    for row in connectionRows:
        rowElements = row.findAll("td")

        # NOTE(review): type is inferred from substrings of the context
        # card ("H" -> team, "T" -> teacher, else student) -- presumably a
        # prefix check; confirm against real context card ids.
        data = {
            "context_card_id":
            rowElements[0]["lectiocontextcard"],
            "type":
            "team" if "H" in rowElements[0]["lectiocontextcard"] else "teacher"
            if "T" in rowElements[0]["lectiocontextcard"] else "student",
            "name":
            unicode(rowElements[0].find("span").text),
            "can_edit":
            True if "checked" in rowElements[1].find("input").attrs else False
        }

        # The optional folder selector tells us where the affiliation
        # filed the document.
        if rowElements[2].find("select"):
            folder_id = rowElements[2].find("select").select(
                'option[selected="selected"]')[0]["value"]
            data["folder_id"] = folder_id

        connections.append(data)

    document = {
        "name":
        unicode(elements[0].find("a").text).replace("\t", "").replace(
            "\n", "").replace("\r", "").strip(),
        # File extension without the leading dot, derived from the name.
        "extension":
        os.path.splitext(elements[0].find("a").text.replace("\t", "").replace(
            "\n", "").replace("\r", "").strip())[1].replace(".", ""),
        # Size text with Danish decimal comma normalised to a dot.
        "size":
        elements[2 - offset].text.replace(",", ".").replace("\t", "").replace(
            "\n", "").replace("\r", "").strip(),
        "document_id":
        str(config["document_id"]),
        "creator":
        creator,
        "changer": {
            "context_card_id": changer,
            "type": "teacher" if "T" in changer else "student",
            "date": date
        },
        "comment":
        soup.find("textarea", attrs={
            "id": "m_Content_EditDocComments_tb"
        }).text.replace("\r\n", ""),
        "public":
        True if "checked" in soup.find("input",
                                       attrs={
                                           "id": "m_Content_EditDocIsPublic"
                                       }).attrs else False,
        "connections":
        connections,
        # School term the document belongs to, from the term drop-down.
        "term": {
            "value":
            soup.find("select", attrs={
                "id": "m_ChooseTerm_term"
            }).select('option[selected="selected"]')[0]["value"],
            "years_string":
            soup.find("select", attrs={
                "id": "m_ChooseTerm_term"
            }).select('option[selected="selected"]')[0].text
        }
    }

    return {"status": "ok", "document": document}
# Beispiel #7
# 0
def activity_info(config, activity_id, session = False, modules = None ):
    """Fetch and parse a single Lectio activity (lesson) detail page.

    Parameters:
        config      -- dict holding at least "school_id" (used in the URL).
        activity_id -- id of the activity whose page should be scraped.
        session     -- False: unauthenticated request; True: authenticate
                       first via authenticate.authenticate(config); or an
                       already-established session dict of Lectio cookies.
        modules     -- optional list of module dicts with "module", "start"
                       and "end" keys, used to translate a module number
                       into concrete start/end times.

    Returns {"status": "error", "type": "authenticate"} when authentication
    fails, otherwise {"status": "ok", ...} with the parsed activity data
    (teams, students, teachers, rooms, documents, homework, dates, ...).
    """
    if not session == False:
        if session is True:
            # Caller asked us to establish the session ourselves.
            session = authenticate.authenticate(config)

        if session == False:
            return {"status" : "error", "type" : "authenticate"}

    url = urls.activity_info.replace("{{SCHOOL_ID}}", str(config["school_id"])).replace("{{ACTIVITY_ID}}", str(activity_id))

    if not session == False:
        # Insert the session information from the auth function
        cookies = {
            "lecmobile" : "0",
            "ASP.NET_SessionId" : session["ASP.NET_SessionId"],
            "LastLoginUserName" : session["LastLoginUserName"],
            "lectiogsc" : session["lectiogsc"],
            "LectioTicket" : session["LectioTicket"]
        }

    else:
        cookies = {}

    settings = {}

    # Insert User-agent headers and the cookie information
    headers = {
        "User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
        "Content-Type" : "application/x-www-form-urlencoded",
        "Host" : "www.lectio.dk",
        "Origin" : "https://www.lectio.dk",
        "Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
    }

    response = proxy.session.post(url, data=settings, headers=headers)

    html = response.text

    soup = Soup(html)

    # Find all the different rows in the table
    rows = []

    for x in soup.find("div", attrs={"id" : "m_Content_LectioDetailIslandLesson_pa"}).find("table").findAll("tr", recursive=False):
        rows.append(x.find("td"))

    headers = soup.find("div", attrs={"id" : "m_Content_LectioDetailIslandLesson_pa"}).find("table").findAll("th")

    # Rename the 4th header in-place so the row map below gets a unique key
    # for this column (presumably a duplicate "Elever" header otherwise --
    # TODO confirm against the live page markup).
    headers[3].string.replace_with("EleverAs")

    # Make rows[n] match headers[n]
    # NOTE(review): deleting from `rows` while enumerating it skips the
    # element that follows each deletion -- verify alignment with `headers`
    # still holds when several "NoFrame" rows are adjacent.
    for index, element in enumerate(rows):
        table = element.find_parent("table")
        if table["class"][0] == u"NoFrame":
            del rows[index]

    # Generate a map of rows
    rowMap = functions.mapRows(headers, rows)

    # Retrieve the values
    showed_in_values = unicode(rowMap["Vises"].text).split(", ")
    showed_in = []

    # NOTE: `type` shadows the builtin of the same name for the rest of
    # this function.
    type = unicode(rowMap["Type"].text)
    status = unicode(rowMap["Status"].text)
    students_resserved = unicode(rowMap["Deltagerereserveret"].text)

    teams = [] # Done
    students = [] # Done
    ressources = [] # Test Missing
    rooms = [] # Done
    teachers = [] # Done
    documents = [] # Done
    links = [] # Done
    students_education_assigned = [] # Missing Test
    homework = []

    ressourceProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/SkemaNy.aspx\?type=lokale&nosubnav=1&id=(?P<ressource_id>.*)&week=(?P<week>.*)")

    for x in rowMap["Ressourcer"].findAll("a"):
        ressoureceGroups = ressourceProg.match(x["href"])

        ressources.append({
            "ressource_id" : ressoureceGroups.group("ressource_id") if not ressoureceGroups is None else ""
        })

    for x in rowMap["EleverAs"].findAll("a"):
        students_education_assigned.append({
            "student_id" : x["lectiocontextcard"].replace("S", "")
        })

    # Three time formats appear on the page: a module-based time ("2. modul"),
    # an explicit start/end time on one day, and a multi-day range.
    dateProg = re.compile(r"(?P<day_name>.*) (?P<day>.*)\/(?P<month>.*) (?P<module>.*)\. modul, uge (?P<week>.*)")
    termValue = soup.find("select", attrs={"id" : "m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"]
    alternativeDateProg = re.compile(r"(?P<day_name>.*) (?P<day>.*)\/(?P<month>.*) (?P<start_time>.*) - (?P<end_time>.*), uge (?P<week>.*)")
    multiDayProg = re.compile(r"(?P<start_day_name>.*) (?P<start_day>.*)\/(?P<start_month>.*) (?P<start_time>.*) - (?P<end_day_name>.*) (?P<end_day>.*)\/(?P<end_month>.*) (?P<end_time>.*), uge (?P<week>.*)")

    altDateGroups = alternativeDateProg.match(rowMap["Tidspunkt"].text.strip().replace("\r", "").replace("\n", "").replace("\t", ""))
    dateGroups = dateProg.match(rowMap["Tidspunkt"].text.strip().replace("\r", "").replace("\n", "").replace("\t", ""))
    multiDayGroups  = multiDayProg.match(rowMap["Tidspunkt"].text.strip().replace("\r", "").replace("\n", "").replace("\t", ""))

    startDate = None
    endDate = None

    # The school year spans two calendar years: months before August belong
    # to the year after the term's start year.
    if not dateGroups is None and not modules == None:
        if int(dateGroups.group("month")) < 8:
            year = int(termValue) + 1
        else:
            year = int(termValue)

        # Fallback times if the module number is not found in `modules`.
        startTime = "12:00"
        endTime = "00:00"

        for x in modules:
            if str(x["module"]) == str(dateGroups.group("module")):
                startTime = x["start"]
                endTime = x["end"]

        startDate = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), year, startTime), "%d/%m-%Y %H:%M")
        endDate = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), year, endTime), "%d/%m-%Y %H:%M")
    elif not multiDayGroups is None:
        if int(multiDayGroups.group("month")) < 8:
            year = int(termValue) + 1
        else:
            year = int(termValue)

        startDate = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(multiDayGroups.group("day")), functions.zeroPadding(multiDayGroups.group("month")), year, multiDayGroups.group("start_time")), "%d/%m-%Y %H:%M")
        endDate = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(multiDayGroups.group("day")), functions.zeroPadding(multiDayGroups.group("month")), year, multiDayGroups.group("end_time")), "%d/%m-%Y %H:%M")
    elif not altDateGroups is None:
        if int(altDateGroups.group("month")) < 8:
            year = int(termValue) + 1
        else:
            year = int(termValue)

        startDate = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(altDateGroups.group("day")), functions.zeroPadding(altDateGroups.group("month")), year, altDateGroups.group("start_time")), "%d/%m-%Y %H:%M")
        endDate = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(altDateGroups.group("day")), functions.zeroPadding(altDateGroups.group("month")), year, altDateGroups.group("end_time")), "%d/%m-%Y %H:%M")

    # Created and updated dates
    metaProg = re.compile(values.activity_updated_regex)

    # Collapse the "Systeminformation" cell into one whitespace-normalized
    # string before matching the created/updated regex against it.
    metaElements = rowMap["Systeminformation"].text.strip().split("\n")
    metaString = ""
    for me in metaElements:
        metaString = metaString + " " + me.replace("\t\t\t\t", "").replace("\r", "").strip()

    metaGroups = metaProg.search(metaString)

    # Loop through the documents and append to the list
    documentTable = rowMap["Dokumenter"].find("table")
    if not documentTable == None:
        documentRows = documentTable.findAll("td")
        for documentRow in documentRows:
            # Split the size from the unit
            fileSizeProg = re.compile(values.file_size_regex)
            fileSizeGroups = fileSizeProg.search(documentRow.text)

            # Find the different document info elements
            elements = documentRow.findAll("a")

            if len(elements) > 0:
                # Filter the id from the document url
                documentProg = re.compile(values.document_url_regex)
                documentGroups = documentProg.search(elements[1]["href"])

                # Append to the list
                documents.append({
                    "name" : elements[1].text.encode("utf8"),
                    "size" : {
                        "size" : fileSizeGroups.group("size").replace(",", "."),
                        "unit" : fileSizeGroups.group("unit_name")
                    },
                    "type" : "timetable_document",
                    "document_id" : documentGroups.group("document_id")
                })

    # Loop through the students and append to the list
    # The cell alternates student-link, class-link, so consume anchors in
    # pairs via functions.grouped.
    studentRows = rowMap["Elever"].findAll("a")
    for student,classObject in functions.grouped(studentRows,2):
        # Filter the id from the class URL
        studentClassProg = re.compile(values.class_url_regex)
        studentClassGroups = studentClassProg.search(classObject["href"])

        # Filter the student id from the URL
        studentIdProg = re.compile(values.student_url_regex)
        studentIdGroups = studentIdProg.search(student["href"])

        students.append({
            "name" : unicode(student.text),
            "class" : unicode(classObject.text),
            "context_card_id" : student["lectiocontextcard"],
            "student_id" : studentIdGroups.group("student_id"),
            "class_id" : studentClassGroups.group("class_id")
        })

    # Loop through the teams and append to the list
    for team in rowMap["Hold"].findAll("a"):
        # Filter the class name from the team name
        teamNameProg = re.compile(values.team_class_name_regex)
        teamNameGroups = teamNameProg.search(unicode(team.text))

        # Filter the id from the URL
        teamIdProg = re.compile(values.team_url_regex)
        teamIdGroups = teamIdProg.search(team["href"])

        if not teamIdGroups == None:
            # Append to the list
            teams.append({
                "class" : teamNameGroups.group("class_name"),
                "team" : teamNameGroups.group("team_name"),
                "name" : team.text,
                "team_id" : teamIdGroups.group("team_id")
            })

    # Loop through the values and append English and Computer easy readable values
    for value in showed_in_values:
        if value == u"i dags- og ugeændringer":
            showed_in.append("day_and_week_changes")
        elif value == u"Inde i berørte skemaer":
            showed_in.append("timetable")
        elif value == u"I toppen af berørte skemaer":
            showed_in.append("top_of_timetable")

    # Loop through the links and append them to the list
    for link in rowMap["Links"].findAll("a"):
        links.append({
            "url" : link["href"],
            "title" : unicode(link.text)
        })

    # Loop through the rooms and append them to the list
    for room in rowMap["Lokaler"].findAll("a"):
        # Initialize variables
        roomName = ""
        roomNumber = ""

        # Filter the number from the name
        roomNameProg = re.compile(values.room_name_regex)
        roomNameGroups = roomNameProg.search(unicode(room.text))

        if not roomNameGroups == None:
            roomName = roomNameGroups.group("room_name")
            roomNumber = roomNameGroups.group("room_number")

        # Initialize roomId RegEx
        roomIdProg = re.compile(values.room_url_regex)

        # Filter the id from the URL
        roomIdGroups = roomIdProg.search(room["href"])

        # Append the room to the list
        rooms.append({
            "name" : roomName,
            "number" : roomNumber,
            "room_id" : roomIdGroups.group("room_id")
        })

    # Loop through the teachers and append them to the list
    for teacher in rowMap["Laerere"].findAll("a"):
        # Filter the abbrevation from the name
        teacherNameProg = re.compile(values.name_with_abbrevation_regex)
        teacherNameGroups = teacherNameProg.search(unicode(teacher.text))

        # Filter the id from the URL
        teacherIdProg = re.compile(values.teacher_url_regex)
        teacherIdGroups = teacherIdProg.search(teacher["href"])

        # Append to the list
        teachers.append({
            "context_card_id" : teacher["lectiocontextcard"],
            "name" : teacherNameGroups.group("name"),
            "abbrevation" : teacherNameGroups.group("abbrevation"),
            "teacher_id" : teacherIdGroups.group("teacher_id"),
            "school_id" : teacherIdGroups.group("school_id")
        })

    # Loop over the diferent homework notes and append to the list
    # NOTE: `object` shadows the builtin of the same name inside this loop.
    for object in values.activity_homework_regexs:
        prog = re.compile(object["expression"])
        matches = prog.finditer(unicode(rowMap["Lektier"].text.replace("\t", "")))

        # Loop over the matches
        for element in matches:
            if object["name"] == "note":
                if not element.group("note") == "":
                    homework.append({
                        "note" : element.group("note"),
                        "type" : "note"
                    })
            else:
                homework.append({
                    "note" : element.group("note"),
                    "class" : element.group("class"),
                    "authors" : element.group("writers").split(", "),
                    "name" : element.group("name"),
                    "pages" : element.group("pages"),
                    "subject" : element.group("subject"),
                    "publisher" : element.group("publisher"),
                    "type" : "book"
            })
    # Initialize note variable
    note = unicode(rowMap["Note"].text)

    # Return all the information
    return {
        "status" : "ok",
        "time" : unicode(rowMap["Tidspunkt"].text),
        "teams" : teams,
        "type" : "school" if type == "Lektion" else "other_activity" if type == "Anden aktivitet" else "other",
        "students_education_assigned" : students_education_assigned,
        "teachers" : teachers,
        "rooms" : rooms,
        "ressources" : ressources,
        "note" : note.encode("utf8"),
        "documents" : documents,
        "homework" : homework, # Match books with the list of books
        "links" : links,
        "students_resserved" : "true" if students_resserved.strip() == "Ja" else "false",
        "showed_at" : showed_in,
        "activity_status" : "done" if status == "Afholdt" else "planned" if status == "Planlagt" else "cancelled" if status == "Aflyst" else "other",
        "students" : students,
        "created" : {
            "at" : datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(metaGroups.group("created_date")),functions.zeroPadding(metaGroups.group("created_month")),functions.zeroPadding(metaGroups.group("created_year")),functions.zeroPadding(metaGroups.group("created_hour")),functions.zeroPadding(metaGroups.group("created_minute"))), "%d/%m-%Y %H:%M") if not metaGroups is None else "",
            "by" : metaGroups.group("created_teacher") if not metaGroups is None else ""
        },
        "updated" : {
            "at" : datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(metaGroups.group("updated_date")),functions.zeroPadding(metaGroups.group("updated_month")),functions.zeroPadding(metaGroups.group("updated_year")),functions.zeroPadding(metaGroups.group("updated_hour")),functions.zeroPadding(metaGroups.group("updated_minute"))), "%d/%m-%Y %H:%M") if not metaGroups is None else "",
            "by" : metaGroups.group("updated_teacher") if not metaGroups is None else ""
        },
        "term" : {
            "value" : soup.find("select", attrs={"id" : "m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
            "years_string" : soup.find("select", attrs={"id" : "m_ChooseTerm_term"}).select('option[selected="selected"]')[0].text
        },
        "date" : {
            "start" : startDate,
            "end" : endDate
        }
    }
# Beispiel #8
# 0
def message(config, session=False):
    """Fetch a single Lectio message thread and parse all of its messages.

    Parameters:
        config  -- dict with "school_id", "student_id" and "thread_id".
        session -- False to authenticate via authenticate.authenticate(config),
                   or an existing session dict of Lectio cookies.

    Returns {"status": "ok", "message": {...}} holding the original subject,
    flagged state, original sender, recipients and the thread's messages;
    {"status": "error", "type": "authenticate"} when login fails; or
    {"status": False, "error": "Data not found"} when the thread panel is
    missing from the fetched page.
    """
    url = "https://www.lectio.dk/lectio/%s/beskeder2.aspx?type=liste&elevid=%s" % (
        str(config["school_id"]), str(config["student_id"]))

    if session is False:
        session = authenticate.authenticate(config)

    if session == False:
        return {"status": "error", "type": "authenticate"}

    # Insert the session information from the auth function
    cookies = {
        "lecmobile": "0",
        "ASP.NET_SessionId": session["ASP.NET_SessionId"],
        "LastLoginUserName": session["LastLoginUserName"],
        "lectiogsc": session["lectiogsc"],
        "LectioTicket": session["LectioTicket"]
    }

    # Insert User-agent headers and the cookie information
    headers = {
        "User-Agent":
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
        "Content-Type": "application/x-www-form-urlencoded",
        "Host": "www.lectio.dk",
        "Origin": "https://www.lectio.dk",
        "Cookie": functions.implode(cookies, "{{index}}={{value}}", "; ")
    }

    # First request: load the message list to obtain the page's ASP.NET
    # __VIEWSTATEX token, which must be replayed in the postback below.
    response = proxy.session.get(url, headers=headers)

    html = response.text

    soup = Soup(html)

    viewStateX = soup.find("input", attrs={"id": "__VIEWSTATEX"})["value"]

    # Second request: simulate the postback that opens the requested thread.
    settings = {
        "__EVENTTARGET": "__Page",
        "__EVENTARGUMENT": "$LB2$_MC_$_%s" % (str(config["thread_id"])),
        "__VIEWSTATEX": viewStateX,
    }

    response = proxy.session.post(url, data=settings, headers=headers)

    html = response.text

    soup = Soup(html)

    if soup.find("div",
                 attrs={"id": "s_m_Content_Content_ViewThreadPagePanel"
                        }) is None:
        return {"status": False, "error": "Data not found"}

    # The flag icon's image source tells whether the thread is flagged.
    flagged = False if soup.find(
        "input", attrs={"id": "s_m_Content_Content_FlagThisThreadBox"
                        })["src"] == "/lectio/img/flagoff.gif" else True

    originalElements = soup.find("table",
                                 attrs={
                                     "class": "ShowMessageRecipients"
                                 }).findAll("td")

    # Resolve the thread's original sender through the context-card lookup.
    originalSenderUser = context_card.user(
        {
            "context_card_id":
            originalElements[8].find("span")["lectiocontextcard"],
            "school_id":
            config["school_id"]
        }, session)

    originalSenderUser["user"]["user_context_card_id"] = originalElements[
        8].find("span")["lectiocontextcard"]
    originalSenderUser["user"]["person_id"] = originalElements[8].find(
        "span")["lectiocontextcard"].replace("U", "")

    originalSubject = unicode(functions.cleanText(originalElements[2].text))

    recipients = []

    studentRecipientProg = re.compile(
        r"(?P<name>.*) \((?P<student_class_id>.*)\)")
    teacherRecipientProg = re.compile(r"(?P<name>.*) \((?P<abbrevation>.*)\)")

    # Fill in the single users, added as recipients
    # Context-card ids are prefixed "S" for students and "T" for teachers.
    for row in originalElements[11].findAll("span"):
        context_card_id = row["lectiocontextcard"]
        userType = ""
        data = {"context_card_id": context_card_id}

        if "S" in context_card_id:
            userType = "student"
            studentGroups = studentRecipientProg.match(row.text)
            data["person_id"] = context_card_id.replace("S", "")
            data["student_id"] = context_card_id.replace("S", "")
            data["name"] = unicode(studentGroups.group(
                "name")) if not studentGroups is None else ""
            data["student_class_id"] = studentGroups.group(
                "student_class_id") if not studentGroups is None else ""

        elif "T" in context_card_id:
            userType = "teacher"
            teacherGroups = teacherRecipientProg.match(row.text)
            data["person_id"] = context_card_id.replace("T", "")
            data["teacher_id"] = context_card_id.replace("T", "")
            data["abbrevation"] = unicode(teacherGroups.group(
                "abbrevation")) if not teacherGroups is None else ""
            data["name"] = unicode(teacherGroups.group(
                "name")) if not teacherGroups is None else ""

        data["type"] = userType

        recipients.append(data)

        # Remove the span so only team/group recipient text remains in the
        # cell for the split below.
        row.decompose()

    recipientRows = originalElements[11].text.split(", ")

    for row in recipientRows:
        text = row.replace("\n", "").replace("\r", "").replace("\t", "")

        if "Holdet" in text:
            text = text.replace("Holdet ", "")

            recipients.append({"type": "team", "name": unicode(text)})
        elif "Gruppen" in text:
            text = text.replace("Gruppen ", "")
            recipients.append({"type": "group", "name": unicode(text)})

    messages = []

    answerProg = re.compile(
        r"javascript:__doPostBack\('__Page','ANSWERMESSAGE_(?P<message_id>.*)'\);"
    )
    dateTimeProg = re.compile(
        r"(?P<day>.*)\/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")
    # Maps a colspan "level" to the id of the last message seen at that
    # level, used to reconstruct the reply tree.
    messageLevels = {}

    for row in soup.find("table",
                         attrs={
                             "id": "s_m_Content_Content_ThreadTable"
                         }).findAll("tr"):
        if not row.find("table") is None:
            # The colspan of the first spanning cell encodes the nesting
            # depth of the message in the thread (presumably decreasing
            # with reply depth -- see parent lookup below).
            level = row.findAll(has_colspan)[0]["colspan"]
            data = {}
            messageDetailElements = row.find("table").findAll("td")

            # Subject
            data["subject"] = unicode(messageDetailElements[0].find("h4").text)
            messageDetailElements[0].find("h4").decompose()

            # Sender
            messageSender = context_card.user(
                {
                    "context_card_id":
                    messageDetailElements[0].find("span")["lectiocontextcard"],
                    "school_id":
                    config["school_id"]
                }, session)

            # NOTE(review): these two fields are filled from
            # originalElements[8] (the thread's original sender), not from
            # this message's own sender span -- confirm that is intended.
            messageSender["user"]["user_context_card_id"] = originalElements[
                8].find("span")["lectiocontextcard"]
            messageSender["user"]["person_id"] = originalElements[8].find(
                "span")["lectiocontextcard"].replace("U", "")
            data["sender"] = messageSender["user"]

            messageDetailElements[0].find("span").decompose()

            # Time
            timeText = messageDetailElements[0].text.replace(
                "Af , ", "").strip().replace("\n", "").replace("\t", "")
            dateGroups = dateTimeProg.match(timeText)
            data["date"] = datetime.strptime(
                "%s/%s-%s %s:%s" %
                (functions.zeroPadding(dateGroups.group("day")),
                 functions.zeroPadding(dateGroups.group("month")),
                 dateGroups.group("year"), dateGroups.group("hour"),
                 dateGroups.group("minute")),
                "%d/%m-%Y %H:%M") if not dateGroups is None else ""

            # Message id
            # Extracted from the reply button's __doPostBack argument.
            answerGroups = answerProg.match(
                messageDetailElements[1].find("button")["onclick"])
            message_id = answerGroups.group(
                "message_id") if not answerGroups is None else ""
            data["message_id"] = message_id

            # Remove the detail table so row.text below is only the body.
            row.find("table").decompose()

            # Get message text
            data["message"] = unicode(row.text.strip())

            # Get parent
            # A message's parent is the most recent message one level up
            # (level + 1); note the key is intentionally "parrent_id".
            if str(int(level) + 1) in messageLevels:
                data["parrent_id"] = messageLevels[str(int(level) + 1)]

            messageLevels[level] = message_id

            messages.append(data)

    messageInfo = {
        "original_subject": originalSubject,
        "flagged": flagged,
        "original_sender": originalSenderUser["user"],
        "recipients": recipients,
        "messages": messages
    }

    return {
        "status": "ok",
        "message": messageInfo,
    }
# Beispiel #9
# 0
def exam_team ( config ):
	url = "https://www.lectio.dk/lectio/%s/proevehold.aspx?type=proevehold&ProeveholdId=%s" % ( str(config["school_id"]), str(config["test_team_id"]) )

	cookies = {}

	# Insert User-agent headers and the cookie information
	headers = {
		"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
		"Content-Type" : "application/x-www-form-urlencoded",
		"Host" : "www.lectio.dk",
		"Origin" : "https://www.lectio.dk",
		"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
	}

	response = proxy.session.get(url, headers=headers)

	html = response.text

	soup = Soup(html)

	if soup.find("div", attrs={"id" : "m_Content_LectioDetailIslandProevehold_pa"}) is None:
		return {
			"status" : False,
			"error" : "Data not found"
		}

	tables = soup.find("div", attrs={"id" : "m_Content_LectioDetailIslandProevehold_pa"}).findAll("table")
	oneDayProg = re.compile(r"(?P<day>.*)\/(?P<month>.*)-(?P<year>.*)")
	dateTimeProg = re.compile(r"(?P<day>.*)\/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")
	dayTimeProg = re.compile(r"(?P<day>.*)\/(?P<month>.*) (?P<hour>.*):(?P<minute>.*)")
	multiDayProg = re.compile(r"(?P<start_day>.*)\/(?P<start_month>.*)-(?P<start_year>.*) - (?P<end_day>.*)\/(?P<end_month>.*)-(?P<end_year>.*)")

	informationElements = tables[0].findAll("td")

	teamNameProg = re.compile(r"(?P<team_full_name>.*) \((?P<team_class>.*) (?P<subject_abbrevation>.*)\)")
	teamNameGroups = teamNameProg.match(informationElements[5].text)
	teamNameAlternativeProg = re.compile(r"(?P<team_full_name>.*) \((?P<class_number>) (?P<team_class>.*) (?P<team_name>.*)\)")
	teamNameAlternativeGroups = teamNameAlternativeProg.match(informationElements[5].text)
	teamSecondProg = re.compile(r"(?P<team_full_name>.*) \((?P<team_name>.*)\)")
	teamSecondGroups = teamSecondProg.match(informationElements[5].text)

	xprsProg = re.compile(r"(?P<code>.*) (?P<type>.*) (?P<subject_name>.*)")
	xprsGroups = xprsProg.match(unicode(informationElements[7].text))
	xprs_type = xprsGroups.group("type") if not xprsGroups is None else ""

	test_type = informationElements[11].text

	rooms = []
	roomNameProg = re.compile(r"(?P<alternative_name>.*)? - (?P<room_name>.*) \((?P<exam_room_type>.*)\)")

	for room in informationElements[13].text.split(", "):
		roomNameGroups = roomNameProg.match(room)
		room_type = roomNameGroups.group("exam_room_type") if not roomNameGroups is None else ""
		rooms.append({
			"room_name" : roomNameGroups.group("room_name") if not roomNameGroups is None else "",
			"alternative_name" : roomNameGroups.group("alternative_name") if not roomNameGroups is None and "alternative_name" in roomNameGroups.groupdict() else "",
			"exam_room_type" : "preparation" if room_type == "Forberedelse" else "preparation_2" if room_type == "Forberedelse 2" else "examination",
			"room_type" : room_type
		})

	students = []
	studentRows = tables[1].findAll("tr")
	headers = studentRows[0].findAll("td")
	studentRows.pop(0)

	examStart = None
	examEnd = None
	preperationStart = None
	preperationEnd = None
	eventStart = None
	eventEnd = None

	longPreperationTime = False
	preperation = False
	inGroups = False

	if headers[len(headers)-1].text == "Gruppe s**t":
		inGroups = True

	studentClassIdProg = re.compile(r"(?P<class_name>.*) (?P<student_class_id>.*)")

	for student in studentRows:
		groupStart = None
		groupEnd = None
		studentPreperationStart = None
		studentPreperationEnd = None
		group_number = None

		elements = student.findAll("td")

		studentClassIdGrups = studentClassIdProg.match(elements[0].text)
		studentClassIdFull = elements[0].text
		name = unicode(elements[1].text)
		class_code = elements[2].text

		if inGroups is True:
			startDayGroups = oneDayProg.match(elements[3].text)
			endDayGroups = oneDayProg.match(elements[3].text)
			studentStartTime = elements[4].text
			studentEndTime = elements[5].text
			group_number = elements[6].text

			groupStart = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(startDayGroups.group("day")), functions.zeroPadding(startDayGroups.group("month")), "20" + startDayGroups.group("year"), elements[7].text), "%d/%m-%Y %H:%M")
			groupEnd = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(startDayGroups.group("day")), functions.zeroPadding(startDayGroups.group("month")), "20" + startDayGroups.group("year"), elements[8].text), "%d/%m-%Y %H:%M")
		elif headers[3].text == "Lang forb. start":
			longPreperationTime = True

			startDayGroups = oneDayProg.match(elements[4].text)
			endDayGroups = oneDayProg.match(elements[4].text)
			studentStartTime = elements[5].text
			studentEndTime = elements[6].text

			longPreperationGroups = dayTimeProg.match(elements[3].text)

			studentPreperationStartTime =  longPreperationGroups.group("hour") + ":" + longPreperationGroups.group("minute")

			studentPreperationStart = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(longPreperationGroups.group("day")), functions.zeroPadding(longPreperationGroups.group("month")), "20" + startDayGroups.group("year"), studentPreperationStartTime), "%d/%m-%Y %H:%M")
			studentPreperationEnd = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(startDayGroups.group("day")), functions.zeroPadding(startDayGroups.group("month")), "20" + startDayGroups.group("year"), studentStartTime), "%d/%m-%Y %H:%M")
		elif headers[4].text == "Forb.":
			preperation = True

			startDayGroups = oneDayProg.match(elements[3].text)
			endDayGroups = oneDayProg.match(elements[3].text)
			studentStartTime = elements[5].text
			studentEndTime = elements[6].text

			studentPreperationStartTime =  elements[4].text

			studentPreperationStart = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(startDayGroups.group("day")), functions.zeroPadding(startDayGroups.group("month")), "20" + startDayGroups.group("year"), studentPreperationStartTime), "%d/%m-%Y %H:%M")
			studentPreperationEnd = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(startDayGroups.group("day")), functions.zeroPadding(startDayGroups.group("month")), "20" + startDayGroups.group("year"), studentStartTime), "%d/%m-%Y %H:%M")
		else:
			startDayGroups = oneDayProg.match(elements[3].text)
			endDayGroups = startDayGroups
			studentStartTime = elements[4].text
			studentEndTime = elements[5].text

		studentStart = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(startDayGroups.group("day")), functions.zeroPadding(startDayGroups.group("month")), "20" + startDayGroups.group("year"), studentStartTime), "%d/%m-%Y %H:%M")
		studentEnd = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(endDayGroups.group("day")), functions.zeroPadding(endDayGroups.group("month")), "20" + endDayGroups.group("year"),studentEndTime), "%d/%m-%Y %H:%M")

		if preperationStart is None:
			preperationStart = studentPreperationStart
		elif studentPreperationStart < preperationStart:
			preperationStart = studentPreperationStart

		if preperationEnd is None:
			preperationEnd = studentPreperationEnd
		elif studentPreperationEnd > preperationEnd:
			preperationEnd = studentPreperationEnd

		if examStart is None:
			examStart = studentStart
		elif studentStart < examStart:
			examStart = studentStart

		if examEnd is None:
			examEnd = studentEnd
		elif studentEnd > examEnd:
			examEnd = studentEnd

		if preperationStart is None:
			eventStart = examStart
		else:
			eventStart = preperationStart

		eventEnd = examEnd

		studentEventStart = None
		studentEventEnd = None

		if not studentPreperationStart == None:
			studentEventStart = studentPreperationStart
			studentEventEnd = studentEnd
		else:
			studentEventStart = studentStart
			studentEventEnd = studentEnd

		students.append({
			"student_class_id_full" : studentClassIdFull,
			"student_class_id" : studentClassIdGrups.group("student_class_id") if not studentClassIdGrups is None else "",
			"class_name" : studentClassIdGrups.group("class_name") if not studentClassIdGrups is None else "",
			"class_code" : class_code if not class_code is None else "",
			"name" : name,
			"is_group" : inGroups,
			"group_number" : group_number if not group_number is None else "",
			"group_time" : {
				"start" : groupStart if not groupStart is None else "",
				"end" : groupEnd if not groupEnd is None else ""
			},
			"examination" : {
				"start" : studentStart if not studentStart is None else "",
				"end" : studentEnd if not studentEnd is None else ""
			},
			"preperation_type" : "long" if longPreperationTime is True else "normal" if preperation is True else "none",
			"preperation" : {
				"start" : studentPreperationStart if not studentPreperationStart is None else "",
				"end" : studentPreperationEnd if not studentPreperationEnd is None else ""
			},
			"start" : studentEventStart if not studentEventStart is None else "",
			"end" : studentEventEnd if not studentEventEnd is None else ""
		})

	teachers = []
	teacherProg = re.compile(r"(?P<abbrevation>.*) - (?P<name>.*)")
	for teacher in informationElements[3].contents:
		if len(teacher) > 1 and not unicode(teacher) == u"<br/>":
			teacherGroups = teacherProg.match(unicode(teacher))
			teachers.append({
				"name" : unicode(teacherGroups.group("name")) if not teacherGroups is None else "",
				"abbrevation" : unicode(teacherGroups.group("abbrevation")) if not teacherGroups is None else ""
			})

	censors = []
	censorProg = re.compile(r"(?P<institution_id>.*) - (?P<institution>.*)")
	for censor in informationElements[9].contents:
		if censor and not str(censor) == str("<br/>"):
			censorGroups = censorProg.match(str(censor))
			censors.append({
				"institution_id" : unicode(censorGroups.group("institution_id")) if not censorGroups is None else "",
				"institution" : unicode(censorGroups.group("institution")) if not censorGroups is None else ""
			})

	if not teamNameGroups is None:
		team = {
			"full_name" : unicode(teamNameGroups.group("team_full_name")) if not teamNameGroups is None else "",
			"team_class" : unicode(teamNameGroups.group("team_class")) if not teamNameGroups is None else "",
			"subject_abbrevation" : unicode(teamNameGroups.group("subject_abbrevation")) if not teamNameGroups is None else "",
			"team_class_name" : teamSecondGroups.group("team_name") if not teamSecondGroups is None else ""
		}
	elif not teamNameAlternativeGroups is None:
		team = {
			"full_name" : unicode(teamNameAlternativeGroups.group("team_full_name")) if not teamNameAlternativeGroups is None else "",
			"team_class" : unicode(teamNameAlternativeGroups.group("team_class")) if not teamNameAlternativeGroups is None else "",
			"class_number" : unicode(teamNameAlternativeGroups.group("class_number")) if not teamNameAlternativeGroups is None else "",
			"team_name" : unicode(teamNameAlternativeGroups.group("team_name")) if not teamNameAlternativeGroups is None else "",
			"team_class_name" : teamSecondGroups.group("team_name") if not teamSecondGroups is None else ""
		}
	else:
		team = {
			"full_name" : unicode(informationElements[5].text),
			"team_class_name" : teamSecondGroups.group("team_name") if not teamSecondGroups is None else ""
		}

	test_type_code = "other"
	gym_type = "AGYM"
	test_type_team_name = ""

	testTypeCodeProg = re.compile(r"(?P<team_name>.*) (?P<code>[\w\S]*)$")
	testTypeCodeGroups = testTypeCodeProg.match(informationElements[1].text.strip())
	testTypeAltCodePRog = re.compile(r"(?P<team_name>.*) (?P<code>[\w\S]*) \((?P<gym_type>[\w\S]*)\)$")
	testTypeCodeAltGroups = testTypeAltCodePRog.match(informationElements[1].text.strip())

	if not testTypeCodeAltGroups is None:
		test_type_team_name = testTypeCodeAltGroups.group("team_name")
		gym_type = testTypeCodeAltGroups.group("gym_type")
		test_type_code = testTypeCodeAltGroups.group("code")
	elif not testTypeCodeGroups is None:
		test_type_team_name = testTypeCodeGroups.group("team_name")
		test_type_code = testTypeCodeGroups.group("code")

	xprs_code = xprsGroups.group("code") if not xprsGroups is None else ""
	xprs_level = "A" if "A" in xprs_code else "B" if "B" in xprs_code else "C" if "C" in xprs_code else "D" if "D" in xprs_code else "E" if "E" in xprs_code else "F" if "F" in xprs_code else "-"

	information = {
		"test_team_name" : informationElements[1].text,
		"teachers" : teachers,
		"students" : students,
		"censors" : censors,
		"test_type_team_name" : test_type_team_name,
		"gym_type" : gym_type,
		"test_type_code" : test_type_code,
		"team" : team,
		"xprs_test" : True if not informationElements[7].text == "(ikke XPRS eksamen)" else False,
		"xprs" : {
			"full_name" : unicode(informationElements[7].text),
			"code_full" : xprs_code,
			"code" : xprs_code.replace(xprs_level, ""),
			"type" : "written" if xprs_type == "SKR" else "combined" if xprs_type == "SAM" else "oral" if xprs_type == "MDT" else xprs_type,
			"subject" : xprsGroups.group("subject_name") if not xprsGroups is None else "",
			"xprs_type" : xprs_type,
			"level" : xprs_level
		},
		"test_type" : "written" if test_type == "Skriftlig eksamen" else "oral" if test_type == "Mundtlig eksamen" else "combined" if test_type == "Samlet vurdering" else test_type,
		"number_of_students" : informationElements[19].text,
		"test_type_long_code" : test_type,
		"note" : informationElements[17].text if len(informationElements[17].text) > 1 else "",
		"rooms" : rooms,
		"time" : {
			"start" : examStart,
			"end" : examEnd
		},
		"preperation" : {
			"start" : preperationStart,
			"end" : preperationEnd
		},
		"group_examination" : inGroups,
		"preperation_type" : "long" if longPreperationTime is True else "normal" if preperation is True else "none",
		"event" : {
			"start" : eventStart,
			"end" : eventEnd
		},
		 "term" : {
            "value" : soup.find("select", attrs={"id" : "m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
            "years_string" : soup.find("select", attrs={"id" : "m_ChooseTerm_term"}).select('option[selected="selected"]')[0].text
        },
	}

	return {
		"status" : "ok",
		"information" : information
	}
Beispiel #10
0
def grades ( config, term, session = False ):
	url = "https://www.lectio.dk/lectio/%s/grades/grade_report.aspx?elevid=%s" % ( str(config["school_id"]), str(config["student_id"]) )

	if session is False:
		session = authenticate.authenticate(config)

	if session == False:
		return {"status" : "error", "type" : "authenticate"}
	# Insert the session information from the auth function
	cookies = {
		"lecmobile" : "0",
		"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
		"LastLoginUserName" : session["LastLoginUserName"],
		"lectiogsc" : session["lectiogsc"],
		"LectioTicket" : session["LectioTicket"]
	}

	# Insert User-agent headers and the cookie information
	headers = {
		"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
		"Content-Type" : "application/x-www-form-urlencoded",
		"Host" : "www.lectio.dk",
		"Origin" : "https://www.lectio.dk",
		"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
	}

	response = proxy.session.post(url, headers=headers, data={"s$m$ChooseTerm$term" : term})

	html = response.text

	soup = Soup(html)

	if soup.find("table", attrs={"id" : "s_m_Content_Content_karakterView_KarakterGV"}) is None:
		return {
			"status" : False,
			"error" : "Data not found"
		}

	comments = []
	commentRows = []

	subjectLevelProg = re.compile(r"(?P<abbrevation>[A-Z]*)(?P<level>[a-z]?)")

	subjectAbbrevationMapping = functions.subjectAbbrevationMapping

	if not soup.find("table", attrs={"id" : "s_m_Content_Content_remarks_grid_remarks_grid"}) is None:
		commentRows = soup.find("table", attrs={"id" : "s_m_Content_Content_remarks_grid_remarks_grid"}).findAll("tr")
		commentRows.pop(0)

	dateTime = re.compile(r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")
	dateShort = re.compile(r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*)")

	termValue = soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"]

	if not soup.find(attrs={"id" : "s_m_Content_Content_remarks_grid_remarks_grid"}) is None:
		if soup.find(attrs={"id" : "s_m_Content_Content_remarks_grid_remarks_grid"}).find(".noRecord") is None:
			## Missing Test Opporunity
			for row in commentRows:
				if row.find("div") is None:
					elements = row.findAll("td")
					date = ""

					if not dateTime.match(elements[0].text) is None:
						dateTimeGroups = dateTime.match(elements[0].text)
						year = dateTimeGroups.group("year")

						if len(year) == 2:
							year = "20" + str(year)

						date = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateTimeGroups.group("day")), functions.zeroPadding(dateTimeGroups.group("month")), year, dateTimeGroups.group("hour"), dateTimeGroups.group("minute")), "%d/%m-%Y %H:%M")
					elif dateShort.match(elements[0].text):
						year = dateTimeGroups.group("year")

						if len(year) == 2:
							year = "20" + str(year)

						date = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateTimeGroups.group("day")), functions.zeroPadding(dateTimeGroups.group("month")), year, "12", "00"), "%d/%m-%Y %H:%M")

					comments.append({
						"date" : date,
						"abbrevation" : unicode(cleanText(elements[1].text)),
						"type" : "year_grade" if unicode(cleanText(elements[2].text)) == u"Årskarakter" else "exam_grade" if unicode(cleanText(elements[2].text)) == "Examenskarakter" else unicode(cleanText(elements[2].text)),
						"student_note" : unicode(cleanText(elements[3].text)),
						"term" : termValue
					})

	gradeNotes = []

	rows = soup.find("table", attrs={"id" : "s_m_Content_Content_karakterView_KarakterNoterGrid"}).findAll("tr")
	rows.pop(0)

	gradeTypeProg = re.compile(r"(?P<evaluation_type>.*) - (?P<type>.*)")
	teamProg = re.compile(r"(?P<class_name>.*) (?P<team_name>.*)")
	createdProg = re.compile(r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*) - (?P<teacher>.*)")
	teamElementIdProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/SkemaNy.aspx\?type=(?P<type_name>.*)&holdelementid=(?P<team_element_id>.*)")

	if soup.find("table", attrs={"id" : "s_m_Content_Content_karakterView_KarakterNoterGrid"}).find(".noRecord") is None:
		for row in rows:
			if row.find("span") is None:
				elements = row.findAll("td")

				gradeTypeGroups = gradeTypeProg.match(elements[1].text)
				evaluation_type = gradeTypeGroups.group("evaluation_type") if not gradeTypeGroups is None else ""
				grade_type = gradeTypeGroups.group("type") if not gradeTypeGroups is None else ""
				teamLementGroups = teamElementIdProg.match(elements[0].find("a")["href"])
				classGroups = teamProg.match(elements[0].find("a").text)
				createdGroups = createdProg.match(elements[3].text)
				if not createdGroups is None:
					date = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(createdGroups.group("day")), functions.zeroPadding(createdGroups.group("month")), createdGroups.group("year"), createdGroups.group("hour"), createdGroups.group("minute")), "%d/%m-%Y %H:%M")
				else:
					date = datetime.now()

				gradeNotes.append({
					"team_full_name" : unicode(cleanText(elements[0].find("a").text)),
					"team_element_id" : teamLementGroups.group("team_element_id") if not teamLementGroups is None else "",
					"class_name" : classGroups.group("class_name") if not classGroups is None else "",
					"team_name" : classGroups.group("team_name") if not classGroups is None else "",
					"type" : "written" if grade_type == "skriftlig" else "oral",
					"evaluation_type" : "internal_test" if unicode(evaluation_type) == u"Intern prøve" else "exam_or_year_test" if unicode(evaluation_type) == u"Eksamens-/årsprøvekarakter" else "first_term" if evaluation_type == "1. standpunkt" else "second_term" if evaluation_type == "2. standpunkt" else "third_term" if evaluation_type == "3. standpunkt" else "firth_term" if evaluation_type == "4. standpunkt" else "fifth_term" if evaluation_type == "5. standpunkt" else "sixth_term",
					"grade" : elements[2].text,
					"note" : cleanText(unicode(elements[4].text)),
					"date" : date,
					"teacher_abbrevation" : unicode(createdGroups.group("teacher")) if not createdGroups is None else "",
					"term" : termValue
				})

	protocolLines = []
	termProg = re.compile(r"(?P<term>.*) (?P<year>.*)")
	xprsProg = re.compile(r"(?P<code>.*) (?P<subject>.*)")
	protocolRows = soup.find("table", attrs={"id" : "s_m_Content_Content_ProtokolLinierGrid"}).findAll("tr")
	protocolRows.pop(0)

	if soup.find("table", attrs={"id" : "s_m_Content_Content_ProtokolLinierGrid"}).find(".noRecord") is None:
		for row in protocolRows:
			spans = row.findAll("span")
			if len(spans) > 1:
				elements = row.findAll("td")
				termGroups = termProg.match(cleanText(elements[0].text))
				term = termGroups.group("term") if not termGroups is None else ""
				xprsGroups = xprsProg.match(elements[3].find("span").text)
				teamElement = context_card.team({"school_id" : str(config["school_id"]), "context_card_id" : elements[5].find("span")["lectiocontextcard"]}, session)["team"]
				teamElement["team_element_context_card_id"] = "HE" + teamElement["team_element_id"]

				protocolLines.append({
					"grading" : "7-step" if cleanText(elements[8].text) == "7-trinsskala" else "13-step",
					"grade" : elements[7].text,
					"weight" : cleanText(elements[6].text.replace("," , ".")),
					"evaluation_type" : "oral" if cleanText(elements[4].text) == "Mundtlig" else "written" if cleanText(elements[4].text) == "Skriftlig" else "combined",
					"counts" : True if cleanText(elements[2].text) == "Ja" else False,
					"text" : "year_grade" if unicode(cleanText(elements[1].text)) == u"Årskarakter" else "exam_grade",
					"team" : {
						"name" : unicode(elements[5].find("span").text),
						"context_card_id" : elements[5].find("span")["lectiocontextcard"],
						"team_id" : elements[5].find("span")["lectiocontextcard"].replace("H", ""),
						"team_element" : teamElement
					},
					"xprs" : {
						"full_name" : unicode(elements[3].find("span").text),
						"code" : xprsGroups.group("code") if not xprsGroups is None else "",
						"subject" : xprsGroups.group("subject") if not xprsGroups is None else "",
						"xprs_subject_id" : elements[3].find("span")["lectiocontextcard"].replace("XF", ""),
						"context_card_id" : elements[3].find("span")["lectiocontextcard"]
					},
					"term" : {
						"year" : termGroups.group("year") if not termGroups is None else "",
						"term" : "summer" if term == "Sommer" else "spring" if unicode(term) == u"Forår" else "fall" if unicode(term) == u"Efterår" else "winter"
					}
				})

	gradeList = []
	termMapping = {
		u"Intern prøve" : "internal_test",
		u"Årskarakter" : "year_grade",
		u"1.standpunkt" : "first_term",
		u"2.standpunkt" : "second_term",
		u"3.standpunkt" : "third_term",
		u"4.standpunkt" : "forth_term",
		u"5.standpunkt" : "fifth_term",
		u"6.standpunkt" : "sixth_term",
		u"Eksamens-/årsprøvekarakter" : "exam_or_year_test"
	}
	gradeListRows = soup.find("table", attrs={"id" : "s_m_Content_Content_karakterView_KarakterGV"}).findAll("tr")
	headers = gradeListRows[0].findAll("th")
	headers.pop(0)
	gradeListRows.pop(0)
	teamNameProg = re.compile(r"(?P<class_name>.*) (?P<subject_name>.*), (?P<evaluation_type>.*)")
	subjectAbbrevationProg = re.compile(r"(?P<team_type>.*) (?P<type>.*) (?P<subject>.*)")

	if soup.find("table", attrs={"id" : "s_m_Content_Content_karakterView_KarakterGV"}).find(".noRecord") is None:
		for row in gradeListRows:
			elements = row.findAll("td")

			if elements[0].find("b") is None:
				teamGroups = teamNameProg.match(cleanText(elements[0].text))
				teamElementGroups = teamElementIdProg.match(elements[0].find("a")["href"])
				elements.pop(0)
				className = teamGroups.group("class_name") if not teamGroups is None else ""
				subject = teamGroups.group("subject_name") if not teamGroups is None else ""
				teamName = className + " " + subject

				gradeElements = []
				index = 0

				for element in elements:
					if not cleanText(element.find("div").text) == "":
						header = unicode(headers[index].text)
						term = termMapping[header] if header in termMapping else "other"
						gradeElements.append({
							"term" : term,
							"grade" : cleanText(element.find("div").text)
						})

					index = index + 1

				evaluation_type = cleanText(teamGroups.group("evaluation_type")) if not teamGroups is None else ""

				subjectLevel = None

				subjectLevelGroups = subjectLevelProg.match(subject)
				
				if not subjectLevelGroups is None:
					subject = subjectLevelGroups.group("abbrevation")

				subjectAbbrevationGroups = None

				if len(subject) > 4:
					subjectAbbrevationGroups = subjectAbbrevationProg.match(elements[0].text)

					if not subjectAbbrevationGroups is None:
						subject = subjectAbbrevationGroups.group("subject")

				if not subjectLevelGroups is None:
					subject = subjectLevelGroups.group("abbrevation")

					if "level" in subjectLevelGroups.groupdict():
						subjectLevel = subjectLevelGroups.group("level").upper()

				if subject == "team":
					subject = "TM"

				subjectAltName = None
				subjectAltAbbrevation = None

				if subject in subjectAbbrevationMapping:
					subjectAltAbbrevation = subject
					subjectAltName = subjectAbbrevationMapping[subject]["name"]
					subject = subjectAbbrevationMapping[subject]["abbrevation"]

				data = {
					"evaluation_type" : "oral" if evaluation_type == "mundtlig" else "written" if evaluation_type == "skriftlig" else "combined",
					"team" : {
						"class_name" : className,
						"name" : teamName,
						"subject_abbrevation" : subject,
						"team_element_id" : teamElementGroups.group("team_element_id") if not teamElementGroups is None else "",
						"school_id" : teamElementGroups.group("school_id") if not teamElementGroups is None else ""
					},
					"grades" : gradeElements,
					"term" : {
						"value" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
						"years_string" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0].text
					}
				}

				if not subjectLevel == None and len(subjectLevel) > 0:
					data["team"]["level"] = subjectLevel

				if not subjectAltAbbrevation is None:
					data["team"]["subject_alternative_abbrevation"] = subjectAltAbbrevation
					data["team"]["subject_alternative_name"] = subjectAltName

				if not subjectAbbrevationGroups is None:
					if subjectAbbrevationGroups.group("team_type").isdigit():
						data["team"]["year"] = subjectAbbrevationGroups.group("team_type")
					else:
						data["team"]["education_type"] = subjectAbbrevationGroups.group("team_type")

					data["team"]["type"] = subjectAbbrevationGroups.group("type")

				gradeList.append(data)


	diplomaLines = []
	diplomaRows = soup.find("div", attrs={"id" : "printareaDiplomaLines"}).find("table").findAll("tr")
	diplomaRows.pop(0)
	diplomaRows.pop(0)

	subjectProg = re.compile(r"(?P<subject_name>.*) (?P<subject_level>.*)")
	subjectProgAlternative = re.compile(r"(?P<subject_name>.*) (?P<subject_level>.*) (?P<type>.*)\.")

	if soup.find("div", attrs={"id" : "printareaDiplomaLines"}).find(".noRecord") is None:
		for row in diplomaRows:
			if row.find("span") is None:
				elements = row.findAll("td")
				if subjectProgAlternative.match(elements[0].text.strip().replace("\t", "").replace("\n", "").replace("\r", "").strip()):
					subjectGroups = subjectProgAlternative.match(elements[0].text.strip().replace("\t", "").replace("\n", "").replace("\r", "").strip())
				else:
					subjectGroups = subjectProg.match(elements[0].text.strip().replace("\t", "").replace("\n", "").replace("\r", "").strip())

				year_weight = cleanText(elements[1].text).replace(",", ".")
				year_grade = cleanText(elements[2].text)
				year_ects = cleanText(elements[3].text)
				exam_weight = cleanText(elements[4].text).replace(",", ".")
				exam_grade = cleanText(elements[5].text)
				exam_ects = cleanText(elements[6].text)

				evaluation_type = subjectGroups.group("type") if not subjectGroups is None and "type" in subjectGroups.groupdict() else None

				diplomaLines.append({
					"subject_full" : unicode(elements[0].text.replace("\t", "").replace("\n", "").replace("\r", "")),
					"subject_name" : subjectGroups.group("subject_name").replace("\t", "").replace("\n", "").replace("\r", "") if not subjectGroups is None else "",
					"subject_level" : subjectGroups.group("subject_level").replace("\t", "").replace("\n", "").replace("\r", "") if not subjectGroups is None else "",
					"year_weight" : year_weight if not year_weight.strip() == "-" and not year_weight == "??" else "waiting_for_exam" if year_weight.strip() == "??" else "unkown",
					"year_grade" : year_grade if not year_grade.strip() == "-" and not year_grade == "??" else "waiting_for_exam" if year_grade.strip() == "??" else "unkown",
					"year_ects" : year_ects if not year_ects.strip() == "-" and not year_ects == "??" else "waiting_for_exam" if year_ects.strip() == "??" else "unkown",
					"exam_weight" : exam_weight if not exam_weight.strip() == "-" and not exam_weight == "??" else "waiting_for_exam" if exam_weight.strip() == "??" else "unkown",
					"exam_grade" : exam_grade if not exam_grade.strip() == "-" and not exam_grade == "??" else "waiting_for_exam" if exam_grade.strip() == "??" else "unkown",
					"exam_ects" : exam_ects if not exam_ects.strip() == "-" and not exam_ects == "??" else "waiting_for_exam" if exam_ects.strip() == "??" else "unkown",
					"evaluation_type" : "oral" if evaluation_type == "mdt" else "written" if evaluation_type == "skr" else "combined"
				})

	avgElement = soup.find("span", attrs={"id" : "s_m_Content_Content_GradeAverageLabel"})
	for element in avgElement.findAll("span"):
		element.decompose()

	avgTextProg = re.compile(ur"Eksamensresultat ekskl\. bonus:     (?P<without_bonus>.*) Eksamensresultat inkl\. evt\. bonus: (?P<with_bonus>.*)")
	avgText = unicode(avgElement.text.strip().replace("\n", "").replace("\r", "").replace("\t", ""))
	avgGroups = avgTextProg.match(avgText)

	average = {
		"without_bonus" : avgGroups.group("without_bonus").replace(",", ".") if not avgGroups is None else "",
		"with_bonus" : avgGroups.group("with_bonus").replace(",", ".") if not avgGroups is None else ""
	}

	return {
		"status" : "ok",
		"comments" : comments,
		"grades" : gradeList,
		"grade_notes" : gradeNotes,
		"protocol_lines" : protocolLines,
		"diploma" : diplomaLines,
		"average" : average,
		"term" : {
			"value" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
			"years_string" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0].text
		}
	}
Beispiel #11
0
def userinfo( config, session = False ):
	"""Scrape the Lectio front page for the logged-in student's profile.

	Parameters:
		config: dict with at least "school_id".
		session: an existing Lectio session dict, or False to authenticate here.

	Returns a dict with "status" plus the student id, picture id, teams,
	built-in groups, own groups, name/class, front-page information items,
	upcoming examinations and pending grade releases.
	"""
	if session == False:
		session = authenticate.authenticate(config)

	if session == False:
		return {"status" : "error", "type" : "authenticate"}

	else:
		url = urls.front_page_url.replace("{{SCHOOL_ID}}", str(config["school_id"]))

		# Insert the session information from the auth function
		cookies = {
			"lecmobile" : "0",
			"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
			"LastLoginUserName" : session["LastLoginUserName"],
			"lectiogsc" : session["lectiogsc"],
			"LectioTicket" : session["LectioTicket"]
		}

		# Insert User-agent headers and the cookie information
		headers = {
			"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
			"Content-Type" : "application/x-www-form-urlencoded",
			"Host" : "www.lectio.dk",
			"Origin" : "https://www.lectio.dk",
			"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
		}

		response = proxy.session.get(url, headers=headers)

		html = response.text

		soup = Soup(html)

		lectio_user_id = soup.find("div", attrs={"id" : "s_m_masterleftDiv"}).find("a")["href"]
		picture_id = soup.find("img", attrs={"id" : "s_m_HeaderContent_picctrlthumbimage"})["src"]
		# Row 0: teams, row 1: built-in groups, row 2: own groups.
		teamRows = soup.find("div", attrs={"id" : "s_m_Content_Content_HoldAndGroupList"}).find("table").findAll("tr")

		teams = []
		buildInGroups = []
		ownGroups = []

		idProg = re.compile(r"\/lectio\/(?P<school_id>[0-9]*)/SkemaNy.aspx\?type=(?P<type_name>.*)&holdelementid=(?P<team_element_id>.*)")
		teamProg = re.compile(r"(?P<class_name>.*) (?P<team_name>.*)")

		# Teams
		for row in teamRows[0].findAll("td")[1].findAll("a"):
			idGroups = idProg.match(row["href"])
			name = row.text
			teamGroups = teamProg.match(name)
			teams.append({
				"id" : idGroups.group("team_element_id"),
				"class_name" : unicode(teamGroups.group("class_name")) if not teamGroups is None else "",
				"team_name" : unicode(teamGroups.group("team_name")) if not teamGroups is None else "",
				"name" : name
			})

		# Build in Groups
		for row in teamRows[1].findAll("td")[1].findAll("a"):
			idGroups = idProg.match(row["href"])
			name = row.text
			buildInGroups.append({
				"id" : idGroups.group("team_element_id"),
				"name" : name
			})

		# Own groups
		for row in teamRows[2].findAll("td")[1].findAll("a"):
			idGroups = idProg.match(row["href"])
			# BUGFIX: the original assignment had a trailing comma, which made
			# the id a one-element tuple instead of a string (inconsistent
			# with the teams and built-in groups above). Also renamed to avoid
			# shadowing the builtin id().
			group_id = idGroups.group("team_element_id")
			name = row.text
			ownGroups.append({
				"id" : group_id,
				"name" : name
			})

		# Student name
		name = re.sub(r'"Eleven (\w+), (\w+) - Forside"',r'\2',soup.find("div", attrs={"id" : "s_m_HeaderContent_MainTitle"}).text)

		# s_m_Content_Content_BookReservationInfo_ctl00_DashBoardItem2

		# Info
		informations = []
		schoolTable = soup.find("table", attrs={"id" : "s_m_Content_Content_importantInfo"})
		examinations = []
		grades = []
		infoObjects = schoolTable.findAll("tr")
		dayTimeProg = re.compile(r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")

		# Pending grade releases (optional table).
		if not soup.find("table", attrs={"id" : "s_m_Content_Content_KaraktererInfo"}) is None:
			for row in soup.find("table", attrs={"id" : "s_m_Content_Content_KaraktererInfo"}).findAll("tr"):
				elements = row.findAll("td")
				gradeTeams = []
				gradeTeamProg = re.compile(r"(?P<class_name>.*) (?P<team_name>.*)")
				dayTimeGroups = dayTimeProg.match(elements[2]["title"])

				for gradeTeam in elements[1]["title"].replace("Frigives: ", "").split(", "):
					gradeTeamGroups = gradeTeamProg.match(gradeTeam)
					gradeTeams.append({
						"class_name" : unicode(gradeTeamGroups.group("class_name")) if not gradeTeamGroups is None else "",
						"team_name" : unicode(gradeTeamGroups.group("team_name")) if not gradeTeamGroups is None else ""
					})
				grades.append({
					"date" : datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dayTimeGroups.group("day")), functions.zeroPadding(dayTimeGroups.group("month")), dayTimeGroups.group("year"), dayTimeGroups.group("hour"), dayTimeGroups.group("minute")), "%d/%m-%Y %H:%M"),
					"teams" : gradeTeams
				})

		# Upcoming examinations (optional table).
		if not soup.find("table", attrs={"id" : "s_m_Content_Content_EksamenerInfo"}) is None:
			examObjects = soup.find("table", attrs={"id" : "s_m_Content_Content_EksamenerInfo"}).findAll("tr")
		else:
			examObjects = []

		examIdProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/proevehold.aspx\?type=proevehold&ProeveholdId=(?P<test_team_id>.*)&prevurl=forside.aspx")

		for row in examObjects:
			elements = row.findAll("td")
			examIdGroups = examIdProg.match(elements[1].find("a")["href"])
			dayTimeGroups = dayTimeProg.match(elements[2]["title"])
			examNameProg = re.compile(r"(?P<class_name>.*) (?P<team_name>.*) (?P<type_name>.*)\. eks\.")
			examNameGroups = examNameProg.match(unicode(elements[1].find("a").find("span").text))
			type_name = examNameGroups.group("type_name") if not examNameGroups is None else ""
			examinations.append({
				"test_team_id" : examIdGroups.group("test_team_id"),
				"school_id" : examIdGroups.group("school_id"),
				"title" : unicode(elements[1].find("a").find("span").text),
				"class_name" : examNameGroups.group("class_name") if not examNameGroups is None else "",
				"team_name" : examNameGroups.group("team_name") if not examNameGroups is None else "",
				"date" : datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dayTimeGroups.group("day")), functions.zeroPadding(dayTimeGroups.group("month")), dayTimeGroups.group("year"), dayTimeGroups.group("hour"), dayTimeGroups.group("minute")), "%d/%m-%Y %H:%M")
			})

		# Important-info items, colour-coded by their priority icon.
		if not infoObjects is None:
			for info in infoObjects:
				infoType = ""
				tds = info.findAll("td")
				if tds[0]["class"] is None or not tds[0]["class"] is None and not "norecord" in tds[0]["class"]:
					if  tds[0].find("img")["src"] == "/lectio/img/prio1.auto" :
						infoType = "red"
					elif tds[0].find("img")["src"] == "/lectio/img/prio2.auto":
						infoType = "yellow"
					elif tds[0].find("img")["src"] == "/lectio/img/prio3.auto":
						infoType = "grey"
					informations.append({
						"text" : tds[1].find("span").text,
						"type" : infoType
					})

		nameProg = re.compile(r"Eleven (?P<name>.*), (?P<class_name>.*) - Forside")
		nameGroups = nameProg.match(name)

		return {
			"status" : "ok",
			"student_id" : lectio_user_id.replace("/lectio/%s/SkemaNy.aspx?type=elev&elevid=" % (str(config["school_id"])), ""),
			"picture_id" : picture_id.replace("/lectio/%s/GetImage.aspx?pictureid=" % (str(config["school_id"])), ""),
			"teams" : teams,
			"buildInGroups" : buildInGroups,
			"ownGroups" : ownGroups,
			"name" : unicode(nameGroups.group("name")) if not nameGroups is None else "",
			"class_name" : nameGroups.group("class_name") if not nameGroups is None else "",
			"information" : informations,
			"examinations" : examinations,
			"grades" : grades,
			"username" : soup.find(attrs={"id" : "s_m_masterleftDiv"}).find("a").text
		}
def assignments( config ):
	"""Scrape the student's full assignment list from Lectio.

	Parameters:
		config: dict with at least "school_id" and "student_id".

	Returns a dict with "status", the parsed assignment "list" and the
	selected "term". Each row is parsed best-effort: fields that cannot be
	extracted default to "" rather than failing the whole request.
	"""
	session = authenticate.authenticate(config)

	if session == False:
		return {"status" : "error", "type" : "authenticate"}
	else:
		url = urls.assigment_list.replace("{{SCHOOL_ID}}", str(config["school_id"])).replace("{{STUDENT_ID}}", str(config["student_id"]))

		# Insert the session information from the auth function
		cookies = {
			"lecmobile" : "0",
			"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
			"LastLoginUserName" : session["LastLoginUserName"],
			"lectiogsc" : session["lectiogsc"],
			"LectioTicket" : session["LectioTicket"]
		}

		# Insert User-agent headers and the cookie information
		headers = {
			"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
			"Content-Type" : "application/x-www-form-urlencoded",
			"Host" : "www.lectio.dk",
			"Origin" : "https://www.lectio.dk",
			"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
		}

		# First GET only harvests the ASP.NET view state / event validation
		# tokens needed to post back the filter checkboxes below.
		response = proxy.session.get(url, headers=headers)

		html = response.text

		soup = Soup(html)

		firstViewState = urllib.urlencode({"__VIEWSTATEX" : soup.find(id="__VIEWSTATEX")["value"]})

		firstEventValidationText = soup.find(id="aspnetForm").find(id="__EVENTVALIDATION")["value"]

		firstEventValidation = urllib.urlencode({"__EVENTVALIDATION" : firstEventValidationText})

		# Toggle "current exercises only" off so all assignments are listed.
		firstResponse = proxy.session.post(url, data='__EVENTTARGET=s%24m%24Content%24Content%24CurrentExerciseFilterCB&__EVENTARGUMENT=&__LASTFOCUS='+firstEventValidation+"&"+firstViewState+"&time=0&__EVENTARGUMENT=&__VIEWSTATE=", headers=headers)

		html = firstResponse.text

		soup = Soup(html)

		viewState = urllib.urlencode({"__VIEWSTATEX" : soup.find(id="__VIEWSTATEX")["value"]})

		eventValidationText = soup.find(id="aspnetForm").find(id="__EVENTVALIDATION")["value"]

		eventValidation = urllib.urlencode({"__EVENTVALIDATION" : eventValidationText})

		# Toggle "this term only" off as well.
		response = proxy.session.post(url, data='__EVENTTARGET=s%24m%24Content%24Content%24ShowThisTermOnlyCB&__EVENTARGUMENT=&__LASTFOCUS='+eventValidation+"&"+viewState+"&time=0&__EVENTARGUMENT=&__VIEWSTATE=", headers=headers)

		html = response.text

		soup = Soup(html)

		if soup.find("table", attrs={"id" : "s_m_Content_Content_ExerciseGV"}) is None:
			return {
				"status" : False,
				"error" : "Data not found"
			}

		# Extract table cells
		tableRows = soup.find("table", attrs={"id" : "s_m_Content_Content_ExerciseGV"}).findAll("tr")

		# Remove the header cell
		del tableRows[0]

		assignmentsList = []

		for row in tableRows:
			cells = row.findAll("td")

			# Deadline cell is "d/m-Y H:M"; zero-pad day and month for strptime.
			s = re.search('([0-9]*)\/([0-9]*)-([0-9]*) ([0-9]*):([0-9]*)',cells[3].text)
			date = functions.zeroPadding(s.group(1)) + "/" + functions.zeroPadding(s.group(2)) + "-" + s.group(3) + " " + s.group(4) + ":" + s.group(5)
			# Renamed from "object" to avoid shadowing the builtin.
			item = {}
			try:
				item["week"] = cells[0].find("span").text
			except BaseException:
				item["week"] = ""
			try:
				item["group"] = unicode(cells[1].find("span").text)
			except BaseException:
				item["group"] = ""
			try:
				item["title"] = unicode(cells[2].find("a").text)
			except BaseException:
				item["title"] = ""
			try:
				item["context_card_id"] = cells[1].find("span")["lectiocontextcard"]
				item["team_id"] = cells[1].find("span")["lectiocontextcard"].replace("HE", "")
			except BaseException:
				item["context_card_id"] = ""
				item["team_id"] = ""
			try:
				prog = re.compile(r"\/lectio\/(?P<school_id>.*)\/ElevAflevering.aspx\?elevid=(?P<student_id>.*)&exerciseid=(?P<exercise_id>.*)&(?P<the_other>.*)")
				urlGroups = prog.match(cells[2].find("a")["href"])
				item["exercise_id"] = urlGroups.group("exercise_id")
			except BaseException:
				item["exercise_id"] = ""
			try:
				item["link"] = cells[2].find("a")["href"]
			except BaseException:
				item["link"] = ""
			try:
				# BUGFIX: the format used to be "%d/%m-%Y %H:%S", which parsed
				# the minute field as seconds — every deadline lost its minutes.
				item["date"] = datetime.strptime(date,"%d/%m-%Y %H:%M")
			except BaseException:
				item["date"] = datetime.strptime("1/1-1977 00:01","%d/%m-%Y %H:%M")
			try:
				item["hours"] = float(cells[4].find("span").text.replace(",", ".").strip())
			except BaseException:
				item["hours"] = ""
			try:
				status = unicode(cells[5].find("span").text)
				item["status"] = "handed" if status == "Afleveret" else "missing" if status == "Mangler" else "waiting"
			except BaseException:
				item["status"] = ""
			try:
				item["leave"] = int(cells[6].text.replace(",", ".").replace("%", "").strip())
			except BaseException:
				item["leave"] = ""
			try:
				waiting_for = unicode(cells[7].find("span").text)
				item["waiting_for"] = "student" if waiting_for == "Elev" else "teacher"
			except BaseException:
				item["waiting_for"] = ""
			try:
				item["note"] = unicode(cells[8].text)
			except BaseException:
				item["note"] = ""
			try:
				item["grade"] = unicode(cells[9].text)
			except BaseException:
				item["grade"] = ""
			try:
				item["student_note"] = unicode(cells[10].text)
			except BaseException:
				item["student_note"] = ""

			assignmentsList.append(item)

		return {
			"list" : assignmentsList,
			"status" : "ok",
			"term" : {
            	"value" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
            	"years_string" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0].text
        	}
		}
Beispiel #13
0
def activity_info(config, activity_id, session=False, modules=None):
    """Fetch and parse the Lectio detail page for a single activity (lesson).

    Args:
        config: Dict with at least "school_id"; used to build the request URL.
        activity_id: Id of the activity to fetch.
        session: False for an anonymous request, True to authenticate via
            authenticate.authenticate(config), or an already-established
            session dict holding the Lectio cookies.
        modules: Optional list of module dicts ({"module", "start", "end"})
            used to resolve concrete start/end times when the page only
            states a module number.

    Returns:
        A dict with "status": "ok" and the parsed activity fields (teams,
        students, teachers, rooms, documents, homework, dates, term, ...),
        or {"status": "error", "type": "authenticate"} when authentication
        fails.
    """
    if not session == False:
        if session is True:
            session = authenticate.authenticate(config)

        if session == False:
            return {"status": "error", "type": "authenticate"}

    url = urls.activity_info.replace("{{SCHOOL_ID}}",
                                     str(config["school_id"])).replace(
                                         "{{ACTIVITY_ID}}", str(activity_id))

    if not session == False:
        # Insert the session information from the auth function
        cookies = {
            "lecmobile": "0",
            "ASP.NET_SessionId": session["ASP.NET_SessionId"],
            "LastLoginUserName": session["LastLoginUserName"],
            "lectiogsc": session["lectiogsc"],
            "LectioTicket": session["LectioTicket"]
        }

    else:
        cookies = {}

    settings = {}

    # Insert User-agent headers and the cookie information
    headers = {
        "User-Agent":
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
        "Content-Type": "application/x-www-form-urlencoded",
        "Host": "www.lectio.dk",
        "Origin": "https://www.lectio.dk",
        "Cookie": functions.implode(cookies, "{{index}}={{value}}", "; ")
    }

    response = proxy.session.post(url, data=settings, headers=headers)

    html = response.text

    soup = Soup(html)

    # Find all the different rows in the table (first cell of each
    # direct <tr> child of the detail-island table)
    rows = []

    for x in soup.find("div",
                       attrs={
                           "id": "m_Content_LectioDetailIslandLesson_pa"
                       }).find("table").findAll("tr", recursive=False):
        rows.append(x.find("td"))

    headers = soup.find("div",
                        attrs={
                            "id": "m_Content_LectioDetailIslandLesson_pa"
                        }).find("table").findAll("th")

    # Rename the second "Elever" header so mapRows gets distinct keys
    headers[3].string.replace_with("EleverAs")

    # Make rows[n] match headers[n]: drop cells belonging to nested
    # "NoFrame" tables. Filtering into a new list fixes the original
    # del-while-iterating bug, which skipped the element that followed
    # every deletion (so consecutive NoFrame rows were not all removed).
    rows = [
        element for element in rows
        if element.find_parent("table")["class"][0] != u"NoFrame"
    ]

    # Generate a map of rows (header text -> cell)
    rowMap = functions.mapRows(headers, rows)

    # Retrieve the values
    showed_in_values = unicode(rowMap["Vises"].text).split(", ")
    showed_in = []

    # "activity_type" instead of "type" to avoid shadowing the builtin
    activity_type = unicode(rowMap["Type"].text)
    status = unicode(rowMap["Status"].text)
    students_resserved = unicode(rowMap["Deltagerereserveret"].text)

    teams = []  # Done
    students = []  # Done
    ressources = []  # Test Missing
    rooms = []  # Done
    teachers = []  # Done
    documents = []  # Done
    links = []  # Done
    students_education_assigned = []  # Missing Test
    homework = []

    ressourceProg = re.compile(
        r"\/lectio\/(?P<school_id>.*)\/SkemaNy.aspx\?type=lokale&nosubnav=1&id=(?P<ressource_id>.*)&week=(?P<week>.*)"
    )

    for x in rowMap["Ressourcer"].findAll("a"):
        ressoureceGroups = ressourceProg.match(x["href"])

        ressources.append({
            "ressource_id":
            ressoureceGroups.group("ressource_id")
            if not ressoureceGroups is None else ""
        })

    for x in rowMap["EleverAs"].findAll("a"):
        students_education_assigned.append(
            {"student_id": x["lectiocontextcard"].replace("S", "")})

    # Three alternative formats of the "Tidspunkt" cell:
    #  - module based:   "man 1/9 2. modul, uge 36"
    #  - time based:     "man 1/9 10:00 - 11:30, uge 36"
    #  - multi day:      "man 1/9 10:00 - tir 2/9 11:30, uge 36"
    dateProg = re.compile(
        r"(?P<day_name>.*) (?P<day>.*)\/(?P<month>.*) (?P<module>.*)\. modul, uge (?P<week>.*)"
    )
    termValue = soup.find("select", attrs={
        "id": "m_ChooseTerm_term"
    }).select('option[selected="selected"]')[0]["value"]
    alternativeDateProg = re.compile(
        r"(?P<day_name>.*) (?P<day>.*)\/(?P<month>.*) (?P<start_time>.*) - (?P<end_time>.*), uge (?P<week>.*)"
    )
    multiDayProg = re.compile(
        r"(?P<start_day_name>.*) (?P<start_day>.*)\/(?P<start_month>.*) (?P<start_time>.*) - (?P<end_day_name>.*) (?P<end_day>.*)\/(?P<end_month>.*) (?P<end_time>.*), uge (?P<week>.*)"
    )

    altDateGroups = alternativeDateProg.match(
        rowMap["Tidspunkt"].text.strip().replace("\r",
                                                 "").replace("\n", "").replace(
                                                     "\t", ""))
    dateGroups = dateProg.match(rowMap["Tidspunkt"].text.strip().replace(
        "\r", "").replace("\n", "").replace("\t", ""))
    multiDayGroups = multiDayProg.match(
        rowMap["Tidspunkt"].text.strip().replace("\r",
                                                 "").replace("\n", "").replace(
                                                     "\t", ""))

    startDate = None
    endDate = None

    if not dateGroups is None and not modules == None:
        # Months before August belong to the spring of the school year,
        # i.e. the calendar year after the term value
        if int(dateGroups.group("month")) < 8:
            year = int(termValue) + 1
        else:
            year = int(termValue)

        # Fallback times when the module number is not found in `modules`
        startTime = "12:00"
        endTime = "00:00"

        for x in modules:
            if str(x["module"]) == str(dateGroups.group("module")):
                startTime = x["start"]
                endTime = x["end"]

        startDate = datetime.strptime(
            "%s/%s-%s %s" % (functions.zeroPadding(dateGroups.group("day")),
                             functions.zeroPadding(
                                 dateGroups.group("month")), year, startTime),
            "%d/%m-%Y %H:%M")
        endDate = datetime.strptime(
            "%s/%s-%s %s" %
            (functions.zeroPadding(dateGroups.group("day")),
             functions.zeroPadding(dateGroups.group("month")), year, endTime),
            "%d/%m-%Y %H:%M")
    elif not multiDayGroups is None:
        if int(multiDayGroups.group("month")) < 8:
            year = int(termValue) + 1
        else:
            year = int(termValue)

        startDate = datetime.strptime(
            "%s/%s-%s %s" %
            (functions.zeroPadding(multiDayGroups.group("day")),
             functions.zeroPadding(multiDayGroups.group("month")), year,
             multiDayGroups.group("start_time")), "%d/%m-%Y %H:%M")
        endDate = datetime.strptime(
            "%s/%s-%s %s" %
            (functions.zeroPadding(multiDayGroups.group("day")),
             functions.zeroPadding(multiDayGroups.group("month")), year,
             multiDayGroups.group("end_time")), "%d/%m-%Y %H:%M")
    elif not altDateGroups is None:
        if int(altDateGroups.group("month")) < 8:
            year = int(termValue) + 1
        else:
            year = int(termValue)

        startDate = datetime.strptime(
            "%s/%s-%s %s" %
            (functions.zeroPadding(altDateGroups.group("day")),
             functions.zeroPadding(altDateGroups.group("month")), year,
             altDateGroups.group("start_time")), "%d/%m-%Y %H:%M")
        endDate = datetime.strptime(
            "%s/%s-%s %s" %
            (functions.zeroPadding(altDateGroups.group("day")),
             functions.zeroPadding(altDateGroups.group("month")), year,
             altDateGroups.group("end_time")), "%d/%m-%Y %H:%M")

    # Created and updated dates
    metaProg = re.compile(values.activity_updated_regex)

    metaElements = rowMap["Systeminformation"].text.strip().split("\n")
    metaString = ""
    for me in metaElements:
        metaString = metaString + " " + me.replace("\t\t\t\t", "").replace(
            "\r", "").strip()

    metaGroups = metaProg.search(metaString)

    # Loop through the documents and append to the list
    documentTable = rowMap["Dokumenter"].find("table")
    if not documentTable == None:
        documentRows = documentTable.findAll("td")
        for documentRow in documentRows:
            # Split the size from the unit
            fileSizeProg = re.compile(values.file_size_regex)
            fileSizeGroups = fileSizeProg.search(documentRow.text)

            # Find the different document info elements
            elements = documentRow.findAll("a")

            if len(elements) > 0:
                # Filter the id from the document url
                documentProg = re.compile(values.document_url_regex)
                documentGroups = documentProg.search(elements[1]["href"])

                # Append to the list
                documents.append({
                    "name":
                    elements[1].text.encode("utf8"),
                    "size": {
                        "size": fileSizeGroups.group("size").replace(",", "."),
                        "unit": fileSizeGroups.group("unit_name")
                    },
                    "type":
                    "timetable_document",
                    "document_id":
                    documentGroups.group("document_id")
                })

    # Loop through the students and append to the list.
    # The cell alternates student link / class link, hence grouped(…, 2).
    studentRows = rowMap["Elever"].findAll("a")
    for student, classObject in functions.grouped(studentRows, 2):
        # Filter the id from the class URL
        studentClassProg = re.compile(values.class_url_regex)
        studentClassGroups = studentClassProg.search(classObject["href"])

        # Filter the student id from the URL
        studentIdProg = re.compile(values.student_url_regex)
        studentIdGroups = studentIdProg.search(student["href"])

        students.append({
            "name": unicode(student.text),
            "class": unicode(classObject.text),
            "context_card_id": student["lectiocontextcard"],
            "student_id": studentIdGroups.group("student_id"),
            "class_id": studentClassGroups.group("class_id")
        })

    # Loop through the teams and append to the list
    for team in rowMap["Hold"].findAll("a"):
        # Filter the class name from the team name
        teamNameProg = re.compile(values.team_class_name_regex)
        teamNameGroups = teamNameProg.search(unicode(team.text))

        # Filter the id from the URL
        teamIdProg = re.compile(values.team_url_regex)
        teamIdGroups = teamIdProg.search(team["href"])

        if not teamIdGroups == None:
            # Append to the list
            teams.append({
                "class": teamNameGroups.group("class_name"),
                "team": teamNameGroups.group("team_name"),
                "name": team.text,
                "team_id": teamIdGroups.group("team_id")
            })

    # Loop through the values and append English and Computer easy readable values
    for value in showed_in_values:
        if value == u"i dags- og ugeændringer":
            showed_in.append("day_and_week_changes")
        elif value == u"Inde i berørte skemaer":
            showed_in.append("timetable")
        elif value == u"I toppen af berørte skemaer":
            showed_in.append("top_of_timetable")

    # Loop through the links and append them to the list
    for link in rowMap["Links"].findAll("a"):
        links.append({"url": link["href"], "title": unicode(link.text)})

    # Loop through the rooms and append them to the list
    for room in rowMap["Lokaler"].findAll("a"):
        # Initialize variables
        roomName = ""
        roomNumber = ""

        # Filter the number from the name
        roomNameProg = re.compile(values.room_name_regex)
        roomNameGroups = roomNameProg.search(unicode(room.text))

        if not roomNameGroups == None:
            roomName = roomNameGroups.group("room_name")
            roomNumber = roomNameGroups.group("room_number")

        # Initialize roomId RegEx
        roomIdProg = re.compile(values.room_url_regex)

        # Filter the id from the URL
        roomIdGroups = roomIdProg.search(room["href"])

        # Append the room to the list
        rooms.append({
            "name": roomName,
            "number": roomNumber,
            "room_id": roomIdGroups.group("room_id")
        })

    # Loop through the teachers and append them to the list
    for teacher in rowMap["Laerere"].findAll("a"):
        # Filter the abbrevation from the name
        teacherNameProg = re.compile(values.name_with_abbrevation_regex)
        teacherNameGroups = teacherNameProg.search(unicode(teacher.text))

        # Filter the id from the URL
        teacherIdProg = re.compile(values.teacher_url_regex)
        teacherIdGroups = teacherIdProg.search(teacher["href"])

        # Append to the list
        teachers.append({
            "context_card_id": teacher["lectiocontextcard"],
            "name": teacherNameGroups.group("name"),
            "abbrevation": teacherNameGroups.group("abbrevation"),
            "teacher_id": teacherIdGroups.group("teacher_id"),
            "school_id": teacherIdGroups.group("school_id")
        })

    # Loop over the diferent homework notes and append to the list.
    # "regexObject" instead of "object" to avoid shadowing the builtin.
    for regexObject in values.activity_homework_regexs:
        prog = re.compile(regexObject["expression"])
        matches = prog.finditer(
            unicode(rowMap["Lektier"].text.replace("\t", "")))

        # Loop over the matches
        for element in matches:
            if regexObject["name"] == "note":
                if not element.group("note") == "":
                    homework.append({
                        "note": element.group("note"),
                        "type": "note"
                    })
            else:
                homework.append({
                    "note": element.group("note"),
                    "class": element.group("class"),
                    "authors": element.group("writers").split(", "),
                    "name": element.group("name"),
                    "pages": element.group("pages"),
                    "subject": element.group("subject"),
                    "publisher": element.group("publisher"),
                    "type": "book"
                })
    # Initialize note variable
    note = unicode(rowMap["Note"].text)

    # Return all the information
    return {
        "status":
        "ok",
        "time":
        unicode(rowMap["Tidspunkt"].text),
        "teams":
        teams,
        "type":
        "school" if activity_type == "Lektion" else
        "other_activity" if activity_type == "Anden aktivitet" else "other",
        "students_education_assigned":
        students_education_assigned,
        "teachers":
        teachers,
        "rooms":
        rooms,
        "ressources":
        ressources,
        "note":
        note.encode("utf8"),
        "documents":
        documents,
        "homework":
        homework,  # Match books with the list of books
        "links":
        links,
        "students_resserved":
        "true" if students_resserved.strip() == "Ja" else "false",
        "showed_at":
        showed_in,
        "activity_status":
        "done" if status == "Afholdt" else "planned" if status == "Planlagt"
        else "cancelled" if status == "Aflyst" else "other",
        "students":
        students,
        "created": {
            "at":
            datetime.strptime(
                "%s/%s-%s %s:%s" %
                (functions.zeroPadding(metaGroups.group("created_date")),
                 functions.zeroPadding(metaGroups.group("created_month")),
                 functions.zeroPadding(metaGroups.group("created_year")),
                 functions.zeroPadding(metaGroups.group("created_hour")),
                 functions.zeroPadding(metaGroups.group("created_minute"))),
                "%d/%m-%Y %H:%M") if not metaGroups is None else "",
            "by":
            metaGroups.group("created_teacher")
            if not metaGroups is None else ""
        },
        "updated": {
            "at":
            datetime.strptime(
                "%s/%s-%s %s:%s" %
                (functions.zeroPadding(metaGroups.group("updated_date")),
                 functions.zeroPadding(metaGroups.group("updated_month")),
                 functions.zeroPadding(metaGroups.group("updated_year")),
                 functions.zeroPadding(metaGroups.group("updated_hour")),
                 functions.zeroPadding(metaGroups.group("updated_minute"))),
                "%d/%m-%Y %H:%M") if not metaGroups is None else "",
            "by":
            metaGroups.group("updated_teacher")
            if not metaGroups is None else ""
        },
        "term": {
            "value":
            soup.find("select", attrs={
                "id": "m_ChooseTerm_term"
            }).select('option[selected="selected"]')[0]["value"],
            "years_string":
            soup.find("select", attrs={
                "id": "m_ChooseTerm_term"
            }).select('option[selected="selected"]')[0].text
        },
        "date": {
            "start": startDate,
            "end": endDate
        }
    }
Beispiel #14
0
def timetable( config, url, week, year, session = False ):
	if session == False:
		cookies = {}
	else:
		if session == True:
			session = authenticate.authenticate(config)

		# Insert the session information from the auth function
		cookies = {
			"lecmobile" : "0",
			"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
			"LastLoginUserName" : session["LastLoginUserName"],
			"lectiogsc" : session["lectiogsc"],
			"LectioTicket" : session["LectioTicket"]
		}

	# Sorting settings
	settings = {

	}

	# Insert User-agent headers and the cookie information
	headers = {
		"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
		"Content-Type" : "application/x-www-form-urlencoded",
		"Host" : "www.lectio.dk",
		"Origin" : "https://www.lectio.dk",
		"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
	}

	response = proxy.session.get(url, headers=headers)

	html = response.text

	soup = Soup(html)

	if soup.find("table", attrs={"id" : "s_m_Content_Content_SkemaNyMedNavigation_skema_skematabel"}) is None:
		return {
			"status" : False,
			"error" : "Data not found"
		}

	# Fetch all rows in the table
	rows = soup.find("table", attrs={"id" : "s_m_Content_Content_SkemaNyMedNavigation_skema_skematabel"}).findAll("tr")

	# Fetch module info, to make it possible to draw a complete timetable
	moduleInfo = []
	moduleInfoProg = re.compile(r"(?P<module_number>.*)\. (?P<start_time>.*) - (?P<end_time>.*)")

	for row in soup.findAll("div", attrs={"class" : "s2module-info"}):
		moduleInfoGroups = moduleInfoProg.match(row.text.strip().replace("modul", ""))
		if not moduleInfoGroups is None:
			start = moduleInfoGroups.group("start_time")
			if len(start) < 5:
				start = "0" + start

			end = moduleInfoGroups.group("end_time")
			if len(end) < 5:
				end = "0" + end
			moduleInfo.append({
				"module" : moduleInfoGroups.group("module_number"),
				"start" : start,
				"end" : end
			})

	# Fetch the general information celss
	generalInformationDays = rows[2].findAll("td")
	generalInformation = []

	holidayElements = []

	# Loop through all the cells, and look for information
	index = 0
	for tdRow in generalInformationDays:
		index = index+1

		dayOfWeek = index-1

		if dayOfWeek == 7:
			dayOfWeek = 0

		if index > 1:
			row = tdRow.findAll("a")

			# Loop over the link elements, in the cell
			if not row == None and len(row) > 0:
				for element in row:

					# The time module uses "0" as the first week of the year
					if int(week) == 1:
						timeWeek = 0
					else:
						# Subtract one, because 0 is the first week
						timeWeek = int(week)-1

					date = time.strptime("%s %s %s" % (str(dayOfWeek),str(timeWeek), str(year)),"%w %W %Y")
					content = element.find("div", attrs={"class" : "s2skemabrikcontent"}).findAll("span")[1]
					div = element.find("div", attrs={"class" : "s2skemabrikcontent"})

					href = None
					# If the a tag has a href, fetch it
					try:
						href = element["href"]
					except BaseException:
						pass

					if href == None:
						generalInformation.append({
							"message" : unicode(content.text),
							"date" : datetime.fromtimestamp(mktime(date)),
							"school_id" : str(config["school_id"]),
							"branch_id" : str(config["branch_id"]),
							"term" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
							"week" : week,
							"year" : year
						})
					else:
						# Compile the regular expression
						prog = re.compile(r"\/lectio\/(?P<school_id>[0-9]*)\/aktivitet\/aktivitetinfo.aspx\?id=(?P<activity_id>[0-9]*)&(?P<prev_url>.*)")
						activityGroups = prog.match(element["href"])
						generalInformation.append({
							"message" : unicode(content.text),
							"activity_id" : activityGroups.group("activity_id"),
							"status" : "changed" if "s2changed" in div["class"] else "cancelled" if "s2cancelled" in div["class"] else "normal",
							"date" : datetime.fromtimestamp(mktime(date)),
							"school_id" : str(config["school_id"]),
							"branch_id" : str(config["branch_id"]),
							"term" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
							"week" : week,
							"year" : year
						})

	# Find all the day elements
	timeElements = []


	headers = []

	headerRows = rows[1].findAll("td")
	headerRows.pop(0)
	headerProg = re.compile(ur"(?P<day_name>.*) \((?P<day>.*)\/(?P<month>.*)\)")

	for row in headerRows:
		headerGroups = headerProg.match(row.text)
		headerYear = year

		if not headerGroups is None:
			if int(week) == 1 and int(headerGroups.group("month")) == 12:
				headerYear = str(int(year) - 1)

			headers.append({
				"day" : headerGroups.group("day_name"),
				"date" : datetime.strptime("%s-%s-%s %s" % (functions.zeroPadding(headerGroups.group("day")), functions.zeroPadding(headerGroups.group("month")), headerYear, "12:00"), "%d-%m-%Y %H:%M")
			})

	dayElements = rows[3].findAll("td")
	dayElements.pop(0)

	# Loop over the days
	index = 0
	dayOfWeek = 1
	for dayElement in dayElements:
		# Increment the day
		index = index+1

		# Test
		dayOfWeek = index

		if dayOfWeek == 7:
			dayOfWeek = 0

		# The time module uses "0" as the first week of the year
		if int(week) == 1:
			timeWeek = 0
		else:
			# Subtract one, because 0 is the first week
			timeWeek = int(week)-1

		# Find all the "a" tags, representing timetable elements
		timetableElements = dayElement.findAll("a")

		moduleIndex = 1

		for checkElement in dayElement.findAll(attrs={"class" : "s2module-bg"}):
			if "s2time-off" in checkElement["class"]:
				# Get time from module info elements
				holidayElements.append({
					"start" : datetime.strptime("%s-%s-%s %s" % (headers[index-1]["date"].strftime("%d"), headers[index-1]["date"].strftime("%m"), headers[index-1]["date"].strftime("%Y"), moduleInfo[moduleIndex-1]["start"]), "%d-%m-%Y %H:%M"),
					"end" : datetime.strptime("%s-%s-%s %s" % (headers[index-1]["date"].strftime("%d"), headers[index-1]["date"].strftime("%m"), headers[index-1]["date"].strftime("%Y"), moduleInfo[moduleIndex-1]["end"]), "%d-%m-%Y %H:%M")
				})
			moduleIndex = moduleIndex + 1

		# Loop over the timetable elements
		for timetableElement in timetableElements:

			#The type of the event, "private" or "school"
			type = None

			# Locate the different types of information in the url, and find the different RegEx groups
			expressions = [
				{"type" : "private", "expression" : r"\/lectio\/(?P<school_id>[0-9]*)\/privat_aftale.aspx\?aftaleid=(?P<activity_id>[0-9]*)"},
				{"type" : "school",  "expression" : r"\/lectio\/(?P<school_id>[0-9]*)\/aktivitet\/aktivitetinfo.aspx\?id=(?P<activity_id>[0-9]*)&(?P<prev_url>.*)"},
				{"type" : "outgoing_censor", "expression" : r"\/lectio\/(?P<school_id>.*)\/proevehold.aspx\?type=udgcensur&outboundCensorID=(?P<outbound_censor_id>.*)&prevurl=(?P<prev_url>.*)"},
				{"type" : "exam", "expression" : r"\/lectio\/(?P<school_id>.*)\/proevehold.aspx\?type=proevehold&ProeveholdId=(?P<test_team_id>.*)&prevurl=(?P<prev_url>.*)"}
			]

			# Loop over the expressions
			groups = []
			type = "other"
			for expressionObject in expressions:
				prog = re.compile(expressionObject["expression"])
				if prog.match(timetableElement["href"]):
					groups = prog.match(timetableElement["href"])
					type = expressionObject["type"]

			# Locate the status div
			div = timetableElement.find("div", attrs={"class" : "s2skemabrikcontent"})

			# A list of the teachers
			teachers = []

			# A list of the assigned teams
			teams = []

			# Find all the info span elements
			infoSpanObjects = timetableElement.findAll("span")

			# Loop over the Info spans
			for span in infoSpanObjects:
				id = None

				# Test if property exists
				try:
					id = span["lectiocontextcard"]
				except BaseException:
					pass

				if not id == None:
					 # Team
					if span["lectiocontextcard"][0] == "H":
						# Append the team
						teams.append({
							"context_card_id" : span["lectiocontextcard"],
							"title" : unicode(span.text),
							"team_id" : span["lectiocontextcard"].replace("HE", "")
						})
					# Teacher
					elif span["lectiocontextcard"][0] == "T":
						teachers.append({
							"abbrevation" : unicode(span.text),
							"context_card_id" : span["lectiocontextcard"],
							"teacher_id" : span["lectiocontextcard"].replace("T", "")
						})

			# Get the titletext where to extract start and end times from
			title = timetableElement["title"]

			# Match the title, to extract the start and end time
			timeProg = re.compile(r"(?P<start_hour>[0-9]*):(?P<start_minute>[0-9]*) til (?P<end_hour>[0-9]*):(?P<end_minute>[0-9]*)")
			timeGroups = timeProg.search(unicode(title).encode("utf8"), re.MULTILINE)

			# Get the "main sections" separated by a double return \n\n
			mainSections = title.split("\n\n")

			# Grab the top section and split it by a single return \n
			topSection = mainSections[0].split("\n")

			# Initialize variables, assume that nothing is cancelled or changed
			isChangedOrCancelled = 0
			isCancelled = False
			isChanged = False

			# If the first item in the top section doesn't contain 'til',
			# it must be either cancelled or changed

			if not "til" in topSection[0]:
				isChangedOrCancelled = 1

				# If it says 'Aflyst!'
				if "Aflyst!" in topSection[0]:
					# It must be cancelled
					isCancelled = True
				else:
					# Otherwise it must be changed
					isChanged = True

			if not timeGroups is None:
				startTime = datetime.fromtimestamp(mktime(time.strptime("%s %s %s %s %s" % (timeGroups.group("start_hour"),timeGroups.group("start_minute"), dayOfWeek , timeWeek, year),"%H %M %w %W %Y")))
				endTime = datetime.fromtimestamp(mktime(time.strptime("%s %s %s %s %s" % (timeGroups.group("end_hour"),timeGroups.group("end_minute"), dayOfWeek , timeWeek, year),"%H %M %w %W %Y")))
			else:
				# Grab the date sections, fx: "15/5-2013 15:30 til 17:00"
				dateSections = topSection[0+isChangedOrCancelled].split(" ")

				# Grab the date, being the first (0) section
				if len(dateSections) == 4:
					startDateSection = dateSections[0]
					endDateSection = dateSections[0]

					startTimeSection = dateSections[1]
					endTimeSection = dateSections[3]
				else:
					startDateSection = dateSections[0]
					endDateSection = dateSections[3]

					startTimeSection = dateSections[1]
					endTimeSection = dateSections[4]

				currentTimezone = timezone("Europe/Copenhagen")

				alternativeDayProg = re.compile(r"(?P<day>[0-9]*)/(?P<month>[0-9]*)-(?P<year>[0-9]*)")
				alternativeStartDayGroups = alternativeDayProg.match(startDateSection.strip())
				alternativeEndDayGroups = alternativeDayProg.match(endDateSection.strip())

				startTime = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(alternativeStartDayGroups.group("day")), functions.zeroPadding(alternativeStartDayGroups.group("month")), alternativeStartDayGroups.group("year"), startTimeSection.strip()), "%d/%m-%Y %H:%M")
				endTime = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(alternativeEndDayGroups.group("day")), functions.zeroPadding(alternativeEndDayGroups.group("month")), alternativeEndDayGroups.group("year"), endTimeSection.strip()), "%d/%m-%Y %H:%M")

			roomText = ""
			try:
				if not "rer:" in topSection[3 + isChangedOrCancelled]:
					room = topSection[3 + isChangedOrCancelled].strip("Lokale: ").encode('utf-8').replace("r: ","")
			except IndexError:
				pass

			if sameDay(startTime, dayOfWeek, timeWeek, year):
				if type == "private":
					timeElements.append({
						"text" : unicode(timetableElement.text),
						"activity_id" : groups.group("activity_id"),
						"startTime" : startTime,
						"endTime" : endTime,
						"type" : type,
						"school_id" : groups.group("school_id")
					})
				elif type == "outgoing_censor":
					timeElements.append({
						"text" : unicode(timetableElement.text),
						"outbound_censor_id" : groups.group("outbound_censor_id"),
						"startTime" : startTime,
						"endTime" : endTime,
						"type" : type,
						"school_id" : groups.group("school_id")
					})
				elif type == "exam":
					timeElements.append({
						"text" : unicode(timetableElement.text),
						"test_team_id" : groups.group("test_team_id"),
						"startTime" : startTime,
						"endTime" : endTime,
						"type" : type,
						"school_id" : groups.group("school_id")
					})
				elif type == "school":
					# Add to the list
					timeElements.append({
						"text" : unicode(timetableElement.text),
						"activity_id" : groups.group("activity_id"),
						"status" : "changed" if "s2changed" in div["class"] else "cancelled" if "s2cancelled" in div["class"] else "normal",
						"teachers" : teachers,
						"teams" : teams,
						"startTime" : startTime,
						"endTime" : endTime,
						"type" : type,
						"location_text" : unicode(div.text),
						"room_text" : unicode(roomText),
						"school_id" : groups.group("school_id")
					})

	return {
		"status" : "ok",
		"timetable" : timeElements,
		"information" : generalInformation,
		"module_info" : moduleInfo,
		"headers" : headers,
		"term" : {
			"value" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
			"years_string" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0].text
		}
	}
Beispiel #15
0
def assignment_info ( config, session = False ):
	"""Fetch detailed information about one assignment for one student.

	Scrapes the Lectio assignment page for description documents, teachers,
	teams, group members, hand-in comments/uploads and the student's status.

	config keys used: "school_id", "assignment_id", "student_id".
	session: session dict from authenticate.authenticate(); False creates one.

	Returns {"status": "ok", "information": {...}} on success, an
	authenticate error dict, or {"status": False, "error": "Data not found"}
	when the expected page markup is missing.
	"""
	url = urls.assignment_info.replace("{{SCHOOL_ID}}", str(config["school_id"])).replace("{{ASSIGNMENT_ID}}", str(config["assignment_id"])).replace("{{STUDENT_ID}}",str(config["student_id"]))

	if session is False:
		session = authenticate.authenticate(config)

	if session == False:
		return {"status" : "error", "type" : "authenticate"}

	# Insert the session information from the auth function
	cookies = {
		"lecmobile" : "0",
		"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
		"LastLoginUserName" : session["LastLoginUserName"],
		"lectiogsc" : session["lectiogsc"],
		"LectioTicket" : session["LectioTicket"]
	}

	# Insert User-agent headers and the cookie information
	headers = {
		"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
		"Content-Type" : "application/x-www-form-urlencoded",
		"Host" : "www.lectio.dk",
		"Origin" : "https://www.lectio.dk",
		"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
	}

	response = proxy.session.get(url, headers=headers)

	html = response.text

	soup = Soup(html)

	# Lectio dates look like "11/12-2013 14:30" (day/month-year hour:minute)
	dateTime = re.compile(r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")

	if soup.find("div", attrs={"id" : "m_Content_registerAfl_pa"}) is None:
		return {
			"status" : False,
			"error" : "Data not found"
		}

	teacherProg = re.compile(r"(?P<name>.*) \((?P<abbrevation>.*)\)")
	documentProg = re.compile(r"(?P<name>.*) \((?P<upload_date>.*)\)")
	teamProg = re.compile(r"(?P<class_name>.*) (?P<subject_name>.*)")

	# Map the info table's <th> header texts to their <td> cells, so fields
	# can be looked up by their Danish label (e.g. rowMap["Afleveringsfrist"]).
	rows = soup.find("div", attrs={"id" : "m_Content_registerAfl_pa"}).find("table").findAll("td")
	headers = soup.find("div", attrs={"id" : "m_Content_registerAfl_pa"}).find("table").findAll("th")
	rowMap = functions.mapRows(headers, rows)

	# "Afleveringsfrist" = hand-in deadline
	dateTimeGroups = dateTime.match(rowMap["Afleveringsfrist"].text)

	date = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateTimeGroups.group("day")), functions.zeroPadding(dateTimeGroups.group("month")), dateTimeGroups.group("year"), dateTimeGroups.group("hour"), dateTimeGroups.group("minute")), "%d/%m-%Y %H:%M")

	group_assignment = False
	members = []
	teachers = []
	teams = []
	documents = []
	comments = []

	# Hand-in log: each row is one upload/comment from a student or teacher
	# (header row removed first).
	uploadRows = soup.find("table", attrs={"id" : "m_Content_RecipientGV"}).findAll("tr")
	uploadRows.pop(0)
	uploadProg = re.compile(r"\/lectio/(?P<school_id>.*)\/ExerciseFileGet.aspx\?type=(?P<type>.*)&entryid=(?P<entry_id>.*)")

	for row in uploadRows:
		elements = row.findAll("td")
		context_card_id = elements[1].find("span")["lectiocontextcard"]
		dateTimeGroups = dateTime.match(elements[0].find("span").text)
		upload_type = ""
		entry_id = ""
		# Only rows with an attached file carry a download link
		if not elements[3].find("a") is None:
			uploadGroups = uploadProg.match(elements[3].find("a")["href"])
			entry_id = uploadGroups.group("entry_id")
			upload_type = "student_assignment" if uploadGroups.group("type") == "elevopgave" else "other"

		uploadDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateTimeGroups.group("day")), functions.zeroPadding(dateTimeGroups.group("month")), dateTimeGroups.group("year"), dateTimeGroups.group("hour"), dateTimeGroups.group("minute")), "%d/%m-%Y %H:%M")

		# Context card ids starting with "T" belong to teachers, "S" to students
		comments.append({
			"file" : {
				"name" : elements[3].find("a").text.encode("utf8") if not elements[3].find("a") is None else "",
				"entry_id" : entry_id,
				"type" : upload_type
			},
			"comment" : functions.cleanText(elements[2].text).encode("utf8"),
			"uploader" : {
				"name" : elements[1].find("span")["title"].encode("utf8") if context_card_id[0] == "T" else elements[1].find("span").text.encode("utf8"),
				"type" : "teacher" if context_card_id[0] == "T" else "student",
				"person_id" : context_card_id.replace("T", "") if context_card_id[0] == "T" else context_card_id.replace("S", ""),
				"context_card_id" : context_card_id,
				"abbrevation" : elements[1].find("span").text.encode("utf8") if context_card_id[0] == "T" else ""
			},
			"date" : uploadDate
		})

	documentIdProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/ExerciseFileGet.aspx\?type=(?P<type>.*)&exercisefileid=(?P<exercise_file_id>.*)")

	# The status cell is split into a leading status word and a trailing
	# percentage captured as "leave".
	statusProg = re.compile(r"(?P<status>.*)\/ (.*): (?P<leave>.*)%")
	studentDataElements = soup.find("table", attrs={"id" : "m_Content_StudentGV"}).findAll("tr")[1].findAll("td")
	statusGroups = statusProg.match(functions.cleanText(studentDataElements[3].text).encode("utf8"))
	status = functions.cleanText(statusGroups.group("status")) if not statusGroups is None else ""
	studentData = {
		"student" : {
			"context_card_id" : studentDataElements[0].find("img")["lectiocontextcard"],
			"student_id" : studentDataElements[0].find("img")["lectiocontextcard"].replace("S", ""),
		},
		# "Afleveret" = handed in; "Elev"/"Lærer" = waiting for student/teacher
		"status" : "handed" if status.strip() == "Afleveret" else "missing",
		"waiting_for" : "student" if functions.cleanText(studentDataElements[2].text) == "Elev" else "teacher" if unicode(functions.cleanText(studentDataElements[2].text)) == u"Lærer" else "none",
		"leave" : functions.cleanText(statusGroups.group("leave")) if not statusGroups is None else 0,
		"finished" : True if soup.find("input", attrs={"id" : "m_Content_StudentGV_ctl02_CompletedCB"}).has_attr("checked") and soup.find("input", attrs={"id" : "m_Content_StudentGV_ctl02_CompletedCB"})["checked"] == "checked" else False,
		"grade" : functions.cleanText(studentDataElements[5].text).encode("utf8"),
		"grade_note" : functions.cleanText(studentDataElements[6].text).encode("utf8"),
		"student_note" : functions.cleanText(studentDataElements[7].text).encode("utf8")
	}

	# "Opgavebeskrivelse" = assignment description documents (optional row).
	# NOTE: the original assigned an unused "documentType" local here with a
	# stray trailing comma (building a 1-tuple); removed as dead code.
	if u"Opgavebeskrivelse" in rowMap:
		for row in rowMap[u"Opgavebeskrivelse"].findAll("a"):
			fileNameGroups = documentProg.match(functions.cleanText(row.text.strip()))
			fileIdGroups = documentIdProg.match(row["href"])
			documents.append({
				"name" : fileNameGroups.group("name") if not fileNameGroups is None else "",
				"exercise_file_id" : fileIdGroups.group("exercise_file_id") if not fileIdGroups is None else "",
				"uploaded_date_string" : fileNameGroups.group("upload_date") if not fileNameGroups is None else "",
				"type" : "exercise_description",
				"school_id" : fileIdGroups.group("school_id") if not fileIdGroups is None else ""
			})

	# "Hold" = teams the assignment is attached to
	for row in rowMap["Hold"].findAll("span"):
		#teamGroups = teamProg.match(row.text)
		teams.append({
			#"class_name" : unicode(teamGroups.group("class_name")) if not teamGroups is None else "",
			#"subject_name" : unicode(teamGroups.group("subject_name")) if not teamGroups is None else "",
			"team_element_name" : row.text,
			"team_element_id" : rowMap["Hold"].find("span")["lectiocontextcard"].replace("HE", ""),
			"context_card_id" : rowMap["Hold"].find("span")["lectiocontextcard"]
		})

	# "Ansvarlig" = responsible teacher(s), shown as "Name (ABBR)"
	for row in rowMap["Ansvarlig"].findAll("span"):
		teacherGroups = teacherProg.match(row.text)
		teachers.append({
			"teacher_id" : row["lectiocontextcard"].replace("T", ""),
			"name" : teacherGroups.group("name").encode("utf8") if not teacherGroups is None else "",
			"context_card_id" : row["lectiocontextcard"],
			"abbrevation" : teacherGroups.group("abbrevation").encode("utf8") if not teacherGroups is None else ""
		})

	# The group island only exists for group assignments; otherwise the single
	# student is read from the page header.
	if soup.find("div", attrs={"id" : "m_Content_groupIsland_pa"}):
		group_assignment = True
		memberRows = soup.find("table", attrs={"id" : "m_Content_groupMembersGV"}).findAll("tr")
		memberRows.pop(0)
		memberProg = re.compile(r"(?P<name>.*), (?P<code>.*)")

		for row in memberRows:
			elements = row.findAll("td")
			memberGroups = memberProg.match(elements[0].find("span").text)
			members.append({
				"name" : memberGroups.group("name") if not memberGroups is None else "",
				"student_id" : elements[0].find("span")["lectiocontextcard"].replace("S", ""),
				"context_card_id" : elements[0].find("span")["lectiocontextcard"],
				"student_class_code" : memberGroups.group("code") if not memberGroups is None else ""
			})
	else:
		memberProg = re.compile(r"Eleven (?P<name>.*) \((?P<code>.*)\) - Opgaveaflevering")
		memberGroups = memberProg.match(soup.find(attrs={"id" : "m_HeaderContent_pageHeader"}).find("div").text)
		members.append({
			"student_id" : config["student_id"],
			"context_card_id" : soup.find(attrs={"id" : "m_HeaderContent_pageHeader"}).find("div")["lectiocontextcard"],
			"student_class_code" : memberGroups.group("code") if not memberGroups is None else "",
			"name" : memberGroups.group("name") if not memberGroups is None else "",
		})

	# Students that could still be added to the hand-in group (if any)
	availableStudents = []
	availableStudentProg = re.compile(r"(?P<name>.*) \((?P<code>.*)\)")

	if not soup.find("select", attrs={"id" : "m_Content_groupStudentAddDD"}) is None:

		for row in soup.find("select", attrs={"id" : "m_Content_groupStudentAddDD"}).findAll("option"):
			progGroups = availableStudentProg.match(row.text)
			availableStudents.append({
				"name" : str(progGroups.group("name")).decode("utf8"),
				"student_id" : row["value"],
				"student_class_code" : progGroups.group("code"),
			})

	# (renamed from the original's misspelled "infomation" local)
	information = {
		"documents" : documents,
		"title" : rowMap[r"Opgavetitel"].find("span").text.encode("utf8"),
		"group_assignment" : group_assignment,
		"members" : members,
		"note" : rowMap[u"Opgavenote"].text.encode("utf8"),
		"team" : teams,
		"grading_scale" : "7-step" if rowMap[u"Karakterskala"].text == "7-trinsskala" else "13-step",
		"teachers" : teachers,
		"student_time" : rowMap[u"Elevtid"].text.replace(",", ".").replace("timer", ""),
		"date" : date,
		"in_instruction_detail" : True if rowMap[u"Iundervisningsbeskrivelse"].text == "Ja" else False,
		"comments" : comments,
		"group" : {
			"available_students" : availableStudents
		},
		"student" : studentData
	}

	#Delivered by, grade, grade_note, student_note, ended, awaiting, uploaded-documents

	return {
		"status" : "ok",
		"information" : information
	}
Beispiel #16
0
def assignments(config, session=False):
    """Fetch the student's assignment list from Lectio.

    Performs the two ASP.NET postbacks needed to enable the "current
    exercises" and "this term only" filters, then scrapes the resulting
    assignment table row by row.

    Args:
        config: dict with at least "school_id" and "student_id".
        session: optional session dict from authenticate.authenticate();
            when False a new session is created.

    Returns:
        {"status": "ok", "list": [...], "term": {...}} on success, an
        authenticate error dict, or {"status": False, "error": "Data not
        found"} when the table is missing.
    """
    if session == False:
        session = authenticate.authenticate(config)

    if session == False:
        return {"status": "error", "type": "authenticate"}

    url = urls.assigment_list.replace("{{SCHOOL_ID}}",
                                      str(config["school_id"])).replace(
                                          "{{STUDENT_ID}}",
                                          str(config["student_id"]))

    # Insert the session information from the auth function
    cookies = {
        "lecmobile": "0",
        "ASP.NET_SessionId": session["ASP.NET_SessionId"],
        "LastLoginUserName": session["LastLoginUserName"],
        "lectiogsc": session["lectiogsc"],
        "LectioTicket": session["LectioTicket"]
    }

    # Insert User-agent headers and the cookie information
    headers = {
        "User-Agent":
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
        "Content-Type": "application/x-www-form-urlencoded",
        "Host": "www.lectio.dk",
        "Origin": "https://www.lectio.dk",
        "Cookie": functions.implode(cookies, "{{index}}={{value}}", "; ")
    }

    response = proxy.session.get(url, headers=headers)

    soup = Soup(response.text)

    # First postback: toggle the "current exercises" filter. ASP.NET requires
    # the page's own __VIEWSTATEX/__EVENTVALIDATION tokens in each POST, so
    # they are re-extracted before every request.
    firstViewState = urllib.urlencode(
        {"__VIEWSTATEX": soup.find(id="__VIEWSTATEX")["value"]})
    firstEventValidation = urllib.urlencode({
        "__EVENTVALIDATION":
        soup.find(id="aspnetForm").find(id="__EVENTVALIDATION")["value"]
    })

    firstResponse = proxy.session.post(
        url,
        data=
        '__EVENTTARGET=s%24m%24Content%24Content%24CurrentExerciseFilterCB&__EVENTARGUMENT=&__LASTFOCUS='
        + firstEventValidation + "&" + firstViewState +
        "&time=0&__EVENTARGUMENT=&__VIEWSTATE=",
        headers=headers)

    soup = Soup(firstResponse.text)

    # Second postback: toggle the "show this term only" checkbox.
    viewState = urllib.urlencode(
        {"__VIEWSTATEX": soup.find(id="__VIEWSTATEX")["value"]})
    eventValidation = urllib.urlencode({
        "__EVENTVALIDATION":
        soup.find(id="aspnetForm").find(id="__EVENTVALIDATION")["value"]
    })

    response = proxy.session.post(
        url,
        data=
        '__EVENTTARGET=s%24m%24Content%24Content%24ShowThisTermOnlyCB&__EVENTARGUMENT=&__LASTFOCUS='
        + eventValidation + "&" + viewState +
        "&time=0&__EVENTARGUMENT=&__VIEWSTATE=",
        headers=headers)

    soup = Soup(response.text)

    if soup.find("table",
                 attrs={"id": "s_m_Content_Content_ExerciseGV"}) is None:
        return {"status": False, "error": "Data not found"}

    # Extract the table rows and drop the header row
    tableRows = soup.find("table",
                          attrs={
                              "id": "s_m_Content_Content_ExerciseGV"
                          }).findAll("tr")
    del tableRows[0]

    assignmentsList = []

    # Deadline cell format: "d/m-yyyy h:mm". Compiled once outside the loop.
    deadlineProg = re.compile(r'([0-9]*)\/([0-9]*)-([0-9]*) ([0-9]*):([0-9]*)')
    linkProg = re.compile(
        r"\/lectio\/(?P<school_id>.*)\/ElevAflevering.aspx\?elevid=(?P<student_id>.*)&exerciseid=(?P<exercise_id>.*)&(?P<the_other>.*)"
    )

    for row in tableRows:
        cells = row.findAll("td")

        s = deadlineProg.search(cells[3].text)
        date = functions.zeroPadding(
            s.group(1)) + "/" + functions.zeroPadding(
                s.group(2)) + "-" + s.group(3) + " " + s.group(
                    4) + ":" + s.group(5)

        # Every field is scraped independently; a missing cell yields "".
        # (renamed from "object", which shadowed the builtin)
        assignment = {}
        try:
            assignment["week"] = cells[0].find("span").text
        except Exception:
            assignment["week"] = ""
        try:
            assignment["group"] = cells[1].find("span").text.encode("utf8")
        except Exception:
            assignment["group"] = ""
        try:
            assignment["title"] = cells[2].find("a").text.encode("utf8")
        except Exception:
            assignment["title"] = ""
        try:
            assignment["context_card_id"] = cells[1].find(
                "span")["lectiocontextcard"]
            assignment["team_element_id"] = cells[1].find(
                "span")["lectiocontextcard"].replace("HE", "")
        except Exception:
            assignment["context_card_id"] = ""
            assignment["team_element_id"] = ""
        try:
            urlGroups = linkProg.match(cells[2].find("a")["href"])
            assignment["exercise_id"] = urlGroups.group("exercise_id")
        except Exception:
            assignment["exercise_id"] = ""
        try:
            assignment["link"] = cells[2].find("a")["href"]
        except Exception:
            assignment["link"] = ""
        try:
            # BUG FIX: the original used "%d/%m-%Y %H:%S", which parsed the
            # minutes into the *seconds* field; the string is hour:minute.
            assignment["date"] = datetime.strptime(date, "%d/%m-%Y %H:%M")
        except Exception:
            assignment["date"] = datetime.strptime("1/1-1977 00:01",
                                                   "%d/%m-%Y %H:%M")
        try:
            assignment["hours"] = float(cells[4].find("span").text.replace(
                ",", ".").strip())
        except Exception:
            assignment["hours"] = ""
        try:
            # "Afleveret" = handed in, "Mangler" = missing
            status = unicode(cells[5].find("span").text)
            assignment[
                "status"] = "handed" if status == "Afleveret" else "missing" if status == "Mangler" else "waiting"
        except Exception:
            assignment["status"] = ""
        try:
            assignment["leave"] = int(cells[6].text.replace(",", ".").replace(
                "%", "").strip())
        except Exception:
            assignment["leave"] = ""
        try:
            # "Elev" = waiting for the student, otherwise the teacher
            waiting_for = unicode(cells[7].find("span").text)
            assignment[
                "waiting_for"] = "student" if waiting_for == "Elev" else "teacher"
        except Exception:
            assignment["waiting_for"] = ""
        try:
            assignment["note"] = cells[8].text.encode("utf8")
        except Exception:
            assignment["note"] = ""
        try:
            assignment["grade"] = cells[9].text.encode("utf8")
        except Exception:
            assignment["grade"] = ""
        try:
            assignment["student_note"] = cells[10].text.encode("utf8")
        except Exception:
            assignment["student_note"] = ""

        assignmentsList.append(assignment)

    return {
        "list": assignmentsList,
        "status": "ok",
        "term": {
            "value":
            soup.find("select", attrs={
                "id": "s_m_ChooseTerm_term"
            }).select('option[selected="selected"]')[0]["value"],
            "years_string":
            soup.find("select", attrs={
                "id": "s_m_ChooseTerm_term"
            }).select('option[selected="selected"]')[0].text
        }
    }
Beispiel #17
0
def student_surveys(config, session=False):
    """Fetch the surveys visible to a student.

    Scrapes three sections of the survey overview page: surveys open for
    answering, surveys open for reporting, and the student's own (closed)
    surveys. A survey appearing in more than one section is merged into a
    single entry whose "types" list carries every applicable state.

    Args:
        config: dict with at least "school_id" and "student_id".
        session: optional session dict from authenticate.authenticate();
            when False a new session is created.

    Returns:
        {"status": "ok", "surveys": [...]} on success, an authenticate error
        dict, or {"status": False, "error": "Data not found"}.
    """
    url = "https://www.lectio.dk/lectio/%s/spoergeskema_rapport.aspx?type=mine&elevid=%s" % (
        str(config["school_id"]), str(config["student_id"]))

    if session is False:
        session = authenticate.authenticate(config)

    if session == False:
        return {"status": "error", "type": "authenticate"}

    # Insert the session information from the auth function
    cookies = {
        "lecmobile": "0",
        "ASP.NET_SessionId": session["ASP.NET_SessionId"],
        "LastLoginUserName": session["LastLoginUserName"],
        "lectiogsc": session["lectiogsc"],
        "LectioTicket": session["LectioTicket"]
    }

    # Insert User-agent headers and the cookie information
    headers = {
        "User-Agent":
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
        "Content-Type": "application/x-www-form-urlencoded",
        "Host": "www.lectio.dk",
        "Origin": "https://www.lectio.dk",
        "Cookie": functions.implode(cookies, "{{index}}={{value}}", "; ")
    }

    response = proxy.session.get(url, headers=headers)

    soup = Soup(response.text)

    if soup.find("div",
                 attrs={"id": "s_m_Content_Content_answer_island_pa"}) is None:
        return {"status": False, "error": "Data not found"}

    surveys = []
    ids = []

    openForAnsweringProg = re.compile(
        r"\/lectio\/(?P<school_id>.*)\/spoergeskema_besvar.aspx\?id=(?P<survey_id>.*)&prevurl=(?P<prev_url>.*)"
    )
    ownProg = re.compile(
        r"\/lectio\/(?P<school_id>.*)\/spoergeskema_rediger.aspx\?id=(?P<survey_id>.*)&prevurl=(?P<prev_url>.*)"
    )
    openForReportingProg = re.compile(
        r"\/lectio\/(?P<school_id>.*)\/spoergeskema\/spoergeskemarapportering.aspx\?id=(?P<survey_id>.*)&prevurl=(?P<prev_url>.*)"
    )
    dateTimeProg = re.compile(
        r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")

    def parseDate(text):
        # Turn "d/m-yyyy h:mm" into a datetime; "" when the text doesn't match
        # (mirrors the original per-section ternaries).
        groups = dateTimeProg.match(text)
        if groups is None:
            return ""
        return datetime.strptime(
            "%s/%s-%s %s:%s" %
            (functions.zeroPadding(groups.group("day")),
             functions.zeroPadding(groups.group("month")),
             groups.group("year"), groups.group("hour"),
             groups.group("minute")), "%d/%m-%Y %H:%M")

    def cellText(cell):
        # Prefer the inner <span>'s (stripped) text when present.
        span = cell.find("span")
        return span.text.strip() if span is not None else cell.text

    def cleanTitle(cell):
        return cell.text.strip().replace("\r", "").replace(
            "\n", "").replace("\t", "").encode("utf8")

    def extractId(prog, cell):
        groups = prog.match(cell.find("a")["href"])
        return groups.group("survey_id") if groups is not None else ""

    # --- Section 1: surveys open for answering -----------------------------
    answerIsland = soup.find(
        attrs={"id": "s_m_Content_Content_answer_island_pa"})
    if answerIsland.find("table").find(attrs={"class": "noRecord"}) is None:
        for row in answerIsland.findAll("tr")[1:]:
            elements = row.findAll("td")
            date = parseDate(cellText(elements[3]))
            id = extractId(openForAnsweringProg, elements[0])
            ids.append(id)
            surveys.append({
                "types": ["open_for_answering"],
                "survey_id": id,
                "anonymous": True if elements[2].text == "Ja" else False,
                "answer_date": date,
                "title": cleanTitle(elements[0])
            })

    # --- Section 2: surveys open for reporting -----------------------------
    reportIsland = soup.find(
        attrs={"id": "s_m_Content_Content_report_island_pa"})
    if reportIsland.find(attrs={"class": "noRecord"}) is None:
        for row in reportIsland.findAll("tr")[1:]:
            elements = row.findAll("td")
            answerDate = parseDate(cellText(elements[2]))
            reportDate = parseDate(elements[3].text)
            endDate = parseDate(elements[4].text)
            id = extractId(openForReportingProg, elements[0])

            # BUG FIX: the original appended the id to `ids` *before* testing
            # "id in ids", making the test always true — surveys only open for
            # reporting were silently never added to the result list.
            if id in ids:
                for x in surveys:
                    if x["survey_id"] == id:
                        x["answer_date"] = answerDate
                        x["report_date"] = reportDate
                        x["end_date"] = endDate
                        x["types"].append("open_for_reporting")
            else:
                ids.append(id)
                surveys.append({
                    # BUG FIX: "types" was a bare string here in the original;
                    # every other section uses a list and the merge code above
                    # appends to it.
                    "types": ["open_for_reporting"],
                    "survey_id": id,
                    "answer_date": answerDate,
                    "report_date": reportDate,
                    "end_date": endDate,
                    "title": cleanTitle(elements[0])
                })

    # --- Section 3: the student's own surveys ------------------------------
    ownIsland = soup.find(attrs={"id": "s_m_Content_Content_own_island_pa"})
    if ownIsland.find(attrs={"class": "noRecord"}) is None:
        for row in ownIsland.findAll("tr")[1:]:
            elements = row.findAll("td")
            answerDate = parseDate(cellText(elements[1]))
            reportDate = parseDate(elements[2].text)
            endDate = parseDate(elements[3].text)
            id = extractId(ownProg, elements[0])

            if id in ids:
                for x in surveys:
                    if x["survey_id"] == id:
                        x["owner_id"] = str(config["student_id"])
                        x["answer_date"] = answerDate
                        x["report_date"] = reportDate
                        x["end_date"] = endDate
            else:
                ids.append(id)
                surveys.append({
                    "types": ["closed"],
                    "survey_id": id,
                    "answer_date": answerDate,
                    "report_date": reportDate,
                    "end_date": endDate,
                    "title": cleanTitle(elements[0])
                })

    # BUG FIX: the return was indented inside the last "if" block, so the
    # function implicitly returned None whenever the student owned no surveys.
    return {"status": "ok", "surveys": surveys}
Beispiel #18
0
def document ( config, session = False ):
	"""Fetch metadata for a single Lectio document (the "dokumentrediger" page).

	Scrapes the document-edit page for name, extension, size, creator, last
	change, comment, public flag, affiliations (who can see/edit it) and the
	school term it belongs to.

	config keys used: "school_id", "document_id".
	session: session dict from authenticate.authenticate(); False creates one.

	Returns {"status": "ok", "document": {...}}, an authenticate error dict,
	or {"status": False, "error": "Data not found"} when the page is missing.
	"""
	url = "https://www.lectio.dk/lectio/%s/dokumentrediger.aspx?dokumentid=%s" % ( str(config["school_id"]), str(config["document_id"]) )

	if session is False:
		session = authenticate.authenticate(config)

	if session == False:
		return {"status" : "error", "type" : "authenticate"}

	# Session cookies required by Lectio for an authenticated request
	cookies = {
		"lecmobile" : "0",
		"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
		"LastLoginUserName" : session["LastLoginUserName"],
		"lectiogsc" : session["lectiogsc"],
		"LectioTicket" : session["LectioTicket"]
	}

	# Insert User-agent headers and the cookie information
	headers = {
		"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
		"Content-Type" : "application/x-www-form-urlencoded",
		"Host" : "www.lectio.dk",
		"Origin" : "https://www.lectio.dk",
		"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
	}

	response = proxy.session.get(url, headers=headers)

	html = response.text

	soup = Soup(html)

	if soup.find("div", attrs={"id" : "m_Content_Dokument_pa"}) is None:
		return {
			"status" : False,
			"error" : "Data not found"
		}

	# When the info table has fewer than 7 cells the layout is shifted one
	# cell to the left; `offset` compensates in the index arithmetic below.
	offset = 0

	elements = soup.find("div", attrs={"id" : "m_Content_Dokument_pa"}).findAll("td")

	if len(elements) < 7:
		offset = 1

	# Resolve the creator's full record via their context card (this issues a
	# separate request through context_card.user)
	creator = context_card.user({
		"context_card_id" : elements[3-offset].find("span")["lectiocontextcard"],
		"school_id" : config["school_id"]
	}, session)["user"]

	# The "changed by" cell holds a span with the changer's context card;
	# decompose() removes the span so only the date text remains, then the
	# remaining "d/m-yyyy h:mm" string is parsed. NOTE: the statement order
	# here matters — the span must be read before it is decomposed.
	changer = elements[4-offset].find("span")["lectiocontextcard"]
	elements[4-offset].find("span").decompose()
	dateText = elements[4-offset].text.replace(" af ", "").strip()
	dateTimeProg = re.compile(r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")
	dateGroups = dateTimeProg.match(dateText)
	date = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""

	# Affiliation rows: who the document is shared with (header row dropped)
	connectionRows = soup.find("table", attrs={"id" : "m_Content_AffiliationsGV"}).findAll("tr")
	connectionRows.pop(0)

	connections = []

	for row in connectionRows:
		rowElements = row.findAll("td")

		# The affiliation type is inferred from the context card id via
		# substring containment ("H" -> team, "T" -> teacher, else student)
		data = {
			"context_card_id" : rowElements[0]["lectiocontextcard"],
			"type" : "team" if "H" in rowElements[0]["lectiocontextcard"] else "teacher" if "T" in rowElements[0]["lectiocontextcard"] else "student",
			"name" : unicode(rowElements[0].find("span").text),
			"can_edit" : True if "checked" in rowElements[1].find("input").attrs else False
		}

		# Optional folder selector: the currently-selected folder, if any
		if rowElements[2].find("select"):
			folder_id = rowElements[2].find("select").select('option[selected="selected"]')[0]["value"]
			data["folder_id"] = folder_id

		connections.append(data)

	# Assemble the document record; whitespace is scrubbed from the name/size
	document = {
		"name" : unicode(elements[0].find("a").text).replace("\t", "").replace("\n", "").replace("\r", "").strip(),
		"extension" : os.path.splitext(elements[0].find("a").text.replace("\t", "").replace("\n", "").replace("\r", "").strip())[1].replace(".", ""),
		"size" : elements[2-offset].text.replace(",", ".").replace("\t", "").replace("\n", "").replace("\r", "").strip(),
		"document_id" : str(config["document_id"]),
		"creator" : creator,
		"changer" : {
			"context_card_id" : changer,
			"type" : "teacher" if "T" in changer else "student",
			"date" : date
		},
		"comment" : soup.find("textarea", attrs={"id" : "m_Content_EditDocComments_tb"}).text.replace("\r\n",""),
		"public" : True if "checked" in soup.find("input", attrs={"id" : "m_Content_EditDocIsPublic"}).attrs else False,
		"connections" : connections,
		"term" : {
			"value" : soup.find("select", attrs={"id" : "m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
			"years_string" : soup.find("select", attrs={"id" : "m_ChooseTerm_term"}).select('option[selected="selected"]')[0].text
		}
	}

	return {
		"status" : "ok",
		"document" : document
	}
Beispiel #19
0
def survey_report(config, session=False):
    """Fetch and parse a Lectio survey report ("spoergeskemarapportering") page.

    Downloads the report for ``config["survey_id"]`` at ``config["school_id"]``,
    authenticating first unless an existing session dict is supplied.

    Args:
        config: dict with at least "school_id" and "survey_id" (plus whatever
            ``authenticate.authenticate`` needs when no session is given).
        session: an existing Lectio session dict, or ``False`` to log in here.

    Returns:
        ``{"status": "ok", "information": ..., "stats": ..., "sections": ...}``
        on success; ``{"status": "error", "type": "authenticate"}`` when login
        fails; ``{"status": False, "error": "Data not found"}`` when the page
        lacks the expected report markup.
    """
    url = "https://www.lectio.dk/lectio/%s/spoergeskema/spoergeskemarapportering.aspx?id=%s" % (
        str(config["school_id"]), str(config["survey_id"]))

    if session is False:
        session = authenticate.authenticate(config)

    if session == False:
        return {"status": "error", "type": "authenticate"}
    # Insert the session information from the auth function
    cookies = {
        "lecmobile": "0",
        "ASP.NET_SessionId": session["ASP.NET_SessionId"],
        "LastLoginUserName": session["LastLoginUserName"],
        "lectiogsc": session["lectiogsc"],
        "LectioTicket": session["LectioTicket"]
    }

    # Insert User-agent headers and the cookie information
    headers = {
        "User-Agent":
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
        "Content-Type": "application/x-www-form-urlencoded",
        "Host": "www.lectio.dk",
        "Origin": "https://www.lectio.dk",
        "Cookie": functions.implode(cookies, "{{index}}={{value}}", "; ")
    }

    response = proxy.session.get(url, headers=headers)

    html = response.text

    soup = Soup(html)

    # The report container div is only present when the survey exists and is
    # readable with this session; bail out early otherwise.
    if soup.find("div", attrs={"id": "m_Content_sdasd_pa"}) is None:
        return {"status": False, "error": "Data not found"}

    # Matches Lectio timestamps of the form "d/m-Y H:M".
    dateTimeProg = re.compile(
        r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")

    informationTables = soup.find("div", attrs={
        "id": "m_Content_sdasd_pa"
    }).findAll("table")
    infoElements = informationTables[0].findAll("td")

    # Third info cell holds the answer deadline; empty string when it does
    # not parse as a timestamp.
    dateGroups = dateTimeProg.match(infoElements[2].text)
    answerDate = datetime.strptime(
        "%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")),
                            functions.zeroPadding(dateGroups.group("month")),
                            dateGroups.group("year"), dateGroups.group("hour"),
                            dateGroups.group("minute")),
        "%d/%m-%Y %H:%M") if not dateGroups is None else ""

    # Resolve the survey owner through their context card.
    owner = context_card.user(
        {
            "context_card_id":
            infoElements[1].find("span")["lectiocontextcard"],
            "school_id": str(config["school_id"])
        }, session)["user"]

    ownerUser = {
        "context_cards": [
            infoElements[1].find("span")["lectiocontextcard"],
            owner["context_card_id"]
        ],
        "picture_id":
        owner["picture_id"],
        "name":
        owner["name"],
        "type":
        owner["type"]
    }

    if owner["type"] == "student":
        ownerUser["student_id"] = owner["student_id"]
    else:
        ownerUser["teacher_id"] = owner["teacher_id"]

    information = {
        "title": infoElements[0].text.encode("utf8"),
        "answer_date": answerDate,
        "owner": ownerUser
    }

    # Second info table: respondent statistics. Assumes a fixed cell layout of
    # 4 counts each for teachers, students and totals — TODO confirm against
    # the live page if Lectio changes the markup.
    statElements = informationTables[1].findAll("td")

    stats = {
        "teachers": {
            "registred": statElements[1].text,
            "submitted": statElements[2].text,
            "submitted_with_unsubscribed": statElements[3].text,
            "not_submitted": statElements[4].text
        },
        "students": {
            "registred": statElements[5].text,
            "submitted": statElements[6].text,
            "submitted_with_unsubscribed": statElements[7].text,
            "not_submitted": statElements[8].text
        },
        "total": {
            "registred": statElements[9].text,
            "submitted": statElements[10].text,
            "submitted_with_unsubscribed": statElements[11].text,
            "not_submitted": statElements[12].text
        }
    }

    sections = []

    # State for the section currently being accumulated.
    section_number = None
    section_title = None
    section_elements = []
    section_description = None

    # State for the question currently being accumulated.
    current_question_title = None
    current_question_number = None
    current_question_description = None

    # Splits an <h3> heading into its number ("1" or "1.2") and title.
    titleProg = re.compile(r"(?P<number>[\d\.\d\S]*) (?P<title>.*)")

    # Per-question answer state; reset after each question is flushed.
    type = "text"
    answerStats = []
    unanswered = 0
    unansweredPercent = 0

    # Walk the report rows. A row with an <h3> starts either a new section
    # (undotted number) or a new question (dotted number, e.g. "1.2"); a row
    # without one carries the answer tables for the current question.
    for row in soup.find(attrs={
            "id": "m_Content_ctl00_pa"
    }).find("table").findAll("tr", recursive=False):
        elements = row.findAll("td")

        text = elements[0].text.strip().replace("\r", "").replace("\t", "")

        if len(text) > 0:
            if not elements[0].find("h3") is None:
                titleGroups = titleProg.match(elements[0].find("h3").text)

                if not "." in titleGroups.group("number"):
                    # New top-level section: flush the previous one first.
                    if not section_number is None:
                        sections.append({
                            "number": section_number,
                            "title": section_title,
                            "elements": section_elements,
                            "description": section_description
                        })

                        section_number = None
                        section_title = None
                        section_elements = []
                        section_description = None

                    section_number = titleGroups.group(
                        "number") if not titleGroups is None else None
                    section_title = titleGroups.group(
                        "title") if not titleGroups is None else None
                    # Remove the heading so the remaining cell text is the
                    # section description.
                    elements[0].find("h3").decompose()
                    section_description = elements[0].text.replace(
                        "\r\n", "").replace("\t", "").strip().strip("\n")
                else:
                    # Dotted number: a question within the current section.
                    current_question_number = titleGroups.group(
                        "number") if not titleGroups is None else None
                    current_question_title = titleGroups.group(
                        "title") if not titleGroups is None else None
                    elements[0].find("h3").decompose()
                    current_question_description = elements[0].text.replace(
                        "\r\n", "").replace("\t", "").strip().strip("\n")
            else:
                tables = row.findAll("table")
                answers = []

                # A table without an <img> lists individual answers; one with
                # an <img> holds the aggregated bar-chart statistics.
                if tables[0].find("img") is None:
                    for x in tables[0].findAll("tr"):
                        xElements = x.findAll("td")

                        # Checkbox answers are comma-separated option lists.
                        if type == "checkbox":
                            options = xElements[3].text.split(", ")
                        else:
                            options = [xElements[3].text]

                        if xElements[2].text == "anonym":
                            answers.append({
                                "anonymous": True,
                                "respondent_id": xElements[0].text,
                                "options": options
                            })
                        else:
                            answers.append({
                                "anonymous":
                                False,
                                "options":
                                options,
                                "user_context_card_id":
                                xElements[0].find("span")["lectiocontextcard"],
                                "user_text_id":
                                xElements[1].text,
                                "user_team_text":
                                xElements[2].text
                            })

                    # The answer table ends a question: flush it into the
                    # current section and reset the per-question state.
                    section_elements.append({
                        "number":
                        current_question_number.encode("utf8"),
                        "title":
                        current_question_title.encode("utf8"),
                        "description":
                        current_question_description.encode("utf8"),
                        "type":
                        type,
                        "answers":
                        answers,
                        "answer_stats":
                        answerStats,
                        "unanswered":
                        str(unanswered),
                        "unanswered_percent":
                        str(unansweredPercent)
                    })

                    type = "text"
                    answerStats = []
                    unanswered = 0
                    unansweredPercent = 0
                else:
                    # Aggregated statistics table; the "Ubesvaret" row also
                    # tells us the question type (radio vs. checkbox).
                    for x in tables[0].findAll("tr"):
                        xElements = x.findAll("td")
                        if x.find("th").text == "Ubesvaret":
                            type = "radio"
                            unanswered = xElements[1].text
                            unansweredPercent = xElements[2].text.replace(
                                " %", "")
                        else:
                            type = "checkbox"
                            answerStats.append({
                                "text":
                                x.find("th").text.encode("utf8"),
                                "number":
                                xElements[1].text,
                                "percent":
                                xElements[2].text.replace(" %", "").replace(
                                    ",", ".")
                            })

    # Surveys without explicit section headings still get one default section.
    if section_number == None:
        section_number = 1
        section_title = ""
        section_description = ""

    # Flush the final (or only) section.
    sections.append({
        "number": section_number,
        "title": section_title,
        "elements": section_elements,
        "description": section_description
    })

    return {
        "status": "ok",
        "information": information,
        "stats": stats,
        "sections": sections
    }
def timetable( config, url, week, year, session = False ):
	cookies = {}
	# Sorting settings
	settings = {

	}

	# Insert User-agent headers and the cookie information
	headers = {
		"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
		"Content-Type" : "application/x-www-form-urlencoded",
		"Host" : "www.lectio.dk",
		"Origin" : "https://www.lectio.dk",
		"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
	}

	response = proxy.session.get(url, headers=headers)

	html = response.text

	soup = Soup(html)

	if soup.find("table", attrs={"id" : "s_m_Content_Content_SkemaNyMedNavigation_skema_skematabel"}) is None:
		return {
			"status" : False,
			"error" : "Data not found"
		}

	# Fetch all rows in the table
	rows = soup.find("table", attrs={"id" : "s_m_Content_Content_SkemaNyMedNavigation_skema_skematabel"}).findAll("tr")

	# Fetch module info, to make it possible to draw a complete timetable
	moduleInfo = []
	moduleInfoProg = re.compile(r"(?P<module_number>.*)\. (?P<start_time>.*) - (?P<end_time>.*)")

	for row in soup.findAll("div", attrs={"class" : "s2module-info"}):
		moduleInfoGroups = moduleInfoProg.match(row.text.strip().replace("modul", ""))
		if not moduleInfoGroups is None:
			start = moduleInfoGroups.group("start_time")
			if len(start) < 5:
				start = "0" + start

			end = moduleInfoGroups.group("end_time")
			if len(end) < 5:
				end = "0" + end
			moduleInfo.append({
				"module" : moduleInfoGroups.group("module_number"),
				"start" : start,
				"end" : end
			})

	# Fetch the general information celss
	generalInformationDays = rows[2].findAll("td")
	generalInformation = []

	holidayElements = []

	# Loop through all the cells, and look for information
	index = 0
	for tdRow in generalInformationDays:
		index = index+1
		if index > 1:
			row = tdRow.findAll("a")

			dayOfWeek = index-1

			if dayOfWeek == 7:
				dayOfWeek = 0

			# Loop over the link elements, in the cell
			if not row == None and len(row) > 0:
				for element in row:

					# The time module uses "0" as the first week of the year
					if int(week) == 1:
						timeWeek = 0
					else:
						# Subtract one, because 0 is the first week
						timeWeek = int(week)-1

					date = time.strptime("%s %s %s" % (str(dayOfWeek),str(timeWeek), str(year)),"%w %W %Y")
					content = element.find("div", attrs={"class" : "s2skemabrikcontent"}).findAll("span")[1]
					div = element.find("div", attrs={"class" : "s2skemabrikcontent"})

					href = None
					# If the a tag has a href, fetch it
					try:
						href = element["href"]
					except BaseException:
						pass

					if href == None:
						generalInformation.append({
							"message" : unicode(content.text),
							"date" : datetime.fromtimestamp(mktime(date)),
							"school_id" : str(config["school_id"]),
							"branch_id" : str(config["branch_id"]),
							"term" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
							"week" : week,
							"year" : year
						})
					else:
						# Compile the regular expression
						prog = re.compile(r"\/lectio\/(?P<school_id>[0-9]*)\/aktivitet\/aktivitetinfo.aspx\?id=(?P<activity_id>[0-9]*)&(?P<prev_url>.*)")
						activityGroups = prog.match(element["href"])
						generalInformation.append({
							"message" : unicode(content.text),
							"activity_id" : activityGroups.group("activity_id"),
							"status" : "changed" if "s2changed" in div["class"] else "cancelled" if "s2cancelled" in div["class"] else "normal",
							"date" : datetime.fromtimestamp(mktime(date)),
							"school_id" : str(config["school_id"]),
							"branch_id" : str(config["branch_id"]),
							"term" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
							"week" : week,
							"year" : year
						})

	# Find all the day elements
	timeElements = []


	headers = []

	headerRows = rows[1].findAll("td")
	headerRows.pop(0)
	headerProg = re.compile(ur"(?P<day_name>.*) \((?P<day>.*)\/(?P<month>.*)\)")

	for row in headerRows:
		headerGroups = headerProg.match(row.text)
		headerYear = year

		if not headerGroups is None:
			if int(week) == 1 and int(headerGroups.group("month")) == 12:
				headerYear = str(int(year) - 1)

			headers.append({
				"day" : headerGroups.group("day_name"),
				"date" : datetime.strptime("%s-%s-%s %s" % (functions.zeroPadding(headerGroups.group("day")), functions.zeroPadding(headerGroups.group("month")), headerYear, "12:00"), "%d-%m-%Y %H:%M")
			})

	dayElements = rows[3].findAll("td")
	dayElements.pop(0)

	# Loop over the days
	index = 0
	dayOfWeek = 1
	for dayElement in dayElements:
		# Increment the day
		index = index+1

		dayOfWeek = index

		if dayOfWeek == 7:
			dayOfWeek = 0

		# The time module uses "0" as the first week of the year
		if int(week) == 1:
			timeWeek = 0
		else:
			# Subtract one, because 0 is the first week
			timeWeek = int(week)-1

		# Find all the "a" tags, representing timetable elements
		timetableElements = dayElement.findAll("a")

		moduleIndex = 1

		for checkElement in dayElement.findAll(attrs={"class" : "s2module-bg"}):
			if "s2time-off" in checkElement["class"]:
				# Get time from module info elements
				holidayElements.append({
					"start" : datetime.strptime("%s-%s-%s %s" % (headers[index-1]["date"].strftime("%d"), headers[index-1]["date"].strftime("%m"), headers[index-1]["date"].strftime("%Y"), moduleInfo[moduleIndex-1]["start"]), "%d-%m-%Y %H:%M"),
					"end" : datetime.strptime("%s-%s-%s %s" % (headers[index-1]["date"].strftime("%d"), headers[index-1]["date"].strftime("%m"), headers[index-1]["date"].strftime("%Y"), moduleInfo[moduleIndex-1]["end"]), "%d-%m-%Y %H:%M")
				})
			moduleIndex = moduleIndex + 1

		# Loop over the timetable elements
		for timetableElement in timetableElements:

			#The type of the event, "private" or "school"
			type = None

			# Locate the different types of information in the url, and find the different RegEx groups
			expressions = [
				{"type" : "private", "expression" : r"\/lectio\/(?P<school_id>[0-9]*)\/privat_aftale.aspx\?aftaleid=(?P<activity_id>[0-9]*)"},
				{"type" : "school",  "expression" : r"\/lectio\/(?P<school_id>[0-9]*)\/aktivitet\/aktivitetinfo.aspx\?id=(?P<activity_id>[0-9]*)&(?P<prev_url>.*)"},
				{"type" : "outgoing_censor", "expression" : r"\/lectio\/(?P<school_id>.*)\/proevehold.aspx\?type=udgcensur&outboundCensorID=(?P<outbound_censor_id>.*)&prevurl=(?P<prev_url>.*)"},
				{"type" : "exam", "expression" : r"\/lectio\/(?P<school_id>.*)\/proevehold.aspx\?type=proevehold&ProeveholdId=(?P<test_team_id>.*)&prevurl=(?P<prev_url>.*)"}
			]

			# Loop over the expressions
			groups = []
			type = "other"
			for expressionObject in expressions:
				prog = re.compile(expressionObject["expression"])
				if prog.match(timetableElement["href"]):
					groups = prog.match(timetableElement["href"])
					type = expressionObject["type"]

			# Locate the status div
			div = timetableElement.find("div", attrs={"class" : "s2skemabrikcontent"})

			# A list of the teachers
			teachers = []

			# A list of the assigned teams
			teams = []

			# Find all the info span elements
			infoSpanObjects = timetableElement.findAll("span")

			# Loop over the Info spans
			for span in infoSpanObjects:
				id = None

				# Test if property exists
				try:
					id = span["lectiocontextcard"]
				except BaseException:
					pass

				if not id == None:
					 # Team
					if span["lectiocontextcard"][0] == "H":
						# Append the team
						teams.append({
							"context_card_id" : span["lectiocontextcard"],
							"title" : unicode(span.text),
							"team_id" : span["lectiocontextcard"].replace("HE", "")
						})
					# Teacher
					elif span["lectiocontextcard"][0] == "T":
						teachers.append({
							"abbrevation" : unicode(span.text),
							"context_card_id" : span["lectiocontextcard"],
							"teacher_id" : span["lectiocontextcard"].replace("T", "")
						})

			# Get the titletext where to extract start and end times from
			title = timetableElement["title"]

			# Match the title, to extract the start and end time
			timeProg = re.compile(r"(?P<start_hour>[0-9]*):(?P<start_minute>[0-9]*) til (?P<end_hour>[0-9]*):(?P<end_minute>[0-9]*)")
			timeGroups = timeProg.search(unicode(title).encode("utf8"), re.MULTILINE)

			# Get the "main sections" separated by a double return \n\n
			mainSections = title.split("\n\n")

			# Grab the top section and split it by a single return \n
			topSection = mainSections[0].split("\n")

			# Initialize variables, assume that nothing is cancelled or changed
			isChangedOrCancelled = 0
			isCancelled = False
			isChanged = False

			# If the first item in the top section doesn't contain 'til',
			# it must be either cancelled or changed

			if not "til" in topSection[0]:
				isChangedOrCancelled = 1

				# If it says 'Aflyst!'
				if "Aflyst!" in topSection[0]:
					# It must be cancelled
					isCancelled = True
				else:
					# Otherwise it must be changed
					isChanged = True

			if not timeGroups is None:
				startTime = datetime.fromtimestamp(mktime(time.strptime("%s %s %s %s %s" % (timeGroups.group("start_hour"),timeGroups.group("start_minute"), dayOfWeek , timeWeek, year),"%H %M %w %W %Y")))
				endTime = datetime.fromtimestamp(mktime(time.strptime("%s %s %s %s %s" % (timeGroups.group("end_hour"),timeGroups.group("end_minute"), dayOfWeek , timeWeek, year),"%H %M %w %W %Y")))
			else:
				# Grab the date sections, fx: "15/5-2013 15:30 til 17:00"
				dateSections = topSection[0+isChangedOrCancelled].split(" ")

				# Grab the date, being the first (0) section
				if len(dateSections) == 4:
					startDateSection = dateSections[0]
					endDateSection = dateSections[0]

					startTimeSection = dateSections[1]
					endTimeSection = dateSections[3]
				else:
					startDateSection = dateSections[0]
					endDateSection = dateSections[3]

					startTimeSection = dateSections[1]
					endTimeSection = dateSections[4]

				currentTimezone = timezone("Europe/Copenhagen")

				alternativeDayProg = re.compile(r"(?P<day>[0-9]*)/(?P<month>[0-9]*)-(?P<year>[0-9]*)")
				alternativeStartDayGroups = alternativeDayProg.match(startDateSection.strip())
				alternativeEndDayGroups = alternativeDayProg.match(endDateSection.strip())

				startTime = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(alternativeStartDayGroups.group("day")), functions.zeroPadding(alternativeStartDayGroups.group("month")), alternativeStartDayGroups.group("year"), startTimeSection.strip()), "%d/%m-%Y %H:%M")
				endTime = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(alternativeEndDayGroups.group("day")), functions.zeroPadding(alternativeEndDayGroups.group("month")), alternativeEndDayGroups.group("year"), endTimeSection.strip()), "%d/%m-%Y %H:%M")

			roomText = ""
			try:
				if not "rer:" in topSection[3 + isChangedOrCancelled]:
					room = topSection[3 + isChangedOrCancelled].strip("Lokale: ").encode('utf-8').replace("r: ","")
			except IndexError:
				pass

			if sameDay(startTime, dayOfWeek, timeWeek, year):
				if type == "private":
					timeElements.append({
						"text" : unicode(timetableElement.text),
						"activity_id" : groups.group("activity_id"),
						"startTime" : startTime,
						"endTime" : endTime,
						"type" : type,
						"school_id" : groups.group("school_id")
					})
				elif type == "outgoing_censor":
					timeElements.append({
						"text" : unicode(timetableElement.text),
						"outbound_censor_id" : groups.group("outbound_censor_id"),
						"startTime" : startTime,
						"endTime" : endTime,
						"type" : type,
						"school_id" : groups.group("school_id")
					})
				elif type == "exam":
					timeElements.append({
						"text" : unicode(timetableElement.text),
						"test_team_id" : groups.group("test_team_id"),
						"startTime" : startTime,
						"endTime" : endTime,
						"type" : type,
						"school_id" : groups.group("school_id")
					})
				elif type == "school":
					# Add to the list
					timeElements.append({
						"text" : unicode(timetableElement.text),
						"activity_id" : groups.group("activity_id"),
						"status" : "changed" if "s2changed" in div["class"] else "cancelled" if "s2cancelled" in div["class"] else "normal",
						"teachers" : teachers,
						"teams" : teams,
						"startTime" : startTime,
						"endTime" : endTime,
						"type" : type,
						"location_text" : unicode(div.text),
						"room_text" : unicode(roomText),
						"school_id" : groups.group("school_id")
					})

	return {
		"status" : "ok",
		"timetable" : timeElements,
		"information" : generalInformation,
		"module_info" : moduleInfo,
		"headers" : headers,
		"term" : {
			"value" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
			"years_string" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0].text
		}
	}
# Beispiel #21
# 0
def student_surveys ( config, session = False ):
	"""Fetch and parse a student's survey overview page from Lectio.

	Merges the three tables on the page — surveys open for answering, open
	for reporting, and the student's own (closed) surveys — into one list,
	keyed by survey_id.

	Args:
		config: dict with at least "school_id" and "student_id" (plus the
			credentials authenticate.authenticate needs when session is False).
		session: an existing Lectio session dict, or False to log in here.

	Returns:
		{"status" : "ok", "surveys" : [...]} on success;
		{"status" : "error", "type" : "authenticate"} when login fails;
		{"status" : False, "error" : "Data not found"} when the page lacks
		the expected markup.
	"""
	url = "https://www.lectio.dk/lectio/%s/spoergeskema_rapport.aspx?type=mine&elevid=%s" % ( str(config["school_id"]), str(config["student_id"]) )

	if session is False:
		session = authenticate.authenticate(config)

	if session == False:
		return {"status" : "error", "type" : "authenticate"}
	# Insert the session information from the auth function
	cookies = {
		"lecmobile" : "0",
		"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
		"LastLoginUserName" : session["LastLoginUserName"],
		"lectiogsc" : session["lectiogsc"],
		"LectioTicket" : session["LectioTicket"]
	}

	# Insert User-agent headers and the cookie information
	headers = {
		"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
		"Content-Type" : "application/x-www-form-urlencoded",
		"Host" : "www.lectio.dk",
		"Origin" : "https://www.lectio.dk",
		"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
	}

	response = proxy.session.get(url, headers=headers)

	html = response.text

	soup = Soup(html)

	if soup.find("div", attrs={"id" : "s_m_Content_Content_answer_island_pa"}) is None:
		return {
			"status" : False,
			"error" : "Data not found"
		}
	surveys = []
	ids = []

	# URL patterns identifying each survey table's links, plus the shared
	# "d/m-Y H:M" timestamp pattern.
	openForAnsweringProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/spoergeskema_besvar.aspx\?id=(?P<survey_id>.*)&prevurl=(?P<prev_url>.*)")
	ownProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/spoergeskema_rediger.aspx\?id=(?P<survey_id>.*)&prevurl=(?P<prev_url>.*)")
	openForReportingProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/spoergeskema\/spoergeskemarapportering.aspx\?id=(?P<survey_id>.*)&prevurl=(?P<prev_url>.*)")
	dateTimeProg = re.compile(r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")

	# Table 1: surveys currently open for answering.
	if soup.find(attrs={"id" : "s_m_Content_Content_answer_island_pa"}).find("table").find(attrs={"class" : "noRecord"}) is None:
		for row in soup.find(attrs={"id" : "s_m_Content_Content_answer_island_pa"}).findAll("tr")[1:]:
			elements = row.findAll("td")
			if not elements[3].find("span") is None:
				dateGroups = dateTimeProg.match(elements[3].find("span").text.strip())
			else:
				dateGroups = dateTimeProg.match(elements[3].text)
			date = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
			idGroups = openForAnsweringProg.match(elements[0].find("a")["href"])
			id = idGroups.group("survey_id") if not idGroups is None else ""
			ids.append(id)
			surveys.append({
				"types" : ["open_for_answering"],
				"survey_id" : id,
				"anonymous" : True if elements[2].text == "Ja" else False,
				"answer_date" : date,
				"title" : elements[0].text.strip().replace("\r", "").replace("\n", "").replace("\t", "").encode("utf8")
			})

	# Table 2: surveys open for reporting; merge with existing entries by id.
	if soup.find(attrs={"id" : "s_m_Content_Content_report_island_pa"}).find(attrs={"class" : "noRecord"}) is None:
		for row in soup.find(attrs={"id" : "s_m_Content_Content_report_island_pa"}).findAll("tr")[1:]:
			elements = row.findAll("td")
			if not elements[2].find("span") is None:
				dateGroups = dateTimeProg.match(elements[2].find("span").text.strip())
			else:
				dateGroups = dateTimeProg.match(elements[2].text)
			answerDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
			dateGroups = dateTimeProg.match(elements[3].text)
			reportDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
			dateGroups = dateTimeProg.match(elements[4].text)
			endDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
			idGroups = openForReportingProg.match(elements[0].find("a")["href"])
			id = idGroups.group("survey_id") if not idGroups is None else ""

			# BUG FIX: the id used to be appended to `ids` *before* this
			# membership test, so the merge branch was always taken and the
			# standalone append below was dead code. Append only when the
			# survey is new (matching the "own surveys" section below).
			if id in ids:
				for x in surveys:
					if x["survey_id"] == id:
						x["answer_date"] = answerDate
						x["report_date"] = reportDate
						x["end_date"] = endDate
						x["types"].append("open_for_reporting")
			else:
				ids.append(id)
				surveys.append({
					# BUG FIX: "types" was a bare string here; every other
					# entry uses a list, and the merge code above calls
					# .append() on it.
					"types" : ["open_for_reporting"],
					"survey_id" : id,
					"answer_date" : answerDate,
					"report_date" : reportDate,
					"end_date" : endDate,
					"title" : elements[0].text.strip().replace("\r", "").replace("\n", "").replace("\t", "").encode("utf8")
				})

	# Table 3: the student's own (closed) surveys.
	if soup.find(attrs={"id" : "s_m_Content_Content_own_island_pa"}).find(attrs={"class" : "noRecord"}) is None:
		for row in soup.find(attrs={"id" : "s_m_Content_Content_own_island_pa"}).findAll("tr")[1:]:
			elements = row.findAll("td")
			if not elements[1].find("span") is None:
				dateGroups = dateTimeProg.match(elements[1].find("span").text.strip())
			else:
				dateGroups = dateTimeProg.match(elements[1].text)
			answerDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
			dateGroups = dateTimeProg.match(elements[2].text)
			reportDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
			dateGroups = dateTimeProg.match(elements[3].text)
			endDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
			idGroups = ownProg.match(elements[0].find("a")["href"])
			id = idGroups.group("survey_id") if not idGroups is None else ""

			if id in ids:
				for x in surveys:
					if x["survey_id"] == id:
						x["owner_id"] = str(config["student_id"])
						x["answer_date"] = answerDate
						x["report_date"] = reportDate
						x["end_date"] = endDate

			else:
				ids.append(id)
				surveys.append({
					"types" : ["closed"],
					"survey_id" : id,
					"answer_date" : answerDate,
					"report_date" : reportDate,
					"end_date" : endDate,
					"title" : elements[0].text.strip().replace("\r", "").replace("\n", "").replace("\t", "").encode("utf8")
				})

	# BUG FIX: this return was indented inside the "own surveys" branch, so
	# the function silently returned None whenever that table was empty.
	return {
		"status" : "ok",
		"surveys" : surveys
	}
# Beispiel #22
# 0
def survey_report ( config, session = False ):
	"""Fetch and parse a Lectio survey report page for one survey.

	Scrapes spoergeskemarapportering.aspx for config["survey_id"] at
	config["school_id"] and parses out the survey's header information,
	response statistics and per-section question/answer data.

	Parameters:
		config: dict with at least "school_id" and "survey_id".
		session: previously authenticated session dict, or False to
			log in here via authenticate.authenticate().

	Returns:
		{"status": "ok", "information": ..., "stats": ..., "sections": ...}
		on success; {"status": "error", "type": "authenticate"} when login
		fails; {"status": False, "error": "Data not found"} when the page
		does not contain the expected report container.
	"""
	url = "https://www.lectio.dk/lectio/%s/spoergeskema/spoergeskemarapportering.aspx?id=%s" % ( str(config["school_id"]), str(config["survey_id"]) )

	if session is False:
		session = authenticate.authenticate(config)

	if session == False:
		return {"status" : "error", "type" : "authenticate"}
	# Insert the session information from the auth function
	cookies = {
		"lecmobile" : "0",
		"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
		"LastLoginUserName" : session["LastLoginUserName"],
		"lectiogsc" : session["lectiogsc"],
		"LectioTicket" : session["LectioTicket"]
	}

	# Insert User-agent headers and the cookie information
	headers = {
		"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
		"Content-Type" : "application/x-www-form-urlencoded",
		"Host" : "www.lectio.dk",
		"Origin" : "https://www.lectio.dk",
		"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
	}

	response = proxy.session.get(url, headers=headers)

	html = response.text

	soup = Soup(html)

	# The report lives inside this container; its absence means the survey
	# id was wrong or the page layout changed.
	if soup.find("div", attrs={"id" : "m_Content_sdasd_pa"}) is None:
		return {
			"status" : False,
			"error" : "Data not found"
		}

	# Lectio renders timestamps as "d/m-Y H:M"; capture the parts lazily.
	dateTimeProg = re.compile(r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")

	# Table 0: header info (title, owner, answer deadline).
	# Table 1: submission statistics.
	informationTables = soup.find("div", attrs={"id" : "m_Content_sdasd_pa"}).findAll("table")
	infoElements = informationTables[0].findAll("td")

	dateGroups = dateTimeProg.match(infoElements[2].text)
	answerDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""

	# Resolve the survey owner via their context card (extra request).
	owner = context_card.user({
		"context_card_id" : infoElements[1].find("span")["lectiocontextcard"],
		"school_id" : str(config["school_id"])
	}, session)["user"]

	ownerUser = {
		"context_cards" : [infoElements[1].find("span")["lectiocontextcard"], owner["context_card_id"]],
		"picture_id" : owner["picture_id"],
		"name" : owner["name"],
		"type" : owner["type"]
	}

	# Owners are either students or teachers; expose the matching id key.
	if owner["type"] == "student":
		ownerUser["student_id"] = owner["student_id"]
	else:
		ownerUser["teacher_id"] = owner["teacher_id"]

	information = {
		"title" : infoElements[0].text.encode("utf8"),
		"answer_date" : answerDate,
		"owner" : ownerUser
	}

	# Statistics table: fixed cell positions for teachers/students/total.
	statElements = informationTables[1].findAll("td")

	stats = {
		"teachers" : {
			"registred" : statElements[1].text,
			"submitted" : statElements[2].text,
			"submitted_with_unsubscribed" : statElements[3].text,
			"not_submitted" : statElements[4].text
		},
		"students" : {
			"registred" : statElements[5].text,
			"submitted" : statElements[6].text,
			"submitted_with_unsubscribed" : statElements[7].text,
			"not_submitted" : statElements[8].text
		},
		"total" : {
			"registred" : statElements[9].text,
			"submitted" : statElements[10].text,
			"submitted_with_unsubscribed" : statElements[11].text,
			"not_submitted" : statElements[12].text
		}
	}

	sections = []

	# Accumulator state for the section currently being parsed.
	section_number = None
	section_title = None
	section_elements = []
	section_description = None

	# Accumulator state for the question currently being parsed.
	current_question_title = None
	current_question_number = None
	current_question_description = None

	# Headings look like "1 Title" (section) or "1.2 Title" (question);
	# the presence of a dot in the number distinguishes the two.
	titleProg = re.compile(r"(?P<number>[\d\.\d\S]*) (?P<title>.*)")

	type = "text"
	answerStats = []
	unanswered = 0
	unansweredPercent = 0

	# Walk the report rows top-to-bottom. Heading rows update the section/
	# question state; data rows flush the collected answers into
	# section_elements. Order matters, so state is mutated in place.
	for row in soup.find(attrs={"id" : "m_Content_ctl00_pa"}).find("table").findAll("tr", recursive=False):
		elements = row.findAll("td")

		text = elements[0].text.strip().replace("\r", "").replace("\t", "")

		if len(text) > 0:
			if not elements[0].find("h3") is None:
				titleGroups = titleProg.match(elements[0].find("h3").text)

				# No dot => a new top-level section begins.
				if not "." in titleGroups.group("number"):
					# Flush the previous section before starting a new one.
					if not section_number is None:
						sections.append({
							"number" : section_number,
							"title" : section_title,
							"elements" : section_elements,
							"description" : section_description
						})

						section_number = None
						section_title = None
						section_elements = []
						section_description = None

					section_number = titleGroups.group("number") if not titleGroups is None else None
					section_title = titleGroups.group("title") if not titleGroups is None else None
					elements[0].find("h3").decompose()
					section_description = elements[0].text.replace("\r\n", "").replace("\t", "").strip().strip("\n")
				else:
					# Dotted number => a question inside the current section.
					current_question_number = titleGroups.group("number") if not titleGroups is None else None
					current_question_title = titleGroups.group("title") if not titleGroups is None else None
					elements[0].find("h3").decompose()
					current_question_description = elements[0].text.replace("\r\n", "").replace("\t", "").strip().strip("\n")
			else:
				tables = row.findAll("table")
				answers = []

				# Rows without an <img> hold the individual answers; rows
				# with one hold the aggregated statistics (parsed below),
				# which arrive BEFORE the answer row and set `type`.
				if tables[0].find("img") is None:
					for x in tables[0].findAll("tr"):
						xElements = x.findAll("td")

						# Checkbox questions list options comma-separated.
						if type == "checkbox":
							options = xElements[3].text.split(", ")
						else:
							options = [xElements[3].text]

						if xElements[2].text == "anonym":
							answers.append({
								"anonymous" : True,
								"respondent_id" : xElements[0].text,
								"options" : options
							})
						else:
							answers.append({
								"anonymous" : False,
								"options" : options,
								"user_context_card_id" : xElements[0].find("span")["lectiocontextcard"],
								"user_text_id" : xElements[1].text,
								"user_team_text" : xElements[2].text
							})


					# Flush the completed question into the current section.
					section_elements.append({
						"number" : current_question_number.encode("utf8"),
						"title" : current_question_title.encode("utf8"),
						"description" : current_question_description.encode("utf8"),
						"type" : type,
						"answers" : answers,
						"answer_stats" : answerStats,
						"unanswered" : str(unanswered),
						"unanswered_percent" : str(unansweredPercent)
					})

					# Reset per-question state for the next question.
					type = "text"
					answerStats = []
					unanswered = 0
					unansweredPercent = 0
				else:
					# Statistics table: infer the question type and collect
					# per-option counts/percentages.
					for x in tables[0].findAll("tr"):
						xElements = x.findAll("td")
						if x.find("th").text == "Ubesvaret":
							type = "radio"
							unanswered = xElements[1].text
							unansweredPercent = xElements[2].text.replace(" %", "")
						else:
							type = "checkbox"
							answerStats.append({
								"text" : x.find("th").text.encode("utf8"),
								"number" : xElements[1].text,
								"percent" : xElements[2].text.replace(" %", "").replace(",", ".")
							})

	# Surveys without explicit section headings get a default section.
	if section_number == None:
		section_number = 1
		section_title = ""
		section_description = ""

	# Flush the final (or default) section.
	sections.append({
		"number" : section_number,
		"title" : section_title,
		"elements" : section_elements,
		"description" : section_description
	})

	return {
		"status" : "ok",
		"information" : information,
		"stats" : stats,
		"sections" : sections
	}
# Beispiel #23
# 0
def userinfo(config, session=False):
    """Scrape the logged-in student's Lectio front page and return profile data.

    Fetches the school front page and parses out the student id, picture id,
    teams and groups, the student's name/class, important-information notices,
    upcoming examinations and pending grade releases.

    Parameters:
        config: dict with at least "school_id" (and whatever
            authenticate.authenticate() needs when no session is given).
        session: previously authenticated session dict, or False to log in here.

    Returns:
        {"status": "ok", ...} with the parsed fields on success, or
        {"status": "error", "type": "authenticate"} when login fails.
    """
    if session == False:
        session = authenticate.authenticate(config)

    if session == False:
        return {"status": "error", "type": "authenticate"}

    else:
        url = urls.front_page_url.replace("{{SCHOOL_ID}}",
                                          str(config["school_id"]))

        # Insert the session information from the auth function
        cookies = {
            "lecmobile": "0",
            "ASP.NET_SessionId": session["ASP.NET_SessionId"],
            "LastLoginUserName": session["LastLoginUserName"],
            "lectiogsc": session["lectiogsc"],
            "LectioTicket": session["LectioTicket"]
        }

        # Insert User-agent headers and the cookie information
        headers = {
            "User-Agent":
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
            "Content-Type": "application/x-www-form-urlencoded",
            "Host": "www.lectio.dk",
            "Origin": "https://www.lectio.dk",
            "Cookie": functions.implode(cookies, "{{index}}={{value}}", "; ")
        }

        response = proxy.session.get(url, headers=headers)

        html = response.text

        soup = Soup(html)

        # Ids are embedded in link/img URLs; the raw prefixes are stripped
        # off in the return statement below.
        lectio_user_id = soup.find("div", attrs={
            "id": "s_m_masterleftDiv"
        }).find("a")["href"]
        picture_id = soup.find(
            "img", attrs={"id": "s_m_HeaderContent_picctrlthumbimage"})["src"]
        # Row 0: teams, row 1: built-in groups, row 2: own groups.
        teamRows = soup.find("div",
                             attrs={
                                 "id": "s_m_Content_Content_HoldAndGroupList"
                             }).find("table").findAll("tr")

        teams = []
        buildInGroups = []
        ownGroups = []

        idProg = re.compile(
            r"\/lectio\/(?P<school_id>[0-9]*)/SkemaNy.aspx\?type=(?P<type_name>.*)&holdelementid=(?P<team_element_id>.*)"
        )
        teamProg = re.compile(r"(?P<class_name>.*) (?P<team_name>.*)")

        # Teams
        for row in teamRows[0].findAll("td")[1].findAll("a"):
            idGroups = idProg.match(row["href"])
            name = row.text
            teamGroups = teamProg.match(name)
            teams.append({
                "id":
                idGroups.group("team_element_id"),
                "class_name":
                unicode(teamGroups.group("class_name"))
                if not teamGroups is None else "",
                "team_name":
                unicode(teamGroups.group("team_name"))
                if not teamGroups is None else "",
                "name":
                name
            })

        # Built-in Groups
        for row in teamRows[1].findAll("td")[1].findAll("a"):
            idGroups = idProg.match(row["href"])
            name = row.text
            buildInGroups.append({
                "id": idGroups.group("team_element_id"),
                "name": name
            })

        # Own groups.
        # BUGFIX: a trailing comma previously turned the id into a 1-tuple,
        # so ownGroups entries carried ("<id>",) instead of "<id>".
        for row in teamRows[2].findAll("td")[1].findAll("a"):
            idGroups = idProg.match(row["href"])
            group_id = idGroups.group("team_element_id")
            name = row.text
            ownGroups.append({"id": group_id, "name": name})

        # Student name.
        # NOTE(review): this quoted pattern is unlikely to match the actual
        # header text, leaving the title unchanged; the real extraction
        # happens with nameProg below — confirm before removing.
        name = re.sub(
            r'"Eleven (\w+), (\w+) - Forside"', r'\2',
            soup.find("div", attrs={
                "id": "s_m_HeaderContent_MainTitle"
            }).text)

        # s_m_Content_Content_BookReservationInfo_ctl00_DashBoardItem2

        # Info
        informations = []
        schoolTable = soup.find(
            "table", attrs={"id": "s_m_Content_Content_importantInfo"})
        examinations = []
        grades = []
        infoObjects = schoolTable.findAll("tr")
        # Lectio renders timestamps as "d/m-Y H:M".
        dayTimeProg = re.compile(
            r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)"
        )

        # Pending grade releases (optional table).
        if not soup.find("table",
                         attrs={"id": "s_m_Content_Content_KaraktererInfo"
                                }) is None:
            for row in soup.find("table",
                                 attrs={
                                     "id": "s_m_Content_Content_KaraktererInfo"
                                 }).findAll("tr"):
                elements = row.findAll("td")
                gradeTeams = []
                gradeTeamProg = re.compile(
                    r"(?P<class_name>.*) (?P<team_name>.*)")
                dayTimeGroups = dayTimeProg.match(elements[2]["title"])

                for gradeTeam in elements[1]["title"].replace(
                        "Frigives: ", "").split(", "):
                    gradeTeamGroups = gradeTeamProg.match(gradeTeam)
                    gradeTeams.append({
                        "class_name":
                        unicode(gradeTeamGroups.group("class_name"))
                        if not gradeTeamGroups is None else "",
                        "team_name":
                        unicode(gradeTeamGroups.group("team_name"))
                        if not gradeTeamGroups is None else ""
                    })
                grades.append({
                    "date":
                    datetime.strptime(
                        "%s/%s-%s %s:%s" %
                        (functions.zeroPadding(dayTimeGroups.group("day")),
                         functions.zeroPadding(dayTimeGroups.group("month")),
                         dayTimeGroups.group("year"),
                         dayTimeGroups.group("hour"),
                         dayTimeGroups.group("minute")), "%d/%m-%Y %H:%M"),
                    "teams":
                    gradeTeams
                })

        # Upcoming examinations (optional table).
        if not soup.find("table",
                         attrs={"id": "s_m_Content_Content_EksamenerInfo"
                                }) is None:
            examObjects = soup.find("table",
                                    attrs={
                                        "id":
                                        "s_m_Content_Content_EksamenerInfo"
                                    }).findAll("tr")
        else:
            examObjects = []

        examIdProg = re.compile(
            r"\/lectio\/(?P<school_id>.*)\/proevehold.aspx\?type=proevehold&ProeveholdId=(?P<test_team_id>.*)&prevurl=forside.aspx"
        )

        for row in examObjects:
            elements = row.findAll("td")
            examIdGroups = examIdProg.match(elements[1].find("a")["href"])
            dayTimeGroups = dayTimeProg.match(elements[2]["title"])
            examNameProg = re.compile(
                r"(?P<class_name>.*) (?P<team_name>.*) (?P<type_name>.*)\. eks\."
            )
            examNameGroups = examNameProg.match(
                unicode(elements[1].find("a").find("span").text))
            examinations.append({
                "test_team_id":
                examIdGroups.group("test_team_id"),
                "school_id":
                examIdGroups.group("school_id"),
                "title":
                unicode(elements[1].find("a").find("span").text),
                "class_name":
                examNameGroups.group("class_name")
                if not examNameGroups is None else "",
                "team_name":
                examNameGroups.group("team_name")
                if not examNameGroups is None else "",
                "date":
                datetime.strptime(
                    "%s/%s-%s %s:%s" %
                    (functions.zeroPadding(dayTimeGroups.group("day")),
                     functions.zeroPadding(dayTimeGroups.group("month")),
                     dayTimeGroups.group("year"), dayTimeGroups.group("hour"),
                     dayTimeGroups.group("minute")), "%d/%m-%Y %H:%M")
            })

        # Important-information notices; the prio image encodes severity.
        if not infoObjects is None:
            for info in infoObjects:
                infoType = ""
                tds = info.findAll("td")
                # Skip placeholder "norecord" rows.
                if tds[0]["class"] is None or not tds[0][
                        "class"] is None and not "norecord" in tds[0]["class"]:
                    if tds[0].find("img")["src"] == "/lectio/img/prio1.auto":
                        infoType = "red"
                    elif tds[0].find("img")["src"] == "/lectio/img/prio2.auto":
                        infoType = "yellow"
                    elif tds[0].find("img")["src"] == "/lectio/img/prio3.auto":
                        infoType = "grey"
                    informations.append({
                        "text": tds[1].find("span").text,
                        "type": infoType
                    })

        # Header title format: "Eleven <name>, <class> - Forside"
        nameProg = re.compile(
            r"Eleven (?P<name>.*), (?P<class_name>.*) - Forside")
        nameGroups = nameProg.match(name)

        return {
            "status":
            "ok",
            "student_id":
            lectio_user_id.replace(
                "/lectio/%s/SkemaNy.aspx?type=elev&elevid=" %
                (str(config["school_id"])), ""),
            "picture_id":
            picture_id.replace(
                "/lectio/%s/GetImage.aspx?pictureid=" %
                (str(config["school_id"])), ""),
            "teams":
            teams,
            "buildInGroups":
            buildInGroups,
            "ownGroups":
            ownGroups,
            "name":
            unicode(nameGroups.group("name"))
            if not nameGroups is None else "",
            "class_name":
            nameGroups.group("class_name") if not nameGroups is None else "",
            "information":
            informations,
            "examinations":
            examinations,
            "grades":
            grades,
            "username":
            soup.find(attrs={
                "id": "s_m_masterleftDiv"
            }).find("a").text
        }