Example #1
def route_preview_questionnaire(questionnaireName):
    errors = []
    jsonData = None  # stays None if the file cannot be read below

    try:
        # Context manager closes the file; the original leaked the handle.
        with open(current_app.root_path + '/questionnaires/' +
                  questionnaireName + ".json", 'r') as f:
            jsonData = json.loads(f.read())
    except Exception as e:
        errors = list(e.args)

    tableName = "questionnaire_" + questionnaireName

    if questionnaireName in page_list.get_questionnaire_list():
        try:
            db.session.query(db.metadata.tables[tableName]).first()
        except Exception as e:
            errors.extend(list(e.args))
            if "(OperationalError) no such column:" in e.args[0]:
                errors.append(
                    "Click <a href=\"?fix_errors\">here</a> if you would like to try to automatically add "
                    "this column. Alternatively, you can drop the table and it will be recreated."
                )
            elif "(OperationalError) no such table:" in e.args[0]:
                errors.append(
                    "Click <a href=\"?fix_errors\">here</a> if you would like to try to automatically create "
                    "this table. Alternatively, you can restart the server and it will be created."
                )

        if 'fix_errors' in request.args:
            # Figure out what column it is by parsing errors.
            for e in list(errors):  # iterate over a copy; messages are appended below
                if "(OperationalError) no such column:" in e:
                    columnName = e.split(tableName + ".")[-1]
                    dataType = db.metadata.tables[tableName].columns[
                        columnName].type

                    addColumn = db.DDL(
                        str.format("ALTER TABLE {} ADD COLUMN {} {}",
                                   tableName, columnName, dataType))
                    db.engine.execute(addColumn)

                    errors.append(
                        str.format(
                            u"{} {} was added to {}. "
                            u"This error should be gone when you refresh.",
                            columnName, dataType, tableName))

                if "(OperationalError) no such table:" in e:
                    db.create_all()
                    errors.append(
                        str.format(
                            u"The error should be gone if you refresh."))

    return render_template("preview_questionnaire.html",
                           q=jsonData,
                           errors=errors)
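
The ?fix_errors branch above amounts to emitting an ALTER TABLE statement against the live database. A minimal, self-contained sketch of that repair step using plain SQLAlchemy (the table and column names here are hypothetical, not taken from the app):

from sqlalchemy import create_engine
from sqlalchemy.schema import DDL

engine = create_engine("sqlite://")  # throwaway in-memory database
with engine.begin() as conn:
    conn.execute(DDL("CREATE TABLE questionnaire_demo (id INTEGER PRIMARY KEY)"))
    # The route derives the column name from the error text and the type from
    # the model metadata; both are hard-coded here for illustration.
    conn.execute(DDL("ALTER TABLE questionnaire_demo ADD COLUMN age INTEGER"))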
Example #2
def create_breadcrumbs():
    """
    An optional function, the result of which can be passed to templates which extend the base ``template.html`` file.
    Pages with the same name will be represented as Page Name (3) or **Page Name (2 of 3)** when the user is on that
    particular page.

    :returns: A list of "breadcrumbs", each of which is a dictionary with a human-readable name for the path and
     whether or not that page is the active page, meaning it should be made bold.
    """

    page_list = current_app.page_list.flat_page_list()
    currentIndex = current_app.page_list.get_index(request.path)
    crumbs = []

    # Create breadcrumbs (duplicates handled no differently than anything else)
    for i, page in enumerate(page_list):
        if page['name'] == '':
            continue

        crumb = {'name': page['name'], 'active': False}

        if i == currentIndex:  # list.index() would always match the first duplicate page
            crumb['active'] = True

        crumbs.append(crumb)

    # Check for and handle any groupings of pages with the same name.
    for i, crumb in enumerate(crumbs):
        if i + 1 == len(crumbs):
            break

        crumbsInGroup = 1
        positionInGroup = 0

        if crumb['active']:
            positionInGroup = crumbsInGroup

        # Keep removing pages after the first one which have the same name.
        while i + 1 < len(crumbs) and crumbs[i]['name'] == crumbs[i + 1]['name']:
            removedCrumb = crumbs.pop(i + 1)

            crumbsInGroup += 1

            if removedCrumb['active']:
                crumbs[i]['active'] = True
                positionInGroup = crumbsInGroup

        if crumbsInGroup > 1 and positionInGroup > 0:
            crumbs[i]['name'] += str.format(u" ({0} of {1})", positionInGroup,
                                            crumbsInGroup)
        elif crumbsInGroup > 1:
            crumbs[i]['name'] += str.format(u" ({0})", crumbsInGroup)

    return crumbs
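
A minimal illustration of the grouping behaviour the docstring describes, with a made-up flat page list (names and flags are hypothetical):

# Crumbs as built by the first loop above: three pages share a name,
# and the user is on the second of them.
crumbs = [{'name': 'Intro', 'active': False},
          {'name': 'Survey', 'active': False},
          {'name': 'Survey', 'active': True},
          {'name': 'Survey', 'active': False}]
# The grouping pass collapses the duplicates into a single crumb that
# records the active page's position within the group:
#   [{'name': 'Intro', 'active': False},
#    {'name': 'Survey (2 of 3)', 'active': True}]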
Example #3
    def write_file(self, out_file_name=None):
        if len(self.plot_abc_df) == 0:
            raise Exception("There is no ABC data - call aggregate first")

        if out_file_name is None:
            out_file_name = str.format('{0}/{1} - All WoodyC.csv', os.path.dirname(self._woody_file_name),
                                       os.path.splitext(os.path.basename(self._woody_file_name))[0])

        self.plot_abc_df.to_csv(out_file_name, index=False)
Example #4
def print_low_commenters(data, amount):
    ''' print low N commenters of the thread '''
    print(str.format("\n{0} Most inactive commenters in thread {1}", amount, thread_id))
    temp = []
    try:
        # dump data to list of tuples [nickname, # of messages]
        for k in list(data.keys()):
            try:
                # strip non-ascii characters, then decode back to str for printing
                temp.append([k.encode('ascii', 'ignore').decode('ascii'), len(data[k])])
            except TypeError:
                pass
    except KeyError:
        print("Printing of inactive commenters failed. Check data integrity for errors")

    temp = heapq.nsmallest(amount, temp, key=lambda x: x[1])  # pick the N commenters with the fewest messages
    counter = 1
    for i in temp:
        print(str.format("{0}: {1} : {2} messages", str(counter).rjust(2, ' '), str(i[0]).ljust(25, ' '), str(i[1]).rjust(5, ' ')))
        counter += 1
Example #5
def escape_csv(value):  # renamed parameter: `input` shadowed the builtin
    # Quote strings, flattening newlines and swapping double quotes for single ones.
    if isinstance(value, six.string_types):
        return str.format(
            u"\"{}\"",
            value.strip().replace("\n", " ").replace("\r", " ").replace("\"", "'"))
    if value is None:
        return ""
    if type(value) is bool:  # checked explicitly so booleans export as 1/0
        return "1" if value else "0"
    return str(value)
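
A few illustrative calls, assuming the helper above is in scope (the inputs are made up):

assert escape_csv('say "hi"\nplease') == "\"say 'hi' please\""
assert escape_csv(None) == ""
assert escape_csv(True) == "1"
assert escape_csv(3.5) == "3.5"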
Example #6
def route_export_item_timing():
    questionnaires = current_app.page_list.get_questionnaire_list(True)
    header = "participantID,mTurkID"
    output = ""

    # Header columns are derived from the first participant's logs only; all
    # participants are assumed to share the same log keys.
    headerComplete = False

    results = db.session.query(
        db.Participant).filter(db.Participant.finished == True).all()

    for p in results:
        output += str.format(u"{},\"{}\"", p.participantID, p.mTurkID.strip())

        for qName in questionnaires:
            tag = ""

            if '/' in qName:
                qNameParts = qName.split('/')
                qName = qNameParts[0]
                tag = qNameParts[1]

            q = p.questionnaire(qName, tag)
            logs = p.questionnaire_log(qName, tag)

            qNameFull = qName
            if len(tag) > 0:
                qNameFull = "{}_{}".format(qName, tag)

            for key in sorted(logs.keys()):
                if not headerComplete:
                    header += ",{}_{}".format(qNameFull, key)

                output += ",{}".format(logs[key])

        output += "\n"
        headerComplete = True

    return render_template("export_csv.html",
                           data=str.format(u"{}\n{}", header, output))
Example #7
def load_data(filename, head):
    ''' load a dataset from file '''
    temp = {}
    with open(filename, "r") as infile:
        temp = json.loads(infile.read())
        if temp["data_last"] < head:
            print(str.format("Data is out of date. {0} pages more since last update", head - temp["data_last"]))
            while True:
                choice = input("Do you want to update the dataset (Y/N): ")

                if choice.lower() == 'y':  # `choice in 'Yy'` would also accept empty input
                    downloaded_dict = download_thread_data(temp["data_last"], head)
                    if downloaded_dict is not None:
                        temp = merge_dicts(temp, downloaded_dict)
                    break
                elif choice.lower() == 'n':
                    return temp, False
        else:
            print("Data already up-to-date")
            return temp, True

    print(str.format("User data up-to-date and loaded for thread ID {0}", thread_id))
    return temp, True
Example #8
    def decorated_function(*args, **kwargs):
        currentUrl = request.url.replace(request.url_root, "")

        # Don't allow users to skip things or go back. Redirect to the correct page if they try.
        if 'currentUrl' in session and currentUrl != session['currentUrl']:
            return redirect(
                str.format(u"{}/{}", current_app.config["APPLICATION_ROOT"],
                           str(session['currentUrl'])))

        # If user hasn't been here before, set their current URL to the first one in the list.
        if 'currentUrl' not in session:
            session['currentUrl'] = current_app.page_list.flat_page_list()[0]['path']

            # If the user happens to be on the page they are already supposed to be at, continue as normal
            # This should only happen for the first page in the experiment.
            if session['currentUrl'] == currentUrl:
                return f(*args, **kwargs)

            return redirect(current_app.config["APPLICATION_ROOT"] + "/" +
                            session['currentUrl'])

        # Add or update their progress
        if 'participantID' in session:
            participant = db.session.query(db.Participant).get(
                session['participantID'])
            participant.lastActiveOn = datetime.datetime.now()
            db.session.commit()

            progress = db.session.query(db.Progress).filter(
                db.Progress.participantID == session['participantID'],
                db.Progress.path == currentUrl).one_or_none()

            if progress is None:
                progress = db.Progress()
                progress.participantID = session['participantID']
                progress.path = currentUrl
                progress.startedOn = datetime.datetime.now()
                db.session.add(progress)
                db.session.commit()

            if request.method == "POST":
                progress.submittedOn = datetime.datetime.now()
                db.session.commit()

        return f(*args, **kwargs)
Example #9
    def write_agc_plot_file(self, out_file_name=None):
        """ Write AGC values etc per plot to CSV file.

        Parameters
        ----------
        out_file_name : str
            (optional) name of csv file to write to
        """

        if len(self.plot_summary_agc_df) == 0:
            raise Exception('There is no AGC data - call estimate() first')

        if out_file_name is None:
            out_file_name = str.format('{0}/{1} - Summary WoodyC & LitterC.csv', os.path.dirname(self._woody_file_name),
                                       os.path.splitext(os.path.basename(self._woody_file_name))[0])

        logger.info('Writing plot AGC summary to: {0}'.format(out_file_name))
        self.plot_summary_agc_df.to_csv(out_file_name, index=False)
Example #10
def format_species_name(species):
    """ Formats the species name into abbreviated dot notation.

    Parameters
    ----------
    species : str
        the species name e.g. 'Portulacaria afra'

    Returns
    -------
    str
        the abbreviated species name e.g. 'P.afra'
    """
    species = str(species).strip()
    comps = species.split(' ', 1)
    if len(comps) > 1:
        abbrev_species = str.format("{0}.{1}", comps[0][0], comps[1].strip())
    else:
        abbrev_species = species
    return abbrev_species
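
Quick sanity checks for the abbreviation rule the docstring describes, assuming the function above is in scope (the inputs are made up):

assert format_species_name('Portulacaria afra') == 'P.afra'
assert format_species_name('  Euclea undulata ') == 'E.undulata'
assert format_species_name('Aloe') == 'Aloe'  # single-word names pass through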
Example #11
def save_data(filename, data):
    ''' saves dataset to a file '''
    print(str.format("Saving data to {0}", filename))
    with codecs.open(filename, "w", "utf-8") as f:
        json.dump(data, f, sort_keys=True)
Example #12
    data_last = 0
    thread_id = 0
    thread_url = options.thread_url
    start = 0
    end = 0

    if thread_url == "":
        print("No thread URL provided. Using a default one...")
        thread_url = defaul_thread_url

    data_last, thread_id = fetch_thread_header_info()
    filename = str(thread_id) + ".dat"

    # if a data file belonging to this thread exists, read the dataset from file instead of downloading it
    if os.path.isfile(filename):
        print(str.format("User data for thread ID {0} found. Reading from file...", thread_id))
        userdata, should_save = load_data(filename, data_last)
        if not should_save:
            start, end, filename = ask_pages_to_fetch(data_last)
    else:
        if data_last > 500:
            start, end, filename = ask_pages_to_fetch(data_last)
        # if user wanted to fetch a range of pages, check if that range exists and load it instead of downloading
        if os.path.isfile(filename):
            print(str.format("User data for thread ID {0} found with this page range. Reading from file...", thread_id))
            userdata, should_save = load_data(filename, data_last)
        else: # just download the thread data
            userdata = download_thread_data(start, end)
            should_save = True

    # prints out the top and low commenters of the thread
Example #13
#       c           converts a decimal integer to the corresponding Unicode character.
#       e or E      converts to scientific notation, then formats the result.
#       g or G      switches automatically between e and f (or E and F).
#       b           converts a decimal number to binary, then formats the result.
#       o           converts a decimal number to octal, then formats the result.
#       x or X      converts a decimal number to hexadecimal, then formats the result.
#       f or F      converts to a float (6 decimal places by default), then formats the result.
#       %           displays as a percentage (6 decimal places by default).
from builtins import str

strValue = "Site name: {:>9s}\tURL: {:s}"
print(strValue.format("C语言中文网", "c.biancheng.net"))

hello = "hello  {1:s}"  # explicit index 1 selects the second positional argument

print(str.format(hello, "111", "222"))

# In real-world code, numeric values need to be displayed in many forms, e.g. as
# currency or percentages; the format() method can render a value in any of them.
# as currency
print("Currency: {:,d}".format(1000000))
# in scientific notation
print("Scientific notation: {:E}".format(1200.12))
# in hexadecimal
print("100 in hex: {:#x}".format(100))
# as a percentage
print("0.01 as a percentage: {:.0%}".format(0.01))

# without explicit positions
print("Implicit positions: {}{}".format("hello", "world"))
# with explicit positions
print("Explicit positions: {0}{1}".format("hello", "world"))
Example #14
    def run (self):
        while True:
            #read one byte at a time in a loop
            by = self.sp.read(1)

            if(len(by) == 0):
                continue

            #if the byte is one of the escape characters read it in
            byte = None
            try:
                byte = by.decode("utf-8")
                #sys.stdout.write(byte)
            except UnicodeDecodeError:
                #it won't be valid if this fails
                continue

            buf = None
            if(byte == "#"):
                #see if the next character is a reset
                byte = self.sp.read(1)
                if(byte == "r"):
                    print("Fake-Radio Reset. Ready to receive radio Commands!")
                elif(byte == "w"):
                    #waiting on you to return data
                    #is this the state you should be in right now??
                    print("waiting on response");
                    pass
                elif(byte == "p"):
                    #waiting on you to return data
                    #is this the state you should be in right now??
                    print("Kernel Panic - dumping buffer");
                    #use a bug number just cause
                    buf = self.sp.read(16000);
                    print(buf.decode("utf-8"))
                    pass
                else:
                    sys.stdout.write("#" + byte.decode('utf-8'))

                continue
            elif(byte == "$"):
                #this is an actual message
                #read two more bytes to get the length
                num_bytes = struct.unpack('<H', self.sp.read(2))[0]

                #read in length number of bytes
                buf = self.sp.read(num_bytes)

                #did we get the number of bytes or timeout?
                if(len(buf) < num_bytes):
                    #we have a long enough timeout this shouldn't happen
                    #disregard this message
                    print("Received buffer shorted than expected. Discarding")
                    continue
            else:
                sys.stdout.write(byte)
                continue


            #we have a valid buffer, we should parse it
            url_len_struct = struct.unpack('<H', buf[0:2])
            url_len = url_len_struct[0]
            buf = buf[2:]
            url = buf[0:url_len].decode("utf-8")
            buf = buf[url_len:]
            num_headers = struct.unpack('<B', buf[0:1])[0]
            buf = buf[1:]
            headers = {}
            for i in range(0,num_headers):
                header_len = struct.unpack('<B',buf[0:1])[0]
                buf = buf[1:]
                header = buf[0:header_len].decode("utf-8")
                buf = buf[header_len:]
                value_len = struct.unpack('<B',buf[0:1])[0]
                buf = buf[1:]
                value = buf[0:value_len].decode("utf-8")
                buf = buf[value_len:]
                headers[header] = value


            body_len = struct.unpack('<H', buf[0:2])[0]
            buf = buf[2:]
            body = bytearray()
            body.extend(buf[:body_len])

            #now that we have parsed the buffer, post
            #split url into the first and second parts
            s_index = url.find("/")
            base = url[:s_index]
            end = url[s_index:]

            # is the base the gdp address?
            if(base == "gdp.lab11.eecs.umich.edu"):
                    stat = 0
                    reason = ""
                    print("")
                    print("#######################################################")
                    print("Trying to post to GDP")
                    index1 = 1+end[1:].find("/")
                    index2 = index1 + 1 + end[index1+1:].find("/")
                    index3 = index2 + 1 + end[index2+1:].find("/")
                    #version
                    try:
                        version = end[index1+1:index2]
                        log_name = end[index2+1:index3]
                        function = end[index3+1:]
                    except:
                        print("There was an error, aborting")
                        print("Do you have GDP installed?")
                        print("#######################################################")
                        print("")
                        continue

                    if(function == "append" or function == "Append"):
                            print("Attempting to append to log name {}".format(log_name))
                            #try to create the log. Don't know how to do this in python
                            #so instead call the shell
                            ret = os.system("gcl-create -C [email protected] -k none " + log_name)
                            if((ret >> 8) == 0):
                                print("Successfully created log")
                                stat = 201
                                reason = "OK - Log Created"
                            elif((ret >> 8) == 73):
                                print("Log already exists")
                                stat = 200
                                reason = "OK"
                            else:
                                print("An unkown gdp error(code {}) occurred).".format(str((ret >> 8))))
                                stat = 500
                                reason = "Server Error"

                            try:
                                gcl_name = gdp.GDP_NAME(log_name)
                                gcl_handle = gdp.GDP_GCL(gcl_name,gdp.GDP_MODE_AO)
                                gcl_handle.append({"signpost-data": body})
                                print("Append success")
                            except:
                                print("There was an error, aborting")
                                stat = 500
                                reason = "Server Error"
                    else:
                        print("Does not support that function")
                        stat = 503
                        reason = "Service Unkown"

                    #form the response here based on some of the stats above
                    send_buf = bytearray()
                    send_buf.extend(struct.pack('<H',stat))
                    send_buf.extend(struct.pack('<H',len(reason)))
                    send_buf.extend(reason)
                    send_buf.extend(struct.pack('<B',2))

                    send_buf.extend(struct.pack('<B',len("content-type")))
                    send_buf.extend("content-type")
                    send_buf.extend(struct.pack('<B',len("application/octet-stream")))
                    send_buf.extend("application/octet-stream")

                    send_buf.extend(struct.pack('<B',len("content-length")))
                    send_buf.extend("content-length")
                    send_buf.extend(struct.pack('<B',len("1")))
                    send_buf.extend("1")
                    send_buf.extend(struct.pack('<H',1))
                    send_buf.extend(struct.pack('<B',0x00))
                    self.sp.write(send_buf)
                    print("#######################################################")
                    print("")

            else:
                #this is a real http post. let's do it
                print("")
                print("#######################################################")
                print("Trying to post to {}".format(url))
                print("Post headers: {}".format(headers))
                is_ascii = False
                try:
                    if re.match('^[\x0a-\x7F]+$', body.decode('utf-8')):
                        is_ascii = True
                except UnicodeDecodeError:
                    pass
                if is_ascii:
                    # all bytes in body are printable characters
                    print("Post body: {}".format(body.decode('utf-8')))
                else:
                    print("Post body: <binary data, length {}>".format(len(body)))
                    print('  ' + ' '.join(map(lambda x: str.format('{:02x}', x), body)))
                print("")
                try:
                    conn = httplib.HTTPConnection(base)
                    conn.request("POST",end,body,headers)
                    response = conn.getresponse()
                except:
                    print("Post failed, please check your destination URL")
                    print("#######################################################")
                    print("")
                    continue


                #we should send this back, but for now that's good
                print("Post Succeeded! See response below.")
                print("Status: {}, Reason: {}".format(response.status,response.reason))
                body = response.read()
                print("Body: {}".format(body.decode('utf-8')))
                print("")
                #now format the response and send it back to the radio
                send_buf = bytearray()
                send_buf.extend(struct.pack('<H',response.status))
                send_buf.extend(struct.pack('<H',len(response.reason)))
                send_buf.extend(response.reason.encode('utf-8'))
                send_buf.extend(struct.pack('<B',len(response.getheaders())))
                for header in response.getheaders():
                    h0 = header[0].encode('utf-8')
                    h1 = header[1].encode('utf-8')
                    send_buf.extend(struct.pack('<B',len(h0)))
                    send_buf.extend(h0)
                    send_buf.extend(struct.pack('<B',len(h1)))
                    send_buf.extend(h1)
                send_buf.extend(struct.pack('<H',len(body)))
                send_buf.extend(body)
                self.sp.write(send_buf)
                print("Sending response back to radio")
                print("#######################################################")
                print("")
Example #15
    def create_db_class(self):
        #print "createDBClass() for " + self.fileName

        if not self.fields:  # If list is empty
            self.fetch_fields()

        if not self.calcFields:
            self.calcFields = []

        tableName = str.format(u"questionnaire_{}", self.fileName)

        tableAttr = {
            '__tablename__': tableName,
            str.format(u'{0}ID', self.fileName):
                db.Column(db.Integer, primary_key=True, autoincrement=True),
            'participantID':
                db.Column(db.Integer,
                          db.ForeignKey("participant.participantID"),
                          nullable=False),
            #'participantID': db.Column(db.Integer),
            'participant': db.relationship("Participant", backref=tableName),
            'tag': db.Column(db.String(30), nullable=False, default=""),
            'timeStarted':
                db.Column(db.DateTime, nullable=False, default=db.func.now()),
            'timeEnded':
                db.Column(db.DateTime, nullable=False, default=db.func.now()),
            'duration':
                lambda self: (self.timeEnded - self.timeStarted).total_seconds()
        }

        for field in self.fields:
            if field.dataType == "integer":
                tableAttr[field.id] = db.Column(db.Integer,
                                                nullable=False,
                                                default=0)
            else:
                tableAttr[field.id] = db.Column(db.Text,
                                                nullable=False,
                                                default="")

        if "participant_calculations" in self.jsonData:

            def execute_calculation(self, calculation):
                try:
                    return eval(calculation)
                except Exception as e:
                    error = "Unable to add calculated field `{0}` to the export of questionnaire `{1}`. \n" \
                            "The preprocessed calculation string was: `{2}`\n" \
                            "The thrown exception was: {3}".format(field_name, self.__tablename__, calculation, e)
                    print(error)
                    raise Exception(error)

            for field_name, calculation in self.jsonData[
                    "participant_calculations"].items():
                self.calcFields.append(field_name)
                calculation = self.preprocess_calculation_string(calculation)

                # The default argument binds the current calculation string.
                tableAttr[field_name] = (
                    lambda self, calculation=calculation:
                        execute_calculation(self, calculation))

        self.dbClass = type(self.fileName, (db.Model, ), tableAttr)
Example #16
def route_export():
    unfinishedCount = db.session.query(db.Participant).filter(db.Participant.finished == False).count()  # For display only
    missingCount = 0
    innerJoins = db.session.query(db.Participant)  # Participants with complete data
    leftJoins = db.session.query(db.Participant)  # Participants with complete or incomplete data

    includeUnfinished = request.args.get('includeUnfinished', False)
    includeMissing = request.args.get('includeMissing', False)

    qList = page_list.get_questionnaire_list(include_tags=True)

    columns = dict()

    columns['participant'] = [
        "participantID",
        "mTurkID",
        "condition",
        "duration",
        "finished"
    ]

    calculatedColumns = dict()

    # First loop constructs the query and fetches the column names
    for qNameAndTag in qList:
        qName, qTag = questionnaire_name_and_tag(qNameAndTag)

        # The python class that describes the questionnaire
        questionnaire = questionnaires[qName]

        # Add the questionnaire's table/class to the query...
        qDBC = db.aliased(questionnaires[qName].dbClass, name=qNameAndTag)

        leftJoins = leftJoins.outerjoin(qDBC,
                              db.and_(
                                  qDBC.participantID == db.Participant.participantID,
                                  qDBC.tag == qTag
                              )).add_entity(qDBC)

        innerJoins = innerJoins.join(qDBC,
                              db.and_(
                                  qDBC.participantID == db.Participant.participantID,
                                  qDBC.tag == qTag
                              )).add_entity(qDBC)

        #attributes = questionnaires[qName].dbClass.__dict__
        #keys = sorted(attributes.keys())

        columns[qNameAndTag] = []
        calculatedColumns[qNameAndTag] = []

        # Make a list of the columns to later construct the CSV header row
        # This could also be done with questionnaire.fields
        for column in questionnaire.fields:
            columns[qNameAndTag].append(column.id)

        # Similarly, make a list of calculated columns to later be part of the CSV header row.
        for column in questionnaire.calcFields:
            calculatedColumns[qNameAndTag].append(column)

    if not includeUnfinished:
        leftJoins = leftJoins.filter(db.Participant.finished == True)
        innerJoins = innerJoins.filter(db.Participant.finished == True)

    leftJoins = leftJoins.group_by(db.Participant.participantID)
    innerJoins = innerJoins.group_by(db.Participant.participantID)

    if includeMissing:
        rows = leftJoins.all()
    else:
        rows = innerJoins.all()

    missingCount = leftJoins.filter(db.Participant.finished == True).count() - innerJoins.count()

    # Repeated measures in other tables...
    customExports = []

    for export in current_app.config['EXPORT']:
        levels, baseQuery = create_export_base_queries(export)
        customExports.append({'options': export, 'base_query': baseQuery, 'levels': levels})


    # Now that the data is loaded, construct the CSV syntax.
    # Starting with the header row...
    columnList = columns['participant']

    # Add questionnaire fields to CSV header
    for qNameAndTag in qList:
        qName, qTag = questionnaire_name_and_tag(qNameAndTag)

        for col in columns[qNameAndTag]:
            if col.startswith(qName + "_"):  # If it's already prefixed, strip the prefix so the code below works
                col = col[len(qName) + 1:]  # replace() would leave a double underscore behind

            # Prefix the column with the questionnaire name
            if qTag != "":
                col = qName + "_" + qTag + "_" + col
            else:
                col = qName + "_" + col

            columnList.append(col)

        if qTag != "":
            columnList.append(str.format(u"{}_{}_duration", qName, qTag))
        else:
            columnList.append(str.format(u"{}_duration", qName))

        # Add any calculated columns to the CSV header
        for calcCol in calculatedColumns[qNameAndTag]:
            if qTag != "":
                columnList.append(str.format(u"{}_{}_{}", qName, qTag, calcCol))
            else:
                columnList.append(str.format(u"{}_{}", qName, calcCol))

    # For custom exports, add columns based on levels determined by prior query
    for export in customExports:
        for level in export['levels']:
            for field in export['options']['fields']:
                columnList.append(str.format(u"{}_{}", field, str(level[0]).replace(" ", "_")))


    # Finally construct the CSV string.
    csvString = ",".join(columnList) + "\n"  # CSV Header

    for row in rows:
        csvString += str.format(u"{},{},{},{},{}",
                                row.Participant.participantID,
                                row.Participant.mTurkID,
                                row.Participant.condition,
                                row.Participant.duration,
                                row.Participant.finished
                                )

        for qNameAndTag in qList:
            qData = getattr(row, qNameAndTag)
            for col in columns[qNameAndTag]:
                if qData:
                    csvString += "," + escape_csv(getattr(qData, col))
                else:
                    csvString += ","

            if not qData:
                csvString += ","
            else:
                csvString += str.format(u",{}", qData.duration())  # Special case for duration

            # See if there are any calculations to include in the export.
            for col in calculatedColumns[qNameAndTag]:
                if qData:
                    csvString += "," + escape_csv(getattr(qData, col)())
                else:
                    csvString += ","

        for export in customExports:
            query = export['base_query']
            query = query.filter(db.literal_column('participantID') == row.Participant.participantID)
            customExportData = query.all()  # Running separate queries gets the job done, but will be slow with many participants...

            # build dictionary with one row per level...
            customExportRMs = {}

            for r in customExportData:
                classValues = getattr(r, export['options']['table'])
                groupValue = getattr(classValues, export['options']['group_by'])
                customExportRMs[groupValue] = r

            for level in export['levels']:
                for field in export['options']['fields']:
                    if level[0] not in customExportRMs:
                        csvString += ","  # Missing data: one empty cell per field
                        continue  # `break` would leave the remaining field columns unfilled

                    classValues = getattr(customExportRMs[level[0]], export['options']['table'])

                    # The entire table class is added to the query, as well as the individual fields. So try both.
                    # Try class first due to it also having access to python properties.
                    if hasattr(classValues, field):
                        value = getattr(classValues, field)
                    else:
                        value = getattr(customExportRMs[level[0]], field)

                    if callable(value):
                        value = value()

                    csvString += "," + escape_csv(value)

        csvString += "\n"

    if request.base_url.endswith("/download"):
        return Response(csvString,
                    mimetype="text/csv",
                    headers={
                        "Content-disposition": "attachment; filename=%s.csv" % ("export_" + datetime.now().strftime("%Y-%m-%d_%H-%M"))
                    })
    else:
        return render_template("export.html",
                               data=csvString,
                               rowCount=len(rows),
                               unfinishedCount=unfinishedCount,
                               missingCount=missingCount)
Example #17
    def _default_form_handler(form):  # renamed parameter: `dict` shadowed the builtin
        for key, value in form.items():
            print(str.format("{0} => {1}", key, value))

    def handle(self, *args, **options):
        model = import_string(options["model"])

        if options["attributes"]:
            model_args = dict(
                arg.split("=")
                for arg in options["attributes"].split(","))
        else:
            model_args = {}

        if options.get("owner") and self._has_field(model, "owner"):
            model_args["owner"] = Member(email=options["owner"])

        if options.get("user") and self._has_field(model, "user"):
            model_args["user"] = Member(email=options["user"])

        instance = model(**model_args)

        if isinstance(instance, Initiative):
            instance.title = "the initiative"

        if isinstance(instance, Funding):
            instance.title = "the campaign"

        if isinstance(instance, Donation):
            instance.activity = Funding(title="the campaign")
            instance.user = Member(first_name='the', last_name='donor')

        if isinstance(instance, Event):
            instance.title = "the event"

        if isinstance(instance, Participant):
            instance.activity = Event(title="the event")
            instance.user = Member(first_name='the', last_name='participant')

        if isinstance(instance, Assignment):
            instance.title = "the assignment"

        if isinstance(instance, Applicant):
            instance.activity = Assignment(title="the assignment")
            instance.user = Member(first_name='the', last_name='applicant')

        if isinstance(instance, PayoutAccount):
            instance.owner = Member(first_name='the', last_name='owner')

        machine = instance.states

        text = ""

        text += u"<h2>States</h2>"
        text += u"<em>All states this instance can be in.</em>"

        text += u"<table data-layout=\"default\"><tr><th>State Name</th><th>Description</th></tr>"

        for state in list(machine.states.values()):
            text += u"<tr><td>{}</td><td>{}</td></tr>".format(
                state.name.capitalize(), state.description)

        text += u"</table>"

        text += u"<h2>Transitions</h2>"
        text += u"<em>An instance will always move from one state to the other through a transition. " \
                u"A manual transition is initiated by a user. An automatic transition is initiated by the system, " \
                u"either through a trigger or through a side effect of a related object.</em>"
        text += u"<table data-layout=\"full-width\"><tr><th>Name</th><th>Description</th><th>From</th><th>To</th>" \
                u"<th>Manual</th><th>Conditions</th><th>Side Effects</th></tr>"

        for transition in list(machine.transitions.values()):
            str = u"<tr><td>{}</td><td>{}</td><td><ul>{}</ul></td>" \
                  u"<td>{}</td><td>{}</td><td><ul>{}</ul></td><td><ul>{}</ul></td></tr>"

            text += str.format(
                transition.name, transition.description,
                u"".join(u"<li>{}</li>".format(state.name.capitalize())
                         for state in transition.sources),
                transition.target.name.capitalize(),
                "Automatic" if transition.automatic else "Manual",
                u"".join(u"<li>{}</li>".format(get_doc(condition))
                         for condition in transition.conditions),
                u"".join(u"<li>{}</li>".format(effect(instance).to_html())
                         for effect in transition.effects))
        text += u"</table>"

        if model.triggers:
            text += u"<h2>Triggers</h2>"
            text += u"<em>These are events that get triggered when the instance changes, " \
                    u"other then through a transition. " \
                    u"Mostly it would be triggered because a property changed (e.g. a deadline).</em>"
            text += u"<table data-layout=\"full-width\">" \
                    u"<tr><th>When</th>" \
                    u"<th>Effects</th></tr>"

            for trigger in model.triggers:
                text += u"<tr><td>{}</td><td><ul>{}</ul></td></tr>".format(
                    trigger(instance), "".join([
                        "<li>{}</li>".format(effect(instance).to_html())
                        for effect in trigger(instance).effects
                    ]))
            text += u"</table>"

        if model.triggers:
            text += u"<h2>Periodic tasks</h2>"
            text += u"<em>These are events that get triggered when certain dates are passed. " \
                    u"Every 15 minutes the system checks for passing deadlines, registration dates and such.</em>"

            text += u"<table data-layout=\"full-width\">" \
                    u"<tr><th>When</th>" \
                    u"<th>Effects</th></tr>"

            for task in model.periodic_tasks:
                text += u"<tr><td>{}</td><td><ul>{}</ul></td></tr>".format(
                    task(instance), "".join([
                        "<li>{}</li>".format(effect(instance).to_html())
                        for effect in task(instance).effects
                    ]))
            text += u"</table>"
        print(text)
Example #19
    def aggregate(self, woody_file_name='', make_marked_file=False):
        """ Estimate aboveground biomass carbon (ABC) for each plant in each plot.
        Plant measurements are read from an excel spreadsheet of field data.

        Parameters
        ----------
        woody_file_name : str
            excel file containing plant measurements for each plot
        make_marked_file : bool
            create an output excel file that highlights problematic rows in woody_file_name (default = False)

        Returns
        -------
        pandas.DataFrame
            plant ABC etc. values, one row per plant
        """
        abc_plant_estimator = AbcPlantEstimator(model_dict=self.model_dict, surrogate_dict=self.master_surrogate_dict,
                                                wd_ratio_dict=self.wd_ratio_dict, correction_method=self.correction_method)
        ok_colour = Color(auto=True)

        wb = load_workbook(woody_file_name)
        try:
            self._woody_file_name = woody_file_name
            ws = wb["Consolidated data"]  # get_sheet_by_name() is deprecated in openpyxl

            self._unmodelled_species = {'unknown': {}, 'none': {}}       # keep a record of species without models
            plot_abc_list = []
            for r in ws[2:ws.max_row]:      # loop through each plant
                if r is None or r[2].value is None:
                    continue
                species = str(r[3].value).strip()

                # parse plot ID
                plot_id = str(r[0].value).strip()
                dashLoc = 2 if str(plot_id).find('-') < 0 else str(plot_id).find('-')
                plot_id = plot_id.replace('-', '').upper()
                id_num = np.int32(plot_id[dashLoc:])  # get rid of leading zeros
                plot_id = '%s%d' % (plot_id[:dashLoc], id_num)

                plot_size = np.int32(str(r[1].value).lower().split('x')[0])
                degr_class = str(r[2].value).strip()
                if degr_class == 'Degraded':
                    degr_class = 'Severe'
                elif degr_class == 'Pristine':
                    degr_class = 'Intact'

                meas_dict = OrderedDict({'ID': plot_id, 'degr_class': degr_class, 'orig_species': species,
                                         'canopy_width': r[4].value, 'canopy_length': r[5].value,
                                         'height': r[6].value, 'species': species, 'plot_size': plot_size})

                # error checking
                fields = ['canopy_width', 'canopy_length', 'height']
                fields_ok = True
                for f in fields:
                    if meas_dict[f] is None:
                        logger.warning('ID: {0}, species: {1}, has incomplete data'.format(plot_id, species))
                        meas_dict[f] = 0
                        fields_ok = False

                meas_dict['bsd'] = str(r[7].value) if len(r) > 7 else ''
                abc_dict = abc_plant_estimator.estimate(meas_dict)

                for key in list(abc_dict.keys()):   # copy to meas_dict
                    meas_dict[key] = abc_dict[key]

                if make_marked_file:    # mark problem cells in excel spreadsheet
                    if species not in self.master_surrogate_dict or not fields_ok:
                        r[3].fill = PatternFill(fgColor=colors.COLOR_INDEX[5], fill_type='solid')
                        logger.debug('Marking row {0}'.format(r[3].row))
                    else:
                        r[3].fill = PatternFill(fgColor=ok_colour, fill_type='solid',)

                # gather stats on species without models
                if species not in self.master_surrogate_dict or self.master_surrogate_dict[species]['allom_species'] == 'none':
                    key = ''
                    if species not in self.master_surrogate_dict:
                        key = 'unknown'     # unknown unknowns
                    elif self.master_surrogate_dict[species]['allom_species'] == 'none':
                        key = 'none'        # known unknowns
                    if species in self._unmodelled_species[key]:  # check inside the bucket, not the top-level dict
                        self._unmodelled_species[key][species]['count'] += 1
                        self._unmodelled_species[key][species]['vol'] += abc_dict['vol'] / 1.e6
                    else:
                        self._unmodelled_species[key][species] = {'count': 1, 'vol': abc_dict['vol'] / 1.e6}

                plot_abc_list.append(meas_dict)

            self.plot_abc_df = pd.DataFrame(plot_abc_list)
            if make_marked_file:
                out_file_name = str.format('{0}/{1}_Marked.xlsx', os.path.dirname(woody_file_name),
                                           os.path.splitext(os.path.basename(woody_file_name))[0])
                wb.save(filename=out_file_name)
        finally:
            wb.close()

        logger.info('Unknown species:')
        for k, v in self._unmodelled_species['unknown'].items():
            logger.info(f'{k} {v}')
        logger.info('Unmodelled species:')
        for k, v in self._unmodelled_species['none'].items():
            logger.info(f'{k} {v}')

        return self.plot_abc_df