Example #1
def wireless_post():
    data = request.get_json(force=True)
    ldate = data["date"]
    ltime = data["time"]

    try:
        records = [
            Wireless(source=item["source"],
                     date=ldate,
                     time=ltime,
                     ap=item["AP"],
                     mac=item["MAC"],
                     lastip=item["Last IP"],
                     dnsname=item["DNSname"],
                     rx=item["RX"],
                     tx=item["TX"],
                     uptime=item["Uptime"],
                     lastact=item["LastACT"],
                     signalstrength=item["SignalStrength"],
                     snr=item["SNR"],
                     ccq=item["TX/RX-CCQ"],
                     throughput=item["PThroughput"]) for item in data["items"]
        ]
        with database.atomic():
            # atomic() commits on success and rolls back on exception,
            # so no separate commit() call is needed afterwards
            Wireless.bulk_create(records, batch_size=10)
    except Exception as e:
        # make_dump(ldate, data, e)
        raise
    return 'Ok'
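
A note on the pattern above: peewee's `database.atomic()` commits when the block exits normally and rolls back when it raises, so no separate commit call is needed. A minimal sketch of that behavior, reusing `Wireless` and `records` from the example (the `IntegrityError` handling is an illustrative assumption, not part of the original):

from peewee import IntegrityError

try:
    with database.atomic():
        Wireless.bulk_create(records, batch_size=10)
except IntegrityError:
    # the entire batch was rolled back; nothing was written
    pass
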
Example #2
File: geocode.py Project: Zverik/mmwatch
def add_countries():
    if config.QUERYAT_URL is None or config.GEOCODE_BATCH <= 0:
        return
    database.connect()
    with database.atomic():
        q = Change.select().where(
            (Change.country >> None) & (Change.action != 'a')
            & (~Change.changes.startswith('[[null, null]'))).limit(
                config.GEOCODE_BATCH + 200)
        count = config.GEOCODE_BATCH
        for ch in q:
            coord = ch.changed_coord()
            if coord is not None:
                country = geocode(coord[0], coord[1])
                if country is not None:
                    ch.country = country[:150]
                    ch.save()
            else:
                # print('Empty coordinates: {0} {1} {2}'.format(ch.id, ch.action, ch.changes.encode('utf-8')))
                pass

            # Query extra rows to compensate for records with empty coordinates
            count -= 1
            if count <= 0:
                break
Example #3
def changeset():
    if 'osm_token' not in session:
        return redirect(url_for('login'))

    cs_data = request.args.get('changeset')
    if not cs_data or not cs_data.strip():
        return redirect(url_for('front'))
    user = get_user()
    # TODO: call submit_changeset instead
    try:
        changeset = parse_changeset_id(cs_data)
        cs_date, conforms = validate_changeset(user, changeset, None,
                                               openstreetmap)
    except ValueError as e:
        flash(str(e))
        return redirect(url_for('front'))
    if not cs_date or cs_date != today():
        flash('Date of the changeset is wrong')
        return redirect(url_for('front'))
    task = Task.get(Task.user == user, Task.day == cs_date)
    try:
        last_task = Task.select(Task.day).where(
            Task.user == user,
            Task.changeset.is_null(False)).order_by(Task.day.desc()).get()
        is_streak = last_task.day == cs_date - cs_date.resolution
    except Task.DoesNotExist:
        is_streak = False
    task.changeset = changeset
    task.correct = conforms
    if is_streak:
        user.streak += 1
    else:
        user.streak = 1
    user.score += int(math.log(user.streak + 1, 2))
    if conforms:
        flash('An extra point for completing the task')
        user.score += 1
    if user.level < len(config.LEVELS) + 1:
        if user.score >= config.LEVELS[user.level - 1]:
            user.level += 1
            flash('Congratulations on gaining a level!')
    with database.atomic():
        task.save()
        user.save()
    flash('Changeset noted, thank you!')
    return redirect(url_for('front'))
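
In the streak check above, `cs_date.resolution` is `datetime.date.resolution`, a one-day `timedelta`, so the condition tests whether the last recorded task fell exactly one day before the changeset date:

from datetime import date, timedelta

assert date.resolution == timedelta(days=1)
# last_task.day == cs_date - cs_date.resolution
# is therefore the same test as
# last_task.day == cs_date - timedelta(days=1)
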
Example #4
def submit_changeset(user, changeset, req=None):
    """Validates the changeset, records it and returns a series of messages."""
    lang = load_language_from_user('', user)['validation']
    try:
        changeset = parse_changeset_id(changeset)
        cs_date, conforms = validate_changeset(user, changeset, None, req)

        if not cs_date:
            raise ValidationError('wrong_date')

        last_task_day = get_last_task_day(user)
        if last_task_day and last_task_day >= cs_date:
            raise ValidationError('has_later_changeset')

        if cs_date < yesterday():
            raise ValidationError('old_changeset')
    except ValidationError as e:
        return [e.to_lang(lang)], False

    task = Task.get(Task.user == user, Task.day == cs_date)
    task.changeset = changeset
    task.correct = conforms

    if last_task_day == cs_date - cs_date.resolution:
        user.streak += 1
    else:
        user.streak = 1
    user.score += int(math.log(user.streak + 1, 2))
    msgs = [lang['changeset_noted'].format(user.streak)]
    if conforms:
        user.score += 1
        msgs.append(lang['extra_point'])
    if user.level < len(config.LEVELS) + 1:
        if user.score >= config.LEVELS[user.level - 1]:
            user.level += 1
            msgs.append(lang['gain_level'])

    with database.atomic():
        task.save()
        user.save()
    return msgs, True
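
The `ValidationError` class is not included in this snippet. A plausible minimal reconstruction, assuming (from the `e.to_lang(lang)` call above) that `lang` maps error codes such as 'wrong_date' to translated message strings:

# Hypothetical sketch; the project's real class may differ.
class ValidationError(Exception):
    def __init__(self, code):
        super(ValidationError, self).__init__(code)
        self.code = code

    def to_lang(self, lang):
        # fall back to the raw error code when no translation exists
        return lang.get(self.code, self.code)
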
Example #5
def process_notes():
    database.connect()
    if not check_update():
        return

    response = urllib2.urlopen(NOTES_URI)
    # Parsing bz2 through a temporary file
    tmpfile = TemporaryFile()
    while True:
        chunk = response.read(512*1024)
        if not chunk:
            break
        tmpfile.write(chunk)
    tmpfile.seek(0)

    with database.atomic():
        with BZ2File(tmpfile) as f:
            for event, element in etree.iterparse(f):
                if element.tag == 'note':
                    if len(element) > 0 and element[0].text and '#mapsme' in element[0].text:
                        note_id = element.get('id')
                        try:
                            ch = Change.get(Change.changeset == note_id, Change.action == 'n')
                            if element[-1].get('action') == 'closed' and ch.processed is None:
                                print('Found closed note {0}'.format(note_id))
                                ch.processed = hour_difference(ch.timestamp, element[-1].get('timestamp'))
                                ch.save()
                        except Change.DoesNotExist:
                            ch = Change()
                            ch.action = 'n'
                            ch.version = ''
                            ch.changeset = note_id
                            ch.user = element[0].get('user') if element[0].get('uid') else 'Anonymous Note'
                            print('Found new note {0} by {1}'.format(note_id, ch.user.encode('utf-8')))
                            ch.timestamp = datetime.strptime(element[0].get('timestamp'), '%Y-%m-%dT%H:%M:%SZ')
                            if element[-1].get('action') == 'closed' and ch.processed is None:
                                ch.processed = hour_difference(ch.timestamp, element[-1].get('timestamp'))
                            changes = [(element.get('lon'), element.get('lat')), {'note': element[0].text}]
                            ch.changes = json.dumps(changes, ensure_ascii=False)
                            ch.save()
                    element.clear()
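
The `element.clear()` call at the end of the loop keeps this pass over a large notes dump memory-bounded: `iterparse` yields each element once it is fully built, and clearing it releases the child comment elements so the in-memory tree does not grow with the file. A stripped-down sketch of the idiom, where `handle_note` is a hypothetical stand-in for the Change lookup and insert above:

for event, element in etree.iterparse(f):
    if element.tag == 'note':
        handle_note(element)  # hypothetical: process the completed <note>
        element.clear()       # then drop its children to bound memory
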
Example #6
def insert_people(crd, data):
    people = data['people']
    person_list = []
    with database.atomic():
        for person in people:
            row = mergedicts(getname(person['name']), adviser=crd)
            # retry loop: a hand-rolled get-or-create for the Person row
            while True:
                try:
                    entry = db.Person.get(**row)
                    break
                except db.Person.DoesNotExist:
                    db.Person.insert(**row).execute()

            percentage = re_PERCENTAGE.sub(r'.\1', person['ownership'])
            is_controlperson = person['controlperson']
            db.Ownership.get_or_create(person=entry,
                                       percentowned=percentage,
                                       controlperson=is_controlperson)

            title = ' '.join(x.capitalize() for x in person['title'].split())
            since = pd.to_datetime(person['since'])
            (db.Person.update(date = since, title = title)
                .where((db.Person.firstname == entry.firstname) &
                    (db.Person.lastname == entry.lastname) &
                    (db.Person.adviser == crd)).execute())
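
The `while True` loop above is a hand-rolled get-or-create. peewee ships this pattern as `Model.get_or_create`, which fetches the matching row or inserts it and reports which happened, so an equivalent sketch (note it manages its own savepoint internally) is:

# returns the row plus a flag saying whether it was just inserted
entry, created = db.Person.get_or_create(**row)
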
Example #7
    def parase_log(self):
        import re
        import datetime
        latest_log_offset = self.get_lastest_log_offset()

        matchRegex = r'^(.+) - tornado\.(\w+) - (\w+) - (\d+) (\w+) (.*?) \((.*?)\) (.*?)ms'
        queryTimeRegex = r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+),(\d+)'
        compileRegex = re.compile(matchRegex)
        timeCompiledRegex = re.compile(queryTimeRegex)
        with open(self.log_path, 'r+') as f:
            # move pointer to current offset
            f.seek(latest_log_offset, 0)
            data_source = []
            for eachline in f.readlines():

                info = compileRegex.findall(eachline)

                if len(info) == 1:
                    # unpack the matched groups into named fields
                    timeString, logType, logLevel, requestStatus, requestType, requestURL, requestIP, requestDuration = info[
                        0]
                    # parse the timestamp string into its components first
                    year, month, day, hour, minute, second, milsecond = timeCompiledRegex.findall(
                        timeString)[0]
                    # the log timestamp carries milliseconds; datetime
                    # expects microseconds, hence the * 1000
                    reqDatetime = datetime.datetime(int(year),
                                                    int(month),
                                                    int(day),
                                                    hour=int(hour),
                                                    minute=int(minute),
                                                    second=int(second),
                                                    microsecond=int(milsecond) * 1000)

                    thisLineLog = dict(queryTime=reqDatetime,
                                       logType=logType,
                                       logLevel=logLevel,
                                       requestStatus=requestStatus,
                                       requestType=requestType,
                                       requestURL=requestURL,
                                       requestIP=requestIP,
                                       requestDuration=requestDuration)

                    data_source.append(thisLineLog)
                #print eachline
            cur_log_offset = f.tell()
            # print '# cur',cur_log_offset

            # store info
            BaseHandler.prepare(self)
            # bulk insert
            from db import database
            with database.atomic():
                for idx in range(0, len(data_source), 100):
                    log.insert_many(data_source[idx:idx + 100]).execute()

            lastest_log_offset = configOption.get(name='lastest_log_offset')

            lastest_log_offset.value = cur_log_offset
            lastest_log_offset.save()
            # save time
            import time
            lastest_log_update_time, created = configOption.get_or_create(
                name='lastest_log_update_time',
                defaults={'value': str(time.time())})
            lastest_log_update_time.value = str(time.time())
            lastest_log_update_time.save()
            BaseHandler.on_finish(self)
Example #8
def upload_project():
    def add_flash(pid, msg):
        flash(msg)
        return redirect(url_for('add_project', pid=pid))

    user = get_user()
    if not is_admin(user):
        return redirect(url_for('front'))
    pid = request.form['pid']
    if pid:
        pid = int(pid)
        project = Project.get(Project.id == pid)
        if not is_admin(user, project):
            return redirect(url_for('front'))
        update_audit(project)
    else:
        pid = None
        project = Project()
        project.feature_count = 0
        project.bbox = ''
        project.owner = user
    project.name = request.form['name'].strip()
    if not project.name:
        return add_flash(pid, 'Empty name - bad')
    project.title = request.form['title'].strip()
    if not project.title:
        return add_flash(pid, 'Empty title - bad')
    project.url = request.form['url'].strip()
    if not project.url:
        project.url = None
    project.description = request.form['description'].strip()
    project.can_validate = request.form.get('validate') is not None
    project.validate_modified = request.form.get('validate_modified') is not None
    project.hidden = request.form.get('is_hidden') is not None

    if 'json' not in request.files or request.files['json'].filename == '':
        if not pid:
            return add_flash(pid, 'Would not create a project without features')
        features = []
    else:
        try:
            features = json.load(codecs.getreader('utf-8')(request.files['json']))
        except ValueError as e:
            return add_flash(pid, 'Error in the uploaded features file: {}'.format(e))
        if 'features' not in features or not features['features']:
            return add_flash(pid, 'No features found in the JSON file')
        features = features['features']

    audit = None
    if 'audit' in request.files and request.files['audit'].filename:
        try:
            audit = json.load(codecs.getreader('utf-8')(request.files['audit']))
        except ValueError as e:
            return add_flash(pid, 'Error in the uploaded audit file: {}'.format(e))
        if not audit:
            return add_flash(pid, 'No features found in the audit JSON file')

    proj_audit = json.loads(project.audit or '{}')
    if audit:
        proj_audit.update(audit)
        project.audit = json.dumps(proj_audit, ensure_ascii=False)
    if features or audit or not project.updated:
        project.updated = datetime.datetime.utcnow().date()
    project.save()

    if features:
        with database.atomic():
            update_features(project, features, proj_audit)

    if project.feature_count == 0:
        project.delete_instance()
        return add_flash(pid, 'Zero features in the JSON file')

    return redirect(url_for('project', name=project.name))
Example #9
    def startScan(self, *args, **kwargs):
        # we will use plagiarism system permission
        import probCrawler
        self._q = queues.Queue()

        # email = self.get_argument('email')
        self.query = probCrawler.crawler(queryName='')

        accountList = cronInfo.filter(isPermit=True)[:]

        accounts = {cronAccount.account for cronAccount in accountList}
        for cronAccount in accounts:
            print(cronAccount)
            self.genTask(name=cronAccount)

        yield self._q.join(timeout=timedelta(seconds=100))

        print(self.infoDict)
        dataSource = []

        # database storage
        for name, dataDict in self.infoDict.items():
            # get total data
            tot = 0
            submitTot = 0
            for oj, resTuple in dataDict.items():
                ac, submit = resTuple
                tot += int(ac)
                submitTot += int(submit)
                # normalize the stored values to ints
                dataDict[oj] = (int(ac), int(submit))
            if submitTot != 0:
                # float division keeps the ratio meaningful under Python 2 too
                ratio = float(tot) / submitTot
            else:
                ratio = 0
            defaultOption = (0, 0)
            saveData = dict(
                name=name,
                pojNum=dataDict.get('poj', defaultOption)[0],
                hduNum=dataDict.get('hdu', defaultOption)[0],
                zojNum=dataDict.get('zoj', defaultOption)[0],
                cfNum=int(dataDict.get('codeforces', defaultOption)[0]) +
                int(dataDict.get('CodeForces', defaultOption)[0]),
                acdreamNum=dataDict.get('acdream', defaultOption)[0],
                bzojNum=dataDict.get('hysbz', defaultOption)[0] +
                dataDict.get('bzoj', defaultOption)[0],
                otherOJNum=tot - dataDict.get('poj', defaultOption)[0] -
                dataDict.get('hdu', defaultOption)[0] -
                dataDict.get('zoj', defaultOption)[0] -
                dataDict.get('codeforces', defaultOption)[0] -
                dataDict.get('acdream', defaultOption)[0] -
                dataDict.get('hysbz', defaultOption)[0] -
                dataDict.get('bzoj', defaultOption)[0],
                totalNum=tot,
                submitNum=submitTot,
                ratio=ratio,
            )
            dataSource.append(saveData)

        from db import database
        # Insert rows 1000 at a time.
        with database.atomic():
            for idx in range(0, len(dataSource), 1000):
                acRecordArchive.insert_many(dataSource[idx:idx +
                                                       1000]).execute()

        logging.info('[CRON] Executed for %s users', len(dataSource))
        print('Done')
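
Both the log importer (Example #7) and this scan archiver end with the same chunked bulk-insert idiom: slice the row dicts into fixed-size batches and issue one `insert_many` per batch inside a single transaction, so a failure rolls back everything and no single statement exceeds the backend's parameter limits. A self-contained sketch, with `Entry`, `database`, and the batch size of 100 as illustrative assumptions:

# Hypothetical model and handle, standing in for `log`/`acRecordArchive`.
rows = [{'name': 'a'}, {'name': 'b'}]  # one dict of column -> value per row
with database.atomic():
    for idx in range(0, len(rows), 100):
        Entry.insert_many(rows[idx:idx + 100]).execute()
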