Example #1
0
	def addComment(page_id, username, comment_text):
		"""
		Adds one to the current value of the global counter.

		Returns the new value of the counter.
		"""
		first_row = utils.query("SELECT intCommentNumber FROM tb_Comments WHERE intCommentNumber = (SELECT MAX(intCommentNumber) FROM tb_Comments WHERE chvPage = ?) AND chvPage = ?", (page_id, page_id), True)
		highestCommentNumber = 0
		if first_row:
			highestCommentNumber = int(first_row['intCommentNumber'])
		utils.query("INSERT INTO tb_Comments VALUES (?, ?, CURRENT_TIMESTAMP, ?, ?, 0, NULL)", (page_id, highestCommentNumber + 1, username, comment_text))
		return 'Success'
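
None of these examples show `utils.query` itself. For orientation, here is a minimal sketch of what a parameterized helper with this calling convention might look like over sqlite3; the module path, database file, and the semantics of the third `one` argument are assumptions inferred from the call sites, not the original implementation:

    import sqlite3

    DB_PATH = 'app.db'  # hypothetical database file

    def query(sql, params=(), one=False):
        """Run a parameterized statement; return one row, all rows, or None."""
        with sqlite3.connect(DB_PATH) as conn:
            conn.row_factory = sqlite3.Row  # rows indexable by name, e.g. row['intCommentNumber']
            cur = conn.execute(sql, params)
            if sql.lstrip().upper().startswith('SELECT'):
                return cur.fetchone() if one else cur.fetchall()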
Example #2
0
    def add_x_jobs(self, num: int = 10, cmd: str = 'exit 0', ttr: str = '10000') -> list:
        # Test helper: enqueue `num` jobs in the promotion state (inputs are trusted here).
        for _ in range(num):
            utils.query(f"INSERT INTO jobs (ttr, cmd, status) VALUES({ttr},'{cmd}', '{self.promotion_state}')")

        actual = []
        for row in utils.query(
                f"SELECT * from jobs WHERE status in ('{self.promotion_state}') ORDER BY id"):
            actual.append(row)

        utils.query(
            f"UPDATE jobs SET status='{self.pending_state}' WHERE status IN ('{self.promotion_state}')")
        return actual
Example #3
0
    def test_failed_jobs_more_than_workers(self, n):
        num = n * 2
        actual = self.add_jobs_and_wait_statuses(status='FAILED', num=num, cmd='echo;exit 2', ttr='10100')
        sql = f"SELECT * from jobs WHERE status not in ('{self.pending_state}', '{self.promotion_state}') ORDER BY id"
        curr = utils.query(sql)
        for row in utils.query(sql):
            if row['status'] != 'TIMEOUT':
                time.sleep(3)
        for row in curr:
            self.assertEqual(row['status'], 'FAILED')

        self.assertEqual(len(actual), len(curr))
        self.assertEqual(len(curr), num)
Example #4
0
    def test_cancelled_jobs(self, num):
        actual = self.add_jobs_and_wait_statuses(status=self.running_state, num=num, cmd='echo;sleep 10000', ttr='1000000')
        utils.query(
            f"UPDATE jobs SET status='{self.cancelled_state}' WHERE status IN ('{self.running_state}')")

        wait_all_jobs('CANCELLED')

        curr = utils.query(
            f"SELECT * from jobs WHERE status not in ('{self.pending_state}', '{self.promotion_state}') ORDER BY id")
        for row in curr:
            self.assertEqual(row['status'], 'CANCELLED')
        self.assertEqual(len(actual), len(curr))
        self.assertEqual(len(curr), num)
Example #5
0
    def download_repository(self, repo_name, sha, zip_name):
        url = BASE_URL.format(name=repo_name)
        response = utils.query(url)
        data = response.text
        download_url = re.search(r'https://[^ ]*?\.zip', data).group(0)

        response = utils.query(download_url)
        # Stream the archive to disk; the with-block guarantees the file is closed.
        with open(zip_name, 'wb') as zip_file:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    zip_file.write(chunk)
Example #6
0
    def download_repository(self, attempt, zip_name):
        url = BASE_URL.format(name = attempt.repo.repo_name())
        response = utils.query(url)
        data = response.text
        download_url = re.search(r'https://[^ ]*?\.zip', data).group(0)

        response = utils.query(download_url)
        # Stream the archive to disk; the with-block guarantees the file is closed.
        with open(zip_name, 'wb') as zip_file:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    zip_file.write(chunk)
    # DEF
Example #7
0
def query_students():
    students = {}

    while True:
        student_id = query('Student ID:')
        student_name = query('Full name:')
        student_email = query('E-mail address:')

        students[student_id] = {
            'name': student_name,
            'e-mail': student_email,
        }

        if not query_yes_no('Do you want to add another student?',
                            default=False):
            return students
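
In this example `query` is an interactive prompt rather than a database call. A minimal sketch of what the two prompt helpers might look like (assumed for illustration, not the original source):

    def query(prompt):
        # Hypothetical prompt helper: ask until a non-empty answer is given.
        while True:
            answer = input(prompt + ' ').strip()
            if answer:
                return answer

    def query_yes_no(prompt, default=False):
        # Hypothetical yes/no prompt; empty input falls back to the default.
        suffix = ' [Y/n] ' if default else ' [y/N] '
        answer = input(prompt + suffix).strip().lower()
        if not answer:
            return default
        return answer.startswith('y')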
Example #8
0
    def relevant_page_ids(self, fresh=False, filename='relevant_page_ids.p'):
        """
        Collect the wiki id's (page_ids) for all pages belonging to the self.relevant_categories
            * if fresh: fresh download from Wikipedia + pickled under filename
            * else: no download; page_ids loaded from pickle in filename
        """
        logging.info('Getting page_ids from relevant categories.')
        self.page_ids = set()

        if fresh:
            for category in self.relevant_categories:
                for result in query({
                        'generator': 'categorymembers',
                        'gcmtitle': 'Category:' + category,
                        'gcmlimit': '500'
                }):
                    for page in result['pages']:
                        page_id = result['pages'][page]['title']
                        self.page_ids.add(page_id)
                        if len(self.page_ids) % 1000 == 0:
                            logging.info('\t+ nb pages download: %d' %
                                         len(self.page_ids))

            self.page_ids = sorted(self.page_ids)
            with open(os.path.join(self.workspace, filename), 'wb') as out:
                pickle.dump(self.page_ids, out)

        else:
            with open(os.path.join(self.workspace, filename), 'rb') as inf:
                self.page_ids = pickle.load(inf)

        logging.info('\t+ set %d page_ids.' % len(self.page_ids))

        return self.page_ids
Example #9
0
    def relevant_page_ids(self, fresh=False, filename='../workspace/relevant_page_ids.p'):
        """
        Collect the wiki id's (page_ids) for all pages belonging to the self.relevant_categories
            * if fresh: fresh download from Wikipedia + pickled under filename
            * else: no download; page_ids loaded from pickle in filename
        """
        print('>>> Getting page_ids from relevant categories')
        self.page_ids = set()

        if fresh:
            for category in self.relevant_categories:
                for result in query( {'generator':'categorymembers', 'gcmtitle':'Category:'+category, 'gcmlimit':'500'}):
                    for page in result['pages']:
                        page_id = result['pages'][page]['title']
                        self.page_ids.add(page_id)
                        if len(self.page_ids)%1000 == 0:
                            print('\t+ nb pages download:', len(self.page_ids))

            self.page_ids = sorted(self.page_ids)
            with open(filename, 'wb') as out:
                pickle.dump(self.page_ids, out)

        else:
            with open(filename, 'rb') as inf:
                self.page_ids = pickle.load(inf)

        print('\t+ set', len(self.page_ids), 'page_ids')

        return self.page_ids
Example #10
0
    def search(self):
        # Load and parse!
        response = utils.query(self.next_url())
        soup = BeautifulSoup(response.text, 'html.parser')
        titles = soup.find_all(class_='node-project-distribution')
        LOG.info("Found %d repositories" % len(titles))

        # Pick through the results and find repos
        for title in titles:
            name = title.contents[1].contents[0]['href'].split('/')[2]
            try:
                self.add_repository(name, '')
            except Exception:
                traceback.print_exc()
            # Sleep for a little bit to prevent us from getting blocked
            time.sleep(DRUPAL_SLEEP)
        ## FOR

        # Figure out what is the next page that we need to load
        try:
            next_page = soup.find(class_='pager-next').contents[0]
        except (AttributeError, IndexError):
            next_page = None
        if not next_page or not next_page.has_attr('href'):
            LOG.info("No next page link found!")
            self.crawlerStatus.next_url = None
        else:
            self.crawlerStatus.next_url = DRUPAL_HOST + next_page['href']

        # Make sure we update our crawler status
        LOG.info("Updating status for %s" % self.crawlerStatus)
        self.crawlerStatus.save()
            
        return
Example #12
0
def wait_all_jobs(status: str) -> None:
    """Waits for Job status with progress bar"""

    curr = utils.query("SELECT * from jobs ORDER BY id")
    for row in utils.progressbar(curr):
        wait_single_job_status(job_id=row['id'], status=status)
Example #13
0
    def handle(self, handler_input):
        # type: (HandlerInput) -> Response

        book_title = handler_input.request_envelope.request.intent.slots[
            "title"].value

        results = utils.query(book_title)

        result_length = len(results)

        session_attr = handler_input.attributes_manager.session_attributes

        # no results
        if result_length == 0:
            speak_output = 'Sorry, I couldn\'t find that. Try saying another title.'
        # one result
        elif result_length == 1:
            session_attr["state"] = "SEARCH_RESULTS"
            speak_output = 'I have found one title: {} by {}. Would you like me to read it to you?'.format(
                results[0]['title'], results[0]['author'])
            session_attr["book"] = results[0]
        else:
            session_attr["state"] = "SEARCH_RESULTS"
            session_attr["search_results"] = results
            books = [
                book['title'] + ' by ' + book['author'] for book in results
            ]
            books_string = ' <break time="0.5s"/> , '.join(books)
            speak_output = 'I have found {} results, which would you like? {}'.format(
                result_length, books_string)

        return (handler_input.response_builder.speak(speak_output).ask(
            speak_output).response)
Example #14
0
 def num_processed(self) -> int:
     n = utils.query(
         f"select count(*) as n  from jobs WHERE status not in ('{self.pending_state}', '{self.running_state}','{self.promotion_state}')")
     num = 0
     if n:
         num = n[0]['n']
     return num
Example #15
0
def main():
    et_dingding = []
    num = 0
    a = 0  # rows written to et_contact; read below even when lss ends up empty
    etids = opetating_db.get_etid()
    et_dates = opetating_db.get_companys(etids)
    pool = Pool(15)
    pool.map(spider, et_dates)
    pool.join()
    print "et_cons:%s" % et_cons
    print "et_exts:%s" % et_exts
    lss = []
    for et_con in et_cons:  # build a list of tuples
        ls = tuple(et_con.values())
        lss.append(ls)
    chachongs = utils.query(lss)  # rows already present in the DB (duplicate check)
    for chachong in chachongs:
        if chachong in lss:
            lss.remove(chachong)
    if lss:
        conn2 = utils.open_line_db()
        a = utils.insert_update_con(conn2, lss, 'et_contact')
    if et_exts:
        conn3 = utils.open_line_db()
        utils.insert_update_many(conn3, et_exts, 'et_email_extend')
    conn1 = utils.open_local_db()
    num = utils.insert_update_many(conn1, et_statuss, "et_info_status")

    logging.info("%s 添加钉钉数据" % utils.current_time())
    type = 2
    et_dingding.append(type)
    et_dingding.append(num)
    et_dingding.append(a)
    utils.insert_one(et_dingding)
Example #16
0
def search_result():
    keyword = request.args.get('keyword', '', type=str)
    start = request.args.get('start', 0, type=int)
    res = query(keyword, start, 10)
    res = res['hits']['hits']
    print(res)
    return jsonify(res=res)
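
The `res['hits']['hits']` access matches the Elasticsearch response format, so `query` here presumably wraps a search call. A hedged sketch of such a wrapper using the official client; the index name and match field are assumptions:

    from elasticsearch import Elasticsearch

    es = Elasticsearch()  # assumes a local cluster on the default port

    def query(keyword, start, size):
        # Hypothetical wrapper: full-text match, paginated with from/size.
        return es.search(index='pages', body={
            'query': {'match': {'content': keyword}},
            'from': start,
            'size': size,
        })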
Example #17
0
 def test_success_jobs(self, num):
     actual = self.add_jobs_and_wait_statuses(status='SUCCESS', num=num, cmd='exit 0', ttr='1000')
     curr = utils.query(
         f"SELECT * from jobs WHERE status not in ('{self.pending_state}', '{self.promotion_state}') ORDER BY id")
     for row in curr:
         self.assertEqual(row['status'], 'SUCCESS')
     self.assertEqual(len(actual), len(curr))
     self.assertEqual(len(curr), num)
Example #18
0
    def backlinking_pages(self,
                          page_ids=None,
                          ignore_categories=None,
                          fresh=False,
                          filename='backlinks.p'):
        """
        Sets a dict (backlinks), with for each page_id a set of the pages backlinking to it.
            * if fresh: fresh download + pickle under outfilename
            * else: no download; backlinks loaded from pickle in filename
        In the case of new download:
            * if page_ids = None, self.page_ids is used
            * categories starting with one of the items in ignore_categories will be ignored
        """
        self.backlinks = {}

        if fresh:
            if not page_ids:
                page_ids = self.page_ids
            logging.info('Collecting backlinks for %d pages' % len(page_ids))

            if not ignore_categories:
                ignore_categories = ('Gebruiker:', 'Lijst van', 'Portaal:',
                                     'Overleg', 'Wikipedia:', 'Help:',
                                     'Categorie:')

            for idx, page_id in enumerate(page_ids):
                self.backlinks[page_id] = set()
                for result in query({
                        'action': 'query',
                        'list': 'backlinks',
                        'format': 'json',
                        'bltitle': page_id
                }):
                    for backlink in result['backlinks']:
                        backlink = backlink['title'].replace('_',
                                                             ' ')  # clean up
                        if not backlink.startswith(ignore_categories):
                            self.backlinks[page_id].add(backlink)
                if idx % 10 == 0:
                    logging.info(
                        '\t+ collected %d backlinks for %d pages' %
                        (sum([len(v)
                              for k, v in self.backlinks.items()]), idx + 1))

            # remove pages without relevant backlinks
            self.backlinks = {k: v for k, v in self.backlinks.items() if v}
            # dump for later reuse
            with open(os.path.join(self.workspace, filename), 'wb') as out:
                pickle.dump(self.backlinks, out)

        else:
            with open(os.path.join(self.workspace, filename), 'rb') as inf:
                self.backlinks = pickle.load(inf)

        logging.info(
            '\t+ loaded %d backlinks for %d pages' %
            (sum([len(v)
                  for k, v in self.backlinks.items()]), len(self.backlinks)))
Example #19
0
def search2():
    sql = "select * from %s" % (utils.Table2)
    Unu, La = utils.query(sql, utils.Table2)  # Unu is unused; La holds the table's column names
    if request.method == 'POST':
        content = request.form
        data = []
        if content['Search']:
            if not data:
                sql1 = "select * from %s where concat(%s,%s,%s) like " % (
                    utils.Table2, La[0], La[1], La[2])
                sql2 = "'%"
                sql3 = "%s" % (content['Search']) + "%'"
                sql = sql1 + sql2 + sql3
                data, labels = utils.query(sql, utils.Table2)
            return render_template('BorSearch.html', labels=labels, data=data)
        else:
            return redirect(url_for('BorIfo'))
    else:
        return redirect(url_for('BorIfo'))
Example #20
0
    def test_timeout_jobs(self, num):
        actual = self.add_jobs_and_wait_statuses(status='TIMEOUT', num=num, cmd='echo;sleep 10000', ttr='3')

        curr = utils.query(
            f"SELECT * from jobs WHERE status not in ('{self.pending_state}', '{self.promotion_state}') ORDER BY id")
        for row in curr:
            self.assertEqual(row['status'], 'TIMEOUT')

        self.assertEqual(len(actual), len(curr))
        self.assertEqual(len(curr), num)
Example #21
0
def wait_single_job_status(status, job_id) -> None:
    finished = False
    for i in range(0, NUM_WORKERS + 300):
        if finished:
            break
        for row in utils.query(f"SELECT * from jobs WHERE id={job_id}"):
            if row and row['status'] in status and str(job_id) in str(row['id']):
                finished = True
                break
            time.sleep(min(i, 2))
Example #22
0
def handle(builds, environ, cmd=None):
    error = None

    try:
        settings = json.loads(query(environ, 'settings', '{}'))
    except Exception:
        log.exception("Error in json parsing the settings variable")
        error = escape(make_trace())
        settings = {}

    for e in sorted(settings_validator.iter_errors(settings)):
        if error is None:
            error = ""
        error += str(e) + "\n"

    if error is not None:
        log.error("Errors from schema: " + error)
        yield '<result>\n<error>' + error + '</error>\n</result>\n'
    else:
        incremental = query(environ, 'incremental', '')
        incremental = incremental.lower() == 'true'

        try:
            if cmd == "makefile":
                log.info("Returning makefile")
                yield makefile(settings)
            elif cmd == "join":
                log.info("Joining existing build")
                yield "<result>\n"
                hashnumber = query(environ, 'hash', '')
                for k in join_from_hash(builds, hashnumber, incremental):
                    yield k
            else:
                log.info("Starting a new build")
                yield "<result>\n"
                for k in build(builds, text(environ), settings, incremental, "xml"):
                    yield k
        except Exception:
            trace = make_trace()
            log.exception("Error in handle")
            yield '<trace>' + escape(trace) + '</trace>\n'
            yield '</result>\n'
Example #23
0
def updateScore(stu_id, scores):
    name2no = {}
    sql = "SELECT CO_NO,CO_NAME FROM EDUCATION_PLAN"
    result = query(sql)
    for cur in result:
        name2no[cur[1]] = cur[0]
    # build a dict mapping {co_name: co_no}
    for cur in scores:
        sql = "UPDATE STU_EDU_PLAN SET COMMENT = '%s' WHERE STU_NO = '%s' AND CO_NO = '%s'"\
              % (scores[cur], stu_id, name2no[cur])
        update(sql)
Example #24
0
def get_number_of_property_references(uri: str) -> int:
    """
    Returns the number of times a property is actually used in the DB
    """
    query_result = query(f"""
        select (count(*) as ?count) where {{
            ?s <{uri}> ?o
        }}
    """)
    result = query_result.convert()['results']['bindings'][0]['count']['value']
    return int(result)
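
The `.convert()['results']['bindings']` access matches SPARQLWrapper's JSON results document, so `query` here presumably wraps a SPARQL endpoint. A minimal sketch of such a wrapper; the endpoint URL is an assumption:

    from SPARQLWrapper import SPARQLWrapper, JSON

    ENDPOINT = 'http://localhost:8890/sparql'  # hypothetical endpoint

    def query(sparql_text):
        # Returns a QueryResult whose convert() yields the parsed JSON document.
        client = SPARQLWrapper(ENDPOINT)
        client.setQuery(sparql_text)
        client.setReturnFormat(JSON)
        return client.query()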
Example #25
0
def main():
    vw = []
    sl = []
    while True:
        inp = raw_input("> ")

        inp = inp.strip()
        words = inp.split()

        cmd = words[0]
        if cmd == "/save":
            for temp in vw:
                temp.finish()
            sys.exit(1)
        if cmd == "/train":
            data = " ".join(words[1:]).strip()
            for i in range(10):
                for temp in sl:
                    temp.learn(preprocess([data]))
        elif cmd == "/query":
            data = " ".join(words[1:]).strip()
            output = set()
            for s in sl:
                output.add(postprocess(query(s, data)))
            for out in output:
                print "\t", out
        elif cmd == "/start":
            data = " ".join(words[1:]).strip()
            if os.path.isfile(data + ".1") and os.path.isfile(data + ".2") and os.path.isfile(
                            data + ".3") and os.path.isfile(data + ".4"):
                vw = [
                    pyvw.vw("--quiet -i " + data + ".1 -f "+data + ".1"),
                    pyvw.vw("--quiet -i " + data + ".2 -f "+data + ".2"),
                    pyvw.vw("--quiet -i " + data + ".3 -f "+data + ".3"),
                    pyvw.vw("--quiet -i " + data + ".4 -f "+data + ".4")
                ]
            else:
                vw = [
                    pyvw.vw("--search 3 --quiet --search_task hook --ring_size 2048 -f " + data + ".1"),
                    pyvw.vw("--search 3 --quiet --search_task hook --ring_size 2048 -f " + data + ".2"),
                    pyvw.vw("--search 3 --quiet --search_task hook --ring_size 2048 -f " + data + ".3"),
                    pyvw.vw("--search 3 --quiet --search_task hook --ring_size 2048 -f " + data + ".4")
                ]
            sl = [
                vw[0].init_search_task(SequenceLabeler),
                vw[1].init_search_task(SequenceLabeler2),
                vw[2].init_search_task(SequenceLabeler3),
                vw[3].init_search_task(SequenceLabeler4)
            ]
Example #26
0
def sqlmodify1():
    if request.method == 'POST':
        data = request.form
        sql = "update %s set BookNum='%s',BookName='%s',Categories='%s',Author='%s',Press='%s',PublicateDate='%s',Price='%s',IsLend='%s' where BookNum=%s" \
              % (utils.Table1, data['BookNum'], data['BookName'], data['Categories'], data['Author'], data['Press'], data['PublicateDate'], data['Price'], data['IsLend'], data['uid'])
        utils.execu(sql)
        return redirect(url_for('BooIfo'))
    else:
        uid = int(request.args.get('uid'))
        sql = "select * from %s" % (utils.Table1)
        content, labels = utils.query(sql, utils.Table1)
        sql = "select categories from %s" % (utils.Table3)
        categories = utils.getData(sql)
        return render_template('BooModify.html',
                               labels=labels,
                               content=content,
                               uid=uid,
                               categories=categories)
Example #27
0
def sqlmodify3():
    if request.method == 'POST':
        data = request.form
        sql = "update %s set CardNum='%s',CardName='%s',TypeName='%s',Sex='%s',WorkUnit='%s',Address='%s',Telephone='%s',Email='%s',RegisterDate='%s' where CardNum=%s" \
              % (utils.Table4, data['CardNum'], data['CardName'], data['TypeName'], data['Sex'], data['WorkUnit'], data['Address'], data['Telephone'], data['Email'],data['RegisterDate'], data['uid'])
        utils.execu(sql)
        return redirect(url_for('CardIfo'))
    else:
        uid = int(request.args.get('uid'))
        sql = "select * from %s" % (utils.Table4)
        content, labels = utils.query(sql, utils.Table4)
        sql = "select TypeName from %s" % (utils.Table5)
        TypeName = utils.getData(sql)
        return render_template('CardModify.html',
                               labels=labels,
                               content=content,
                               uid=uid,
                               TypeName=TypeName)
Example #28
0
def add3():
    if request.method == 'POST':
        data = request.form
        print(data)
        sql = "INSERT INTO Card (CardNum,CardName,Type,Sex,Workunit,Address,Telephone,Email,RegisterDate) VALUES ('%s','%s','%s','%s','%s','%s','%s','%s','%s');" \
              % (data['CardNum'], data['CardName'], data['Type'], data['Sex'],\
                 data['WorkUnit'], data['Address'], data['Telephone'], data['Email'],data['RegisterDate'])
        utils.execu(sql)
        return redirect(url_for('CardIfo'))
    else:
        sql = "select * from %s" % (utils.Table4)
        content, labels = utils.query(sql, utils.Table4)
        sql = "select TypeName from %s" % (utils.Table5)
        TypeName = utils.getData(sql)
        return render_template('CardAdd.html',
                               labels=labels,
                               content=content,
                               TypeName=TypeName)
Example #29
0
def train():
    data = utils.query(
        "select auctionDate, appraisedValue, minValue, saleValue from ctauInfo_out2 where saleValue > 0 and LENGTH(auctionDate) > 0",
        setting.CONF)

    col_names = ['appraisedValue', 'minValue']
    X = data[col_names].astype(float)
    y = data['saleValue'].astype(float)

    print(X.head())
    print(y.head())

    model = SLR('./data')
    print("training model...")
    model.train(X, y)

    model.save()
    print("Saved model to disk")
Example #30
0
def add1():
    if request.method == 'POST':
        data = request.form
        sql = "INSERT INTO Book (BookNum,BookName,Categories,Author,Press,PublicateDate,Price,IsLend) VALUES ('%s','%s','%s','%s','%s','%s','%s','%s')" \
              % (data['BookNum'], data['BookName'], data['Categories'], data['Author'],\
                 data['Press'], data['PublicateDate'], data['Price'], data['IsLend'])
        utils.execu(sql)
        sql = "update Category set total=total+1 where Categories=%s" % data[
            'Categories']
        utils.execu(sql)
        return redirect(url_for('BooIfo'))
    else:
        sql = "select * from %s" % (utils.Table1)
        content, labels = utils.query(sql, utils.Table1)
        sql = "select categories from %s" % (utils.Table3)
        categories = utils.getData(sql)
        return render_template('BooAdd.html',
                               labels=labels,
                               content=content,
                               categories=categories)
Example #31
0
def api(request):
    """HTTP Cloud Function.
    Args:
        request (flask.Request): The request object.
        <https://flask.palletsprojects.com/en/1.1.x/api/#incoming-request-data>
    Returns:
        The response text, or any set of values that can be turned into a
        Response object using `make_response`
        <https://flask.palletsprojects.com/en/1.1.x/api/#flask.make_response>.
    """
    request_json = request.get_json(silent=True)

    if request_json and 'columns' in request_json and 'filters' in request_json:
        columns, filters = request_json['columns'], request_json['filters']
        try:
            return escape(query(columns, filters).to_csv())
        except Exception as e:
            return 'error:' + str(e)
    else:
        return escape('error: missing parameters')
Example #32
0
def updateDatabase(stu_id, train_plan):
    """
    功能: 用户在“培养计划”界面点击“提交”按钮后,使用最新“计划树”信息更新数据库
    :param stu_id: 唯一标识学生的id
    :param train_plan: “培养计划”界面“计划树”数据的json格式
    :return: 无
    """
    data = train_plan['children']
    # 120门课
    array_finished = [0] * 120
    # print(array_finish)
    for data_children in data:
        data_children = data_children['children']
        for data_children_child1 in data_children:
            data_children_child1 = data_children_child1['children']
            for data_children_child in data_children_child1:
                name = data_children_child['children'][0]['name']
                color = data_children_child['children'][0]['itemStyle']
                sql = "SELECT CO_100 FROM EDUCATION_STU_PLAN WHERE STU_NO = '%s'" % stu_id
                co_100 = query(sql)
                co_100 = [0][0]

                if color == 'red':
                    array_finished[int(co_100)] == 0
                else:
                    array_finished[int(co_100)] == 1
    # second-level traversal

        finished_co = ''
        for i in range(120):
            if array_finished[i] == 1:
                finished_co += '1'
            else:
                finished_co += '0'

        sql = "UPDATE STU_EDU_PLAN SET FINISHED_CO = '%s' WHERE STU_NO = '%s'" % (
            finished_co, stu_id)
        update(sql)
Example #33
0
    def backlinking_pages(self, page_ids=None, ignore_categories=None, fresh=False, filename='../workspace/backlinks.p'):
        """
        Sets a dict (backlinks), with for each page_id a set of the pages backlinking to it.
            * if fresh: fresh download + pickle under outfilename
            * else: no download; backlinks loaded from pickle in filename
        In the case of new download:
            * if page_ids = None, self.page_ids is used
            * categories starting with one of the items in ignore_categories will be ignored
        """
        self.backlinks = {}

        if fresh:
            if not page_ids:
                page_ids = self.page_ids
            print('>>> Collecting backlinks for', len(page_ids), 'pages')

            if not ignore_categories:
                ignore_categories = ('Gebruiker:', 'Lijst van', 'Portaal:', 'Overleg', 'Wikipedia:', 'Help:', 'Categorie:')

            for idx, page_id in enumerate(page_ids):
                self.backlinks[page_id] = set()
                for result in query({'action':'query', 'list':'backlinks', 'format':'json', 'bltitle':page_id}):
                    for backlink in result['backlinks']:
                        backlink = backlink['title'].replace('_', ' ') # clean up
                        if not backlink.startswith(ignore_categories):
                            self.backlinks[page_id].add(backlink)
                if idx % 10 == 0:
                    print('\t+ collected', sum([len(v) for k,v in self.backlinks.items()]), 'backlinks for', idx+1, 'pages')

            self.backlinks = {k:v for k,v in self.backlinks.items() if v} # remove pages without relevant backlinks
            with open(filename, 'wb') as out: # dump for later reuse
                pickle.dump(self.backlinks, out)

        else:
            with open(filename, 'rb') as inf:
                self.backlinks = pickle.load(inf)

        print('\t+ loaded', sum([len(v) for k,v in self.backlinks.items()]), 'backlinks for', len(self.backlinks), 'pages')
Example #34
0
#data for example
#https://github.com/datacharmer/test_db
#https://medium.com/@ramojol/python-context-managers-and-the-with-statement-8f53d4d9f87


#CRUD in SQL
#Create           - INSERT
#Read (Retrieve)  - SELECT
#Update (Modify)  - UPDATE
#Delete (Destroy) - DELETE
from core import MySQLcompatible
import utils

if __name__ == "__main__":
    with MySQLcompatible('daniel','123456789',) as db:
        utils.show_databases(db)
        utils.connect_database(db,'TRABALHO_BD1')
        utils.query(db, 'select * from ENDERECO')
Example #35
0
	def getComments(page_id):
		"""
		Returns all comments for the given page as a JSON string.
		"""
		return json.dumps(utils.query("SELECT * FROM tb_Comments WHERE chvPage = ?", (page_id,)))
Example #36
0
 def get_latest_sha(self, repo_name):
     url = BASE_URL.format(name = repo_name)
     response = utils.query(url)
     data = response.text
     results = re.findall(COMMIT_URL.format(sha=r'(\d+)'), data)
     return results[1]
Example #37
0
import utils

old_data = utils.query("select * from friends;")

new_data = list(
    map(
        lambda x: {
            "index": int(x["idx"]),
            "image": '',
            "link": x["url"],
            "name": x["name"],
            "description": '',
            "posts": [],
        }, old_data))

for d in new_data:
    print(d)

document = utils.mydb["friends"]

ids = document.insert_many(new_data)
Example #38
0
	def deleteComment(page_id, page_number):
		"""Deletes the page_number-th column from comments of page_id."""
		utils.query("UPDATE tb_Comments SET bIsDeleted=1, dtmDeleted=CURRENT_TIMESTAMP WHERE chvPage=? AND intCommentNumber=?", (page_id, page_number))
		return 'Success'
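
Note this is a soft delete: the row stays in tb_Comments with bIsDeleted set. Readers then have to filter it out, roughly as in this sketch (an assumed companion to getComments above, not taken from the original source):

	def getVisibleComments(page_id):
		"""Hypothetical variant of getComments that hides soft-deleted rows."""
		return json.dumps(utils.query("SELECT * FROM tb_Comments WHERE chvPage = ? AND bIsDeleted = 0", (page_id,)))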
Example #39
0
	def getAllComments():
		"""Returns all comments."""
		return utils.query("SELECT * FROM tb_Comments")
Example #40
0
import utils
import time
import requests

document = utils.mydb["tags"]
tags = {tag["name"]: tag["_id"] for tag in document.find({})}

old_data = utils.query("select * from posts;")

new_data = list(
    map(
        lambda x: {
            "title":
            x["title"],
            "abstract":
            x["abstruct"],
            "view":
            int(x["view"]),
            "url":
            x["url"].lower().replace("/", "_"),
            "publish_time":
            int(time.mktime(time.strptime(x["time"], "%Y-%m-%d %H:%M:%S"))),
            "edit_time":
            int(
                time.mktime(time.strptime(x["updatetime"], "%Y-%m-%d %H:%M:%S")
                            )),
            "content":
            requests.post("http://127.0.0.1:50000/api/markdown", {
                "source": x["raw"]
            }).json()["html"],
            "raw":
Example #41
0
    pf = PackageFinder(find_links=[], index_urls=host, use_wheel=True, allow_external=[], allow_unverified=[], allow_all_external=False, allow_all_prereleases=False, process_dependency_links=False, session=session,)

    location = [Link(url, trusted=True)]
    req = InstallRequirement.from_line(package, None)
    versions = []
    for page in pf._get_pages(location, req):
        versions = versions + [version for _, _, version in pf._package_versions(page.links, package)]
    return versions

if __name__ == '__main__':
# e.g. add a new location
    url = "https://pypi.python.org/simple/"
    print url
    while True:
        #response = urllib2.urlopen(url)
        response = query(url)
        soup = BeautifulSoup(response.read(), 'html.parser')
        for link in soup.find_all("a"):
            package = link.get('href')
            try:
                versions = get_versions(package)
            except Exception:
                traceback.print_exc()
                continue
            for version in versions:
                #package_type = Type.objects.get(app_type = 'Django: Library')
                pkg, created = Package.objects.get_or_create(package_type=Type(name='Django'), name=package, version=version)
                if created:
                    print "found new package: " + package + "==" + version
                else:
                    print "package already exist: " + package + "==" + version
Example #42
0
 def github_query(self, url):
     return utils.query(url, auth = self.auth)
Example #43
0
 def dustcube_info(self):
     qresults = ut.query(float(self.glon), float(self.glat), coordsys='gal')
     if not qresults['success']:
         raise RuntimeWarning(
             'No successful distance determination in dust cube!')
     return qresults
Example #44
0
 def get_latest_sha(self, repo):
     url = BASE_URL.format(name = repo.repo_name())
     response = utils.query(url)
     data = response.text
     results = re.findall(COMMIT_URL.format(sha=r'(\d+)'), data)
     return results[1]