Example #1
def export_all(format, path, datas):
    """
    将所有结果数据导出到一个文件

    :param str format: 导出文件格式
    :param str path: 导出文件路径
    :param list datas: 待导出的结果数据
    """
    format = check_format(format, len(datas))
    timestamp = get_timestamp()
    name = f'all_subdomain_result_{timestamp}'
    path = check_path(path, name, format)
    logger.log('INFOR', f'The subdomain results of all main domains: {path}')
    row_list = list()
    for row in datas:
        row.pop('header')
        row.pop('response')
        row.pop('module')
        row.pop('source')
        row.pop('elapsed')
        row.pop('count')
        keys = row.keys()
        values = row.values()
        if format in {'xls', 'xlsx'}:
            values = check_value(values)
        row_list.append(Record(keys, values))
    rows = RecordCollection(iter(row_list))
    content = rows.export(format)
    save_data(path, content)
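
Examples #1, #9, and #12 all follow the same pattern: strip the fields that should not be exported from each result dict, wrap what is left in a Record, collect the rows in a RecordCollection, and let export() serialize them. Below is a minimal self-contained sketch of that pattern, assuming Record and RecordCollection come from the records library (whose export() is backed by tablib) and using made-up sample data:

from records import Record, RecordCollection

datas = [
    {'subdomain': 'a.example.com', 'ip': '10.0.0.1', 'header': '...'},
    {'subdomain': 'b.example.com', 'ip': '10.0.0.2', 'header': '...'},
]

row_list = []
for row in datas:
    row.pop('header', None)  # drop fields that should not be exported
    row_list.append(Record(list(row.keys()), list(row.values())))

rows = RecordCollection(iter(row_list))
print(rows.export('csv'))  # same tablib-backed formats as xls/xlsx/json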
Example #2
    def query(self, sql, columns=None, **kwargs):

        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8",
            "Connection": "keep-alive",
            "Host": "192.168.0.159:8007",
            "Referer": "http://192.168.0.159:8007/clustering",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36"
        }

        self.params.update({"q": sql})

        rep = requests.get(self.db_url, params=self.params, headers=headers)
        content = rep.text.split('\n')

        # Parse each non-empty line once and wrap it in a Record.
        rows_gen = (Record(parsed.keys(), parsed.values())
                    for parsed in (json.loads(row) for row in content if row.strip()))

        results = RecordCollection(rows_gen)

        return results
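
The RecordCollection built from the HTTP response can then be consumed like any other records result set. A short usage sketch, assuming the class above has been instantiated as db with a valid db_url and params (both hypothetical here):

results = db.query('select * from measurements limit 10')  # hypothetical query
first = results.first()        # RecordCollection helper
print(first.keys())
print(results.export('json'))  # serialize all rows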
Example #3
def recordsHandler():
    if not g.user:
        return redirect('/')

    if request.method == "POST":
        app.logger.info(request.method)
        req = request.form
        # Logout
        if 'logoutBtn' in req:
            app.logger.info("Logout user: "******"No medical records found.")
        return redirect('/medic')

    resList = []
    for res in result:
        print(res)
        resList.append(Record(res[0], res[1], res[2], res[3]))

    return render_template('records.html',
                           list=resList,
                           client_name=resList[0].name,
                           client_tel=resList[0].tel)
Example #4
def add():
    form = RecordForm()
    if form.validate_on_submit():
        record = Record()
        record.id = uuid.uuid4()
        record.value = request.form['value']
        record.timestamp = datetime.now()
        RECORDS.append(record)
        return redirect(url_for('home'))
    return render_template('add.html', form=form)
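
The view relies on a WTForms-style RecordForm with a value field and a module-level RECORDS list; neither is shown in the snippet, so the sketch below is only an assumption about what they might look like:

from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired

RECORDS = []  # in-memory store appended to by the add() view above

class RecordForm(FlaskForm):
    # Only 'value' is read by the view; the exact fields are an assumption.
    value = StringField('Value', validators=[DataRequired()])
    submit = SubmitField('Add')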
Example #5
    def query(self, sql, columns=None, **kwargs):

        rows = self.conn.execute(sql)

        row_gen = (Record(columns, row) for row in rows)

        # Convert psycopg2 results to RecordCollection.
        results = RecordCollection(row_gen)
        # Fetch all results if desired.
        # if fetchall:
        #     results.all()

        return results
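
Here the caller has to supply the column names itself, since a bare DB-API cursor row is just a tuple. A small self-contained sketch of how such a wrapper could be driven with sqlite3 (the SQLiteSource name and table are made up for illustration):

import sqlite3

from records import Record, RecordCollection

class SQLiteSource:
    # Hypothetical wrapper mirroring the query() method above.
    def __init__(self, path):
        self.conn = sqlite3.connect(path)

    def query(self, sql, columns=None, **kwargs):
        rows = self.conn.execute(sql)
        return RecordCollection(Record(columns, row) for row in rows)

db = SQLiteSource(':memory:')
db.conn.execute('CREATE TABLE t (id INTEGER, name TEXT)')
db.conn.execute("INSERT INTO t VALUES (1, 'a'), (2, 'b')")
print(db.query('SELECT id, name FROM t', columns=['id', 'name']).export('csv'))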
Example #6
def iquery(self, query, batches=100):
    cursor = self._conn.execute(text(query))

    columns = cursor.keys()
    history = []
    for i, row in enumerate(cursor, start=1):
        history.extend(
            list(RecordCollection(
                (Record(columns, _row) for _row in (row, )))))
        if i % batches == 0:
            yield history
            # Start a fresh list so the batch just yielded is not mutated downstream.
            history = []
    if history:
        yield history
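
A short sketch of how the batched generator might be consumed, processing one list of Record objects at a time (the db instance and query are assumptions):

for batch in db.iquery('SELECT * FROM events', batches=500):  # hypothetical table
    print(f'{len(batch)} records in this batch')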
Example #7
    def checkDuplicates(self, line):
        # Returns true for unique records, stores duplicates
        ret = True
        cancer = False
        s = line.strip().split(self.d)
        if self.col.Patient and s[self.col.Patient] in self.reps.ids:
            if self.col.Code and "8" in s[self.col.Code]:
                cancer = True
            # Sort duplicates and store for later
            rec = Record(s[self.col.Sex], s[self.col.Age], s[self.col.Patient],
                         s[self.col.Species], cancer, s[self.col.ID])
            self.reps.sortReplicates(rec)
            self.dups[s[self.col.ID]] = line
            ret = False
        return ret
Example #8
    def query(self, sql, columns=None, **kwargs):
        try:
            dsl = json.loads(sql)
            index_name = kwargs.pop("index_name", None)
            type_name = kwargs.pop("type_name", None)
            hits = self.db.search(body=dsl,
                                  index=index_name,
                                  doc_type=type_name,
                                  _source_include=columns)['hits']['hits']
            data_gen = (Record(line['_source'].keys(),
                               line['_source'].values()) for line in hits)
            result = RecordCollection(data_gen)
            return result
        except Exception as e:
            print(e)
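
This variant expects the Elasticsearch DSL as a JSON-encoded string and unwraps each hit's _source. A short usage sketch (the es_source instance and index name are assumptions):

import json

dsl = json.dumps({"query": {"match_all": {}}, "size": 10})
results = es_source.query(dsl, index_name='logs')  # hypothetical instance and index
for record in results:
    print(record.as_dict())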
Example #9
def export_all_results(path, name, format, datas):
    path = check_path(path, name, format)
    logger.log('ALERT', f'The subdomain result for all main domains: {path}')
    row_list = list()
    for row in datas:
        if 'header' in row:
            row.pop('header')
        if 'response' in row:
            row.pop('response')
        keys = row.keys()
        values = row.values()
        if format in {'xls', 'xlsx'}:
            values = check_value(values)
        row_list.append(Record(keys, values))
    rows = RecordCollection(iter(row_list))
    content = rows.export(format)
    save_data(path, content)
Example #10
def process_docs(docs_dataset):
    invert_index = {}
    for row in docs_dataset:
        record = Record(keys=docs_dataset.headers, values=row)
        logging.info("处理文档: %s" % record.id)
        if not record.doc.strip():
            logging.warning("文档内容为空")
            continue
        # 分词并获取词性
        words_pos = word_segment(record.doc)
        # 清洗单词
        words = clean_words(words_pos)
        word_frequency = get_word_frequency(words)
        logging.info("文档词频统计结果: %s" % word_frequency)
        for word, frequency in word_frequency.items():
            if word in invert_index:
                invert_index[word].append((record.id, frequency))
            else:
                invert_index[word] = [(record.id, frequency)]
    return invert_index
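
The returned mapping goes from each term to a list of (document id, frequency) pairs, so a lookup only has to sort one posting list. A short self-contained sketch with made-up data:

invert_index = {
    'search': [('doc-1', 3), ('doc-7', 1)],
    'engine': [('doc-1', 2)],
}

def lookup(term):
    # Documents containing the term, most frequent first.
    return sorted(invert_index.get(term, []), key=lambda p: p[1], reverse=True)

print(lookup('search'))  # [('doc-1', 3), ('doc-7', 1)]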
Example #11
def visit(elem, depth=0):
    content = elem[CONTENT_INDEX]
    # Parse the name
    name = content.text
    if name is None or name.strip() == "":
        name = get_name()

    # For <a> tags, extract the URL and the creation timestamp
    if content.tag == "a":
        url = content.get("href")
        created = content.get("add_date")
        record = Record(keys=["url", "created"], values=[url, created])
    else:
        record = None
    elem_obj = Element(name=name, data=record)
    for child in elem.findall(CHILD_XPATH):
        elem_obj.add_child(visit(child, depth + 1))
    return elem_obj
Example #12
def export_all(format, datas):
    format = check_format(format, len(datas))
    dpath = check_dpath()
    timestamp = get_timestamp()
    fpath = dpath.joinpath(f'all_subdomain_{timestamp}.{format}')
    row_list = list()
    for row in datas:
        row.pop('header')
        row.pop('response')
        row.pop('module')
        row.pop('source')
        row.pop('elapsed')
        row.pop('count')
        keys = row.keys()
        values = row.values()
        if format in {'xls', 'xlsx'}:
            values = check_value(values)
        row_list.append(Record(keys, values))
    rows = RecordCollection(iter(row_list))
    content = rows.export(format)
    save_data(fpath, content)
Example #13
    def giveInsulin(self, amount: float):
        print("Trying to deliver {} units of insulin.".format(amount))
        from records import Record
        r = Record()
        scrollRate = r.getScrollRate()
        lastDC = r.getDutyCycle()
        ratio = .03  # Not a set ratio, I have to design the gearbox first.

        try:
            import RPi.GPIO as GPIO
            GPIO.setmode(GPIO.BCM)
            GPIO.setup(17, GPIO.OUT)
            servo = GPIO.PWM(17, 50)
            servo.start(lastDC)

            for i in range(int(amount / scrollRate)):
                dutycycle = lastDC + (i * ratio)
                servo.ChangeDutyCycle(dutycycle)
                print(
                    "Servo dutycycle is now {}.\n{} units out of {} of insulin delivered as of now."
                    .format(dutycycle, i * scrollRate, amount))
                r.setDutyCycle(dutycycle)
                sleep(.5)

            servo.stop()
            GPIO.cleanup()

        except (ImportError, ModuleNotFoundError):
            print(
                "This is likely not running on a Raspberry Pi.\nIf it is, make sure RPi is installed for Python 3.\n\nRunning print loop now instead of sending servo commands."
            )
            print(amount / scrollRate)
            for i in range(int(amount / scrollRate)):
                dutycycle = lastDC + (i * ratio)
                print(
                    "Servo dutycycle is now {}.\n{} units out of {} of insulin delivered as of now."
                    .format(dutycycle, i * scrollRate, amount))
                r.setDutyCycle(dutycycle)
                sleep(.5)
Example #14
import json
from types import SimpleNamespace

import numpy as np
from sklearn.cluster import DBSCAN

# Record here is the project's own record class (its module is not shown in the snippet).

dist = 50
limit = 100

color_list = ("b.", "r.", "g.", "m.", "c.", "y.")

with open('jsondata.json') as f:
    x = json.load(f, object_hook=lambda d: SimpleNamespace(**d))

print(x.features[2].properties.filename)
X = np.zeros((1, 2))
list_rec=[]
weight_list = []

for i in range(len(x.features)):
    list_rec.append(Record(i, x.features[i].properties))
    X = np.vstack((X, [list_rec[i].lat, list_rec[i].lon]))
    list_rec[i].calc_weight()
    weight_list.append(list_rec[i].weight) 


# Drop the all-zeros seed row so that X and weight_list line up one-to-one.
X = X[1:]

#print(np.unique(cat_list,return_index=True))
#print(x.features[6].properties)

cluster = DBSCAN(eps=0.0001, min_samples=100).fit(X, y=None, sample_weight=weight_list)

dumps = []
cluster_count = np.unique(cluster.labels_)
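
cluster.labels_ assigns every point a cluster id, with -1 marking noise; a short continuation sketch showing how the per-cluster sizes could be tallied before plotting (the plotting step with color_list is assumed to follow):

labels, counts = np.unique(cluster.labels_, return_counts=True)
for label, count in zip(labels, counts):
    name = 'noise' if label == -1 else f'cluster {label}'
    print(f'{name}: {count} points')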
Example #15
def recordsPage(current_page):
    current_page.frame.pack_forget()
    current_page = Record(window, patientInfoPage)