Code Example #1
    def getparse(self):
        # Connect once and reuse the result rather than calling connect() twice.
        result = connect(self.url)
        if result is not None:
            intemp, outtemp = result
        else:
            intemp, outtemp = None, None
        t3 = 0
        h1 = 0
        l1 = 0
        tm = strftime('%H')
        td = strftime('%d/%m/%Y')
        db.session.add(Temp(intemp, outtemp, t3, h1, l1, tm, td))
        db.session.commit()
        if result is None:
            print('-' * 75)
            print(".getparse() failed; intemp and outtemp written to the database as None")
            print('-' * 75)
Code Example #2
def home():
    connect()
    if not session.get('logged_in'):
        return render_template('login.html')
    order = 0
    message = recent_message(order)
    cm = recent_comment()
    return render_template('index.html', message=message, comment=cm)
Code Example #3
    def getcurrent(self):
        # Connect once via self.url and reuse the result.
        result = connect(self.url)
        if result is not None:
            intemp, outtemp = result
        else:
            intemp, outtemp = None, None
            print('-' * 67)
            print(".getcurrent() failed; intemp and outtemp set to None")
            print('-' * 67)
        return intemp, outtemp, self.url
Code Example #4
File: crawler.py  Project: bentruitt/TopicStory
    def crawl(self):
        '''
        Not abstract. Begins a crawl.
        Crawls until MAX_VISITS is reached, unless:
            - self.decide_next_visit(conn, crawl_id, bad_urls) returns None
            - Five exceptions occur in a row
        '''
        # initialize variables
        visits = 0
        MAX_VISITS = 1000 # so we don't just keep crawling forever
        bad_urls = set() # when a url doesn't work, add url_id to bad_urls, ignore in future
        error_count = 0
        base_url_string = self.base_url_string
        conn = connect()

        # initialize logging
        initialize_logging(base_url_string)
        start_time = time.time()
        logging.info('STARTING CRAWL AT TIME: {0}'.format(util.time_string(start_time)))

        # initialize database for this crawl
        base_url_id = queries.insert_url(conn, base_url_string)
        source_id = queries.insert_source(conn, base_url_string)
        crawl_id = queries.insert_crawl(conn, base_url_string)

        while True:
            if error_count == 5:
                logging.error('Too many exceptions in a row, exiting.')
                break
            visit_url = self.decide_next_visit(conn, crawl_id, bad_urls)
            if visit_url is None:
                logging.info('Finished crawling, no more urls to visit.')
                break
            try:
                logging.info('Visiting {}'.format(visit_url['url']))
                self.visit(conn, crawl_id, source_id, visit_url)
                error_count = 0
            except Exception:
                logging.error('Error when downloading {0}'.format(visit_url['url']))
                logging.error(traceback.format_exc())
                bad_urls.add(visit_url['id'])
                error_count += 1
            visits += 1
            if visits == MAX_VISITS:
                logging.info('Finished crawling, reached max visits of {}'.format(MAX_VISITS))
                break
            self.sleep()
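
The loop above depends on two hooks, decide_next_visit(conn, crawl_id, bad_urls) and visit(conn, crawl_id, source_id, visit_url). A minimal subclass sketch, purely illustrative: the base-class name Crawler and the helper calls inside are assumptions, not taken from the TopicStory project.

class SingleSourceCrawler(Crawler):  # base-class name assumed
    def decide_next_visit(self, conn, crawl_id, bad_urls):
        # Return the next {'id': ..., 'url': ...} row, or None when the
        # frontier is empty; next_unvisited_url is a hypothetical helper.
        return queries.next_unvisited_url(conn, crawl_id, exclude=bad_urls)

    def visit(self, conn, crawl_id, source_id, visit_url):
        # Fetch the page and record the visit; both helpers are hypothetical.
        html = util.download(visit_url['url'])
        queries.insert_visit(conn, crawl_id, source_id, visit_url['id'], html)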
Code Example #5
    def __init__(self, host: str = "127.0.0.1", port: int = 6379, password: str = "",
                 encode: str = "utf-8", debug: bool = False):
        self.host = host
        self.port = port
        self.password = password
        self.encode = encode
        self.debug = debug
        self.conn = connect(host=host, port=port)

        self.String = String(self)
        self.Hash = Hash(self)
        self.List = List(self)
        self.Set = Set(self)
        self.Zset = Zset(self)
        self.Key = Key(self)

        if self.password:
            self.raw(f"auth {self.password}")
Code Example #6
        print("Fatal Error! Ending connection.")
        quit()


def main(pw, key):
    for new_input in iter(lambda: getch(), b"q"):
        try:
            if int(new_input) in range(1, 6):
                handler(new_input, pw, key)
                interface.main_interface()
        except (ValueError, TypeError):
            # Ignore keypresses that do not parse as a number.
            pass


if __name__ == "__main__":
    conn = connect()
    cursor = conn.cursor()

    cursor.execute("select * from Secret_Hash;")

    if cursor.rowcount == 0:
        print(">> You need to setup a password.")
        set_password(getpass())

    print(">> Please input Password Manager secret password")
    password = str.encode(getpass())
Code Example #7
def get_db():
    db = getattr(g, '_database', None)
    if db is None:
        db = connect()
        g._database = db
    return db
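
In practice this pattern is paired with a teardown handler that closes the connection when the application context ends. A minimal companion sketch, assuming app is the Flask application object and the connection has a close() method:

@app.teardown_appcontext
def close_db(exception):
    # Close the cached connection, if one was opened during this context.
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()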
Code Example #8
File: run_topics.py  Project: bentruitt/TopicStory
import datetime
import pickle

import topics
import util
from conn import connect


def run_topics(start_date=datetime.date(2017, 2, 20),
               end_date=datetime.date(2017, 2, 26),
               num_topics=50):
    print('creating topics...')
    print('start date: {}'.format(start_date))
    print('end date: {}'.format(end_date))
    print('num topics: {}'.format(num_topics))

    conn = connect()

    articles = util.load_articles(conn, start_date, end_date)
    documents = articles['text']
    topic_pipeline = topics.TopicPipeline(num_topics=num_topics)
    topic_pipeline.fit(documents)

    assigned_topics = topic_pipeline.topics.argmax(axis=1)
    articles['topic'] = assigned_topics
    articles = articles.drop(['text'], axis=1)
    topic_pipeline.articles = articles

    store_nmf_results(conn, num_topics, start_date, end_date, articles,
                      topic_pipeline)

    conn.close()
Code Example #9
#!/usr/bin/env python3
from conn import connect
from read_zabbix import get_traffic
from read_zabbix import get_item_id_1
import time
start_time = time.time()
connection = connect()
itemid = (132483,)

traffic, clock = get_traffic(connection, itemid)

for value, ts in zip(traffic, clock):
    print(f'{value / 1000000:.2f} Mbps -- {ts}')

connection.close()
duration = time.time() - start_time
print("it takes about {:.2f} sec".format(duration))
Code Example #10
from conn import connect

Session = connect()
Code Example #11
def create_index():
    es = connect()
    # Create the wiki index; ignore=400 suppresses the error raised
    # when the index already exists.
    es.indices.create(index='wiki', ignore=400)

    return es
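
A hypothetical usage sketch; the document fields below are invented for illustration:

es = create_index()
# Field names here are made up; any JSON-serializable body works.
es.index(index='wiki', id=1, body={'title': 'Example page', 'text': 'Hello'})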
Code Example #12
            sys.exit(0)
        term = input(
            "Term (everything after space is ignored. Press 'q' to stop):")
        if term == 'q':
            print("Goodbye !")
            sys.exit(0)
        k = input("Number of desired results (Press 'q' to stop):")
        if k == 'q':
            print("Goodbye !")
            sys.exit(0)

        if ' ' in term:
            # Keep only the text before the first space, as the prompt promises.
            term = term[:term.find(' ')]
        k = int(k)
        tmpdir = None
        try:
            client = connect()
            if method == 'simple':
                res = simplesearch(client, term, k)
            elif method == 'rerank':
                res = rerankedsearch(client, term, k, inverve_score)
            else:
                res, tmpdir = memefficientrerankedsearch(
                    client, term, k, inverve_score)
            for hit in res:
                print(str(hit).encode('utf-8'))
        finally:
            if tmpdir is not None:
                rmtree(tmpdir)
            es_close(client)