Example #1
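A Tornado request handler's get method. It appears to back a task-compose page: an optional id query argument selects an existing task to edit, and self.db.get (which looks like a torndb-style helper that returns a single row or None) loads it before the template is rendered.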
    def get(self):
        # "id" is an optional query argument; when it is present, fetch the
        # matching task so the compose page can be pre-filled for editing.
        taskId = self.get_argument("id", None)
        task = None

        if taskId:
            task = self.db.get("select * from tasks where id = %s", taskId)

        self.render("task_compose.html", task=task, mem_topics=getTopics(self))
Example #2
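The entry point for the Betelbot server: it installs a SIGINT handler, reads a JSON config, sets the root logger's level from it, and starts the pub/sub server under Tornado's IOLoop. JsonConfig, BetelbotServer, BetelbotConnection, signalHandler, and getTopics are project-local names imported elsewhere in the source module.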
def main():
    # Exit cleanly on Ctrl-C.
    signal.signal(signal.SIGINT, signalHandler)

    cfg = JsonConfig()

    # Set the root logger's level from the JSON config.
    logger = logging.getLogger('')
    logger.setLevel(cfg.general.logLevel)

    # Start the Betelbot pub/sub server on the configured port and hand
    # control to Tornado's I/O loop.
    server = BetelbotServer(connection=BetelbotConnection, topics=getTopics())
    server.listen(cfg.server.port)

    IOLoop.instance().start()
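The signalHandler referenced above is not part of this fragment. A minimal sketch of what such a handler could look like, assuming it simply stops the event loop on Ctrl-C (the real Betelbot handler may do more):

import signal

from tornado.ioloop import IOLoop


def signalHandler(signum, frame):
    # Standard signal-handler signature; stop Tornado's I/O loop so the
    # process can unwind and exit cleanly. Hypothetical implementation.
    IOLoop.instance().stop()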
Example #3
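An initialization hook on BetelbotServer. It resolves the topic set (falling back to getTopics()), gives every topic an empty subscriber list, and seeds self.data with defaults before layering the caller's keyword arguments on top.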
    def onInit(self, **kwargs):
        logging.info(BetelbotServer.LOG_SERVER_RUNNING)

        # Use the topics passed in by the caller, falling back to the
        # default set, and start each topic with no subscribers.
        topics = kwargs.get(BetelbotServer.PARAM_TOPICS, getTopics())
        topicSubscribers = {key: [] for key in topics}
        defaults = {
            BetelbotServer.PARAM_TOPICS: topics,
            BetelbotServer.PARAM_TOPIC_SUBSCRIBERS: topicSubscribers,
            BetelbotServer.PARAM_SERVICES: {}
        }
        # The second argument appears to control overwriting: install the
        # defaults first, then layer kwargs on top without clobbering them.
        self.data.update(defaults, True)
        self.data.update(kwargs, False)
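The two-argument update calls are not the built-in dict API; self.data looks like a project-specific store whose second argument controls whether existing keys are overwritten. A rough sketch of that contract, purely as an assumption about its behavior:

class DataStore(object):
    # Hypothetical stand-in for the server's self.data container.

    def __init__(self):
        self._data = {}

    def update(self, values, overwrite):
        # Merge values in; when overwrite is False, keys that already
        # exist keep their current value.
        for key, value in values.items():
            if overwrite or key not in self._data:
                self._data[key] = value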
Example #4
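The entry point for a simple logging client: it connects to the Betelbot server, subscribes one callback to every known topic, and runs the Tornado event loop so published messages can be printed as they arrive.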
def main():
    # Start up a Betelbot client and subscribe to all topics. When data is
    # received, print it to the console.
    #
    # The main purpose of this script is to log messages.

    signal.signal(signal.SIGINT, signalHandler)

    cfg = JsonConfig()

    logger = logging.getLogger('')
    logger.setLevel(cfg.general.logLevel)

    # Connect to the Betelbot server on localhost at the configured port.
    client = Client('', cfg.server.port, BetelbotClientConnection)
    conn = client.connect()

    # Register the same callback on every known topic.
    topics = getTopics()
    for topic in topics:
        conn.subscribe(topic, onTopicPublished)

    IOLoop.instance().start()
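onTopicPublished is defined elsewhere in the script. Since this example exists for logging, a minimal callback might just print whatever arrives; the parameter list below is an assumption, as the real connection may pass arguments differently:

def onTopicPublished(topic, data=None):
    # Hypothetical signature: print the topic name and its payload.
    print('%s: %s' % (topic, data))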
Example #5
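A command-line topic extractor: argparse collects a target URL plus tuning options, a Crawler fetches the page text, and a topic.Topic instance reports the most common words and phrases.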
if __name__ == '__main__':
    """
    example urls:
    url = 'http://www.amazon.com/Cuisinart-CPT-122-Compact-2-Slice-Toaster/dp/B009GQ034C/ref=sr_1_1?
                s=kitchen&ie=UTF8&qid=1431620315&sr=1-1&keywords=toaster'
    url = 'http://blog.rei.com/camp/how-to-introduce-your-indoorsy-friend-to-the-outdoors/'
    url = 'http://www.cnn.com/2013/06/10/politics/edward-snowden-profile/'
    url = 'http://jakeaustwick.me/scraping-content-with-readability-and-python/'
    """

    # Define the command-line interface.
    parser = argparse.ArgumentParser(description='given a url, find the most common topic words')
    parser.add_argument('url', help='the url to crawl')
    parser.add_argument('--stemming', action='store_true', help='perform stemming on the extracted words')
    parser.add_argument('-s', '--stopWordFile', default='stop_words', help='stop word file path')
    parser.add_argument('-w', '--wordPattern', default=r'\w+[-_]\w+|\w+',
                        help=r'regular expression defining the word pattern, default is \w+[-_]\w+|\w+')
    parser.add_argument('-d', '--sentDelimiter', default=r'.,!?:;',
                        help='delimiters used to split short sentences, default is .,!?:;')
    parser.add_argument('-n', '--wordCnt', default=6, type=int, help='number of words to be returned')
    parser.add_argument('-m', '--phraseCnt', default=4, type=int, help='number of phrases to be returned')
    args = parser.parse_args()

    # Crawl the page once, then mine the extracted text for the most
    # common topic words and phrases.
    crawler = Crawler(args.url)
    texts = crawler.getTexts()
    topicFinder = topic.Topic(texts, args)
    topicFinder.getTopicsHelper(texts)
    res = topicFinder.getTopics()
    print(res)
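Assuming the script were saved as, say, topic_finder.py (a hypothetical name), a typical invocation would be python topic_finder.py 'http://www.cnn.com/2013/06/10/politics/edward-snowden-profile/' -n 10, which prints the ten most common topic words for that article.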