def download_user_review(url):
  try:
    f = get_url(url)

    page = html.parse(f)
    root = page.getroot()

    if len(root.cssselect("div.error404")) > 0:
      #print url + " 404'ed"
      return {}

    meta = html.tostring(root.cssselect("#player_review div.body div.user_reviews")[0])
    #@TODO parse meta
    if len(root.cssselect("#player_score_details div.body dl.review_details")) > 0:
      score_details = html.tostring(root.cssselect("#player_score_details div.body dl.review_details")[0])
    else:
      score_details = "No Details"
    body = html.tostring(root.cssselect("#player_review_body")[0])

    ret = {}
    ret['meta'] = meta
    ret['score_details'] = score_details
    ret['body'] = body
    #@TODO parse body
    ret['url'] = url
    return ret

    #ipdb.set_trace()
  except:
    traceback.print_exc()
    gmail.send("exception!", "*****@*****.**")
    ipdb.set_trace()
Code Example #2
def send():
    gmail = GMail('*****@*****.**', 'tuananh1k95')
    msg = Message('message', to='*****@*****.**', text="Hello world")

    gmail.send(msg)

    return "Sended"
Code Example #3
File: web_resource.py, Project: aautio/ezmlm-sync
def poll(listname):
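    # Fetch the subscriber list for `listname` from its configured URL; if any
    # erroneous entries are found, report them by email before returning.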
    emails, errors = fetch_and_split(urls[listname])

    if errors:
        gmail.send(error_email, "Erroneous emails detected: %s" % str(errors))

    return emails
Code Example #5
def send_success_mail(email_address, output_messages):
    subject = "Compile Success!"
    body = "This is just to let you know that your latest submissions to the "
    body += "Google AI Challenge has successfully compiled. Here is the "
    body += "output of the compile script, in case you're curious:\n\n"
    body += output_messages
    body += "\n\nSincerely,\nThe Compile Script"
    gmail.send(email_address, subject, body)
Code Example #6
def get_url(url):
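  # Throttle the request, then fetch the URL; on failure, email an alert and
  # retry indefinitely by recursing.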
  sleep(3)
  try:
    f = urllib.urlopen(url)
    return f
  except:
    gmail.send("couldn't open the url", "*****@*****.**")
    return get_url(url)
Code Example #7
File: mico.py, Project: diegorys/mico-robot-emocional
def takePicture():
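    # Take a picture with the webcam and notify the user by email; the Spanish
    # message reads "MiCo has taken the attached photo at the user's request".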
    global numPictures
    global mailto

    numPictures += 1
    webcam.takePicture()
    gmail.send(mailto, "Foto sacada por MiCo",
               "MiCo ha sacado la foto adjunta a petición del usuario.")
Code Example #9
def get_url(url):
    sleep(3)
    try:
        f = urllib.urlopen(url)
        return f
    except:
        gmail.send("couldn't open the url", "*****@*****.**")
        return get_url(url)
Code Example #10
def itunes_get_url(url):
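    # Throttled fetch through the global iTunes opener; on failure, email an
    # alert and retry indefinitely by recursing.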
    sleep(3)
    global itunesopener
    try:
        f = itunesopener.open(url)
        return f
    except:
        gmail.send("couldn't open the url", "*****@*****.**")
        return itunes_get_url(url)
Code Example #11
def itunes_get_url(url):
  sleep(3)
  global itunesopener
  try:
    f = itunesopener.open(url)
    return f
  except:
    gmail.send("couldn't open the url", "*****@*****.**")
    return itunes_get_url(url)
Code Example #12
def get_url(url):
    try:
        if url.find("http://www.gamespot.com") == -1:
            url = "http://www.gamespot.com" + url
        f = urllib.urlopen(url)
        return f
    except:
        gmail.send("couldn't open the url", "*****@*****.**")
        return get_url(url)
Code Example #13
def get_url(url):
  try:
    if url.find("http://www.gamespot.com") == -1:
      url = "http://www.gamespot.com" + url
    f = urllib.urlopen(url)
    return f
  except:
    gmail.send("couldn't open the url", "*****@*****.**")
    return get_url(url)
Code Example #14
def get_metadata_and_reviews(url):
  try:
    f = get_url(url)

    page = html.parse(f)
    root = page.getroot()

    if html.tostring(root).find("404 - Page Not Found") != -1:
      #print "Gamespot gave a 404 for this page."
      return None, None, None

    # get list of platforms
    platforms = [e.text_content() for e in root.cssselect("#main ul.platformFilter li") if e.text_content() != 'All Platforms']
    # (html/select data #{[:div#main :ul.platformFilter :li]})

    # scrape the game details
    details_url = "http://www.gamespot.com" + root.cssselect("#mini .mini_col_wrap div.contentNav ul.contentNav .summaryNavItem ul.contentSubNav li.techinfoSubNavItem div.subNavItemWrap a")[0].get("href")
    f = urllib.urlopen(details_url)
    details_page = html.parse(f)
    details_root = details_page.getroot()
    details = html.tostring(details_root.cssselect("#techInfo dl.game_info")[0])
    # @TODO parse publisher, developer, release date, ESRB, ESRB descriptors, official site
    metadata = {}
    metadata['details'] = details
    metadata['platforms'] = platforms

    # get reviews link, pass to get_reviews to see what happens
    reviews_url = "http://www.gamespot.com" + root.cssselect("#mini .mini_col_wrap div.contentNav ul.contentNav li.reviewsNavItem div.navItemWrap a")[0].get("href")
    gamespot_review, user_reviews = get_reviews(reviews_url)

    # get related games under category related games, might need to iterate through pages of related games
    related_games_url = "http://www.gamespot.com" + root.cssselect("#mini .mini_col_wrap div.contentNav ul.contentNav .summaryNavItem ul.contentSubNav li.relatedSubNavItem div.subNavItemWrap a")[0].get("href")
    f = get_url(related_games_url)
    page = html.parse(f)
    root = page.getroot()
    related_games = [html.tostring(l).strip() for l in root.cssselect("#main .listModule.gamesModule .body div.games ol.games li")]
    metadata['related_games'] = related_games

    same_universe_url = "http://www.gamespot.com" + root.cssselect("#main div.relatedGamesNav div.relatedGamesNavWrap div.navItems ol.navItems li.universeNavItem a")[0].get('href')
    f = get_url(same_universe_url)
    page = html.parse(f)
    root = page.getroot()
    same_universe = [html.tostring(l).strip() for l in root.cssselect("#main .listModule.gamesModule .body div.games ol.games li")]
    metadata['same_universe'] = same_universe
    
    whatnot = {}
    whatnot['metadata'] = metadata
    whatnot['gamespot_review'] = gamespot_review
    whatnot['user_reviews'] = user_reviews

    return metadata, gamespot_review, user_reviews

  except Exception as e:
    traceback.print_exc()
    gmail.send("exception!", "*****@*****.**")
    ipdb.set_trace()
Code Example #15
def get_reviews(url):
    try:
        f = get_url(url)

        page = html.parse(f)
        root = page.getroot()

        # check for user reviews
        #ipdb.set_trace()
        #if html.tostring(page).find("Be the First to tell the world what you think of ") != -1:
        user_reviews = []
        if len(root.cssselect("#main .userReviewsModule")) == 0:
            #print "No user reviews!"
            user_reviews = None
        else:
            root = page.getroot()
            main = root.cssselect("#main .userReviewsModule")[0]
            review_links = main.cssselect("a.continue")
            for r in review_links:
                if r.text_content() == "Read the Review":
                    # download the user review here
                    #print r.get("href")
                    user_reviews.append(download_user_review(r.get("href")))
                    #print "User review: ", ret

            #ipdb.set_trace()
            #print "DO SOMETHING!!"

        # check for gamespot reviews
        review_box = root.cssselect(
            ".navItem.reviewsNavItem.navItemOpen.navItemActive")[0]
        # ensure this is actually the review box
        if html.tostring(review_box).find("Reviews") == -1:
            print "Encountered wrong box for user reviews."
            ipdb.set_trace()

        gamespot_review = None
        if html.tostring(review_box).find("GameSpot Review") != -1:
            elements = review_box.cssselect("a.subNavItemAction")
            for e in elements:
                if html.tostring(e).find("GameSpot Review") != -1:
                    gamespot_review_url = e.get("href")
                    gamespot_review = return_gamespot_review(
                        gamespot_review_url)
                    #print "Found a gamespot review at ", gamespot_review_url, gamespot_review
                    break

        #import ipdb
        #ipdb.set_trace()
        #print html.tostring(page)
        return gamespot_review, user_reviews

    except:
        traceback.print_exc()
        gmail.send("exception!", "*****@*****.**")
        ipdb.set_trace()
Code Example #16
def send_fail_mail(email_address, output_messages, error_messages):
    subject = "Compile Failure!"
    body = "Unfortunately, your latest submission to the Google AI Challenge "
    body += "did not compile successfully. Check out the error messages below "
    body += "for more information as to why this might be. Fix as many of the "
    body += "errors as you can, and then submit your code again.\n\n"
    body += "COMPILER OUTPUT\n\n" + output_messages + "\n\n"
    body += "COMPILER ERRORS\n\n" + error_messages + "\n\n"
    body += "Sincerely,\nThe Compile Script"
    gmail.send(email_address, subject, body)
Code Example #18
def get_url(url, depth=0):
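    # Throttled fetch with a retry counter: after 10 failed attempts an alert
    # email is sent; relative GameSpot paths are prefixed with the site root.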
    if depth == 10:
        gmail.send("gamespot problems.", "*****@*****.**")
    try:
        sleep(5)
        if url.find("http://www.gamespot.com") == -1:
            url = "http://www.gamespot.com" + url
        f = urllib.urlopen(url)
        return f
    except:
        return get_url(url, depth + 1)
Code Example #19
def get_reviews(url):
  try:
    f = get_url(url)

    page = html.parse(f)
    root = page.getroot()

    # check for user reviews
    #ipdb.set_trace()
    #if html.tostring(page).find("Be the First to tell the world what you think of ") != -1:
    user_reviews = [] 
    if len(root.cssselect("#main .userReviewsModule")) == 0:
      #print "No user reviews!"
      user_reviews = None
    else:
      root = page.getroot()
      main = root.cssselect("#main .userReviewsModule")[0]
      review_links = main.cssselect("a.continue")
      for r in review_links:
        if r.text_content() == "Read the Review":
          # download the user review here
          #print r.get("href")
          user_reviews.append(download_user_review(r.get("href")))
          #print "User review: ", ret

      #ipdb.set_trace()
      #print "DO SOMETHING!!"

    # check for gamespot reviews
    review_box = root.cssselect(".navItem.reviewsNavItem.navItemOpen.navItemActive")[0]
    # ensure this is actually the review box
    if html.tostring(review_box).find("Reviews") == -1:
      print "Encountered wrong box for user reviews."
      ipdb.set_trace()

    gamespot_review = None
    if html.tostring(review_box).find("GameSpot Review") != -1:
      elements = review_box.cssselect("a.subNavItemAction")
      for e in elements:
        if html.tostring(e).find("GameSpot Review") != -1:
          gamespot_review_url = e.get("href")
          gamespot_review = return_gamespot_review(gamespot_review_url)
          #print "Found a gamespot review at ", gamespot_review_url, gamespot_review
          break

    #import ipdb
    #ipdb.set_trace()
    #print html.tostring(page)
    return gamespot_review, user_reviews

  except:
    traceback.print_exc()
    gmail.send("exception!", "*****@*****.**")
    ipdb.set_trace()
Code Example #20
def get_url(url, depth=0):
  if depth == 10:
    gmail.send("gamespot problems.", "*****@*****.**")
  try:
    sleep(5)
    if url.find("http://www.gamespot.com") == -1:
      url = "http://www.gamespot.com" + url
    f = urllib.urlopen(url)
    return f
  except:
    return get_url(url, depth+1)
Code Example #21
File: emails.py, Project: aautio/ezmlm-sync
    def handle(self, email):
        # compare and alter subscriptions
        result = listregex.search(email)
        listname = self.get_listname(result.group(1))
        list_emails = result.group(3).split()
        
        url_emails = web_resource.poll(listname)
        
        sub, unsub = diffs_of_lists(url_emails, list_emails)

        sub = map(self.to_subscribe_cmd(listname), sub)
        unsub = map(self.to_unsubscribe_cmd(listname), unsub)

        if sub + unsub:
            gmail.send(sub + unsub)
Code Example #22
File: main_server.py, Project: tipith/CamTransport
def on_image(msg):
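    # Persist incoming camera images; for movement-triggered images, email an
    # alert for the first picture of each group, skipping daytime hours and
    # enforcing a one-hour grace period between alerts.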
    if msg['type'] == Messaging.ImageMessage.TYPE_PERIODICAL:
        filename = Datastore.add_image(msg['src'], msg['time'], msg['data'])
        Datastore.db_store_image(msg['src'], msg['time'], filename, len(msg['data']))
    elif msg['type'] == Messaging.ImageMessage.TYPE_MOVEMENT:
        filename = Datastore.add_image_movement(msg['src'], msg['time'], msg['uuid'], msg['data'])
        Datastore.db_store_image_movement(msg['src'], msg['time'], filename, msg['uuid'], len(msg['data']))
        # send only the first picture belonging to a group of pictures from a source. uuid is the group identifier
        if msg['src'] not in email_alert or email_alert[msg['src']] != msg['uuid']:
            email_alert[msg['src']] = msg['uuid']

            if not (datetime.time(8, 0) < datetime.datetime.now().time() < datetime.time(15, 0)):
                if calendar.timegm(time.gmtime()) > email_alert['last'] + 3600:
                    email_alert['last'] = calendar.timegm(time.gmtime())
                    gmail.send('Activity from cam %i' % msg['src'], 'See attachment.', filename)
                else:
                    main_logger.info('skip email alert due to grace period, last alert %u s ago' %
                                     (calendar.timegm(time.gmtime()) - email_alert['last']))
            else:
                main_logger.info('skip email alert during day')
    elif msg['type'] == Messaging.ImageMessage.TYPE_TEST:
        filename = Datastore.add_test_image(msg['src'], msg['time'], msg['data'])
        main_logger.info('wrote {}'.format(filename))
Code Example #23
File: tasks.py, Project: gmission/gmission
def send_email(subject, body, receiver):
    logger.info('send email [%s] to : %s' % (subject, receiver,))
    gmail.send(subject, body, receiver)
Code Example #24
    body.write(f'{prefix}<h1>http bandwidth</h1><ul>')
    for entry in sorted(http_entries_by_bandwidth.items(),
                        key=lambda i: i[1],
                        reverse=True):
        url = kibana.url(f'http.hostname:{entry[0]}', start)
        body.write(
            f'<li><a href="{url}">{entry[0]}</a> {filesize(entry[1])}</li>')
    body.write(f'</ul>{postfix}')

    body.write(f'{prefix}<h1>tls connections</h1><ul>')
    for entry in sorted(tls_entries_by_count.items(),
                        key=lambda i: i[1],
                        reverse=True):
        url = kibana.url(f'tls.sni:{entry[0]}', start)
        body.write(f'<li><a href="{url}">{entry[0]}</a> {entry[1]}</li>')
    body.write(f'</ul>{postfix}')

    body.write(f'{prefix}<h1>tls bandwidth</h1><ul>')
    for entry in sorted(tls_entries_by_bandwidth.items(),
                        key=lambda i: i[1],
                        reverse=True):
        url = kibana.url(f'tls.sni:{entry[0]}', start)
        body.write(
            f'<li><a href="{url}">{entry[0]}</a> {filesize(entry[1])}</li>')
    body.write(f'</ul>{postfix}')

    body.write('</body></html>')

    gmail.send(None, 'frosty http-stats', body.getvalue(), html=True)
Code Example #25
File: delivery.py, Project: thackray/weatherbot
from gmail import send
from pytools import pload
import sys

def make_message(info):
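    # Build the subject line and HTML body of the forecast email from the
    # station, forecast time, and high/low values in `info`.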
    subj = "Weatherbot forecast for %s"%info['station']
    mess = "Forecast valid for %s<br>"%info['fctime']
    mess += "High: %d +/- %d <br> Low: %d +/- %d"%(info['high'],info['hconf'],
                                           info['low'],info['lconf'])

    return subj, mess


if __name__=='__main__':
    
    tag = sys.argv[1]

    recipients = open('/home/thackray/weatherbot/mailinglist.%s'%tag,
                         'r').read().split('\n')

    info = pload('/home/thackray/weatherbot/'+tag+'.fc')

    subj, mess = make_message(info)

    for recip in recipients:
        send(recip, subj, mess)




Code Example #26
import gmail

gmail.send("*****@*****.**", "test", "test")
Code Example #27
def return_gamespot_review(url, just_return_review=False):
  try:
    f = get_url(url)

    review = ""
    comments = ""
    gamespot_score = ""
    gamespot_score_word = ""
    metacritic_score = ""
    metacritic_reviews = ""
    metacritic_reviews_link = ""
    ret = {}

    page = html.parse(f)
    root = page.getroot()

    review = []
    review.append(html.tostring(root.cssselect("#main")[0]))
    #print review[0]

    if just_return_review:
      return review[0]

    # check if review has multiple pages
    if len(root.cssselect("#main .pageNav")) > 0:
      # get the number of pages to scrap
      review_links = root.cssselect("#main .pageNav .pages li a")
      for r in review_links:
        review.append(return_gamespot_review("http://www.gamespot.com" + r.get("href"), just_return_review=True))

    gamespot_score = root.cssselect("#side")[0].cssselect("li.editor_score span.data")[0].text_content()
    gamespot_score_word = root.cssselect("#side")[0].cssselect("li.editor_score span.scoreword")[0].text_content()
    if root.cssselect("#side")[0].cssselect("li.review_score span.more")[0].text_content() != "No Reviews":
      #print "Metacritic reviews found"
      metacritic_score = root.cssselect("#side")[0].cssselect("li.review_score span.scoreWrap a")[0].text_content()
      metacritic_reviews = root.cssselect("#side")[0].cssselect("li.review_score span.more span")[0].text_content()
      metacritic_reviews_link = root.cssselect("#side")[0].cssselect("li.review_score span.scoreWrap a")[0].get("href")
    else:
      #print "No metacritic reviews"
      metacritic_score = "No Reviews"
      metacritic_reviews = "No Reviews"
      metacritic_reviews_link = "No Reviews"


    comments = root.cssselect("ul#comments_list li.comment")
    comments = [html.tostring(c) for c in comments]
    # check to see if there are more pages of comments
    if len(root.cssselect("#post_comment .pagination")) > 0:
      # get number of comments
      nav = root.cssselect("#post_comment .pagination")[0]
      num_pages = int(nav.cssselect("ul.pages li.last a")[0].text_content())
      for i in range(num_pages-1):
        link = nav.cssselect(".page_flipper a")[0]

        # parse the parameters for the comments pagination manually
        rel = str(link.get("rel"))
        j = rel.find(" nofollow")
        rel = rel[0:j]
        rel = rel.replace("{", "")
        rel = rel.replace("}", "")
        rel = rel.replace("'", "")
        rel = rel.split(",")
        params = {}
        for r in rel:
          r = r.split(":")
          params[r[0]] = r[1]
        params = urllib.urlencode(params)

        href = "http://www.gamespot.com/pages/ajax/load_comments.php?page=" + str(i+1)
        try:
          f = urllib.urlopen(href, params)
        except:
          traceback.print_exc()
          ipdb.set_trace()

        #ipdb.set_trace()
        response = json.loads(f.read())
        new_comments = html.fromstring(response['template'])
        for c in new_comments.cssselect("ul#comments_list li.comment"):
          comments.append(html.tostring(c))

    """
    print review
    print gamespot_score
    print gamespot_score_word
    print metacritic_score
    print metacritic_reviews
    print metacritic_reviews_link
    print comments
    """

    #ipdb.set_trace()
    #gamespot_score = page.cssselect("#id.

    ret['review'] = review
    ret['comments'] = comments
    ret['gamespot_score'] = gamespot_score
    ret['gamespot_score_word'] = gamespot_score_word
    ret['metacritic_score'] = metacritic_score
    ret['metacritic_reviews'] = metacritic_reviews
    ret['metacritic_reviews_link'] = metacritic_reviews_link
    #@TODO parse gamespot review & comments

    return ret

  except:
    traceback.print_exc()
    gmail.send("exception!", "*****@*****.**")
    ipdb.set_trace()

  return ret
Code Example #28
def get_everything(url):
    try:
        for c in [chr(i) for i in range(ord('A'),
                                        ord('Z') + 1)
                  ] + ['*']:  # for every letter from A to Z & the asterisk

            def get_games(url):
                f = get_url(url)
                page = html.parse(f)
                root = page.getroot()
                games_list = root.cssselect("#selectedcontent div.column li a")
                genre = root.cssselect("#title ul.breadcrumb li a")
                genre = unicode(genre[-1].text_content())
                for g in games_list:
                    title = unicode(g.text_content())
                    href = g.get("href")
                    num_existing = ItunesGame.query.filter_by(href=href)
                    if num_existing.count() == 0:
                        # store data about this game to a file
                        global next_id
                        next_id += 1
                        while os.path.exists(str(next_id) + ".txt") == True:
                            next_id += 1
                        f = open(str(next_id) + ".txt", "wb")
                        s = {}
                        s['title'] = title
                        s['genre'] = genre
                        pickle.dump(s, f)
                        f.close()
                        i = ItunesGame(href=href,
                                       filename=str(next_id) + ".txt")
                        session.commit()
                        print "saved " + title
                    else:
                        # add data about this game to the file
                        i = num_existing.first()
                        f = open(i.filename, "rb")
                        data = pickle.load(f)
                        f.close()

                        old_title = data['title']
                        titles = []
                        if type(old_title) in [str, unicode]:
                            if old_title != title:
                                titles.append(old_title)
                                titles.append(title)
                        if type(old_title) == list:
                            titles = old_title
                            if title not in titles:
                                titles.append(title)
                        if len(titles) == 0:
                            titles = old_title
                        data['title'] = titles

                        old_genre = data['genre']
                        genres = []
                        if type(old_genre) in [str, unicode]:
                            if old_genre != genre:
                                genres.append(old_genre)
                                genres.append(genre)
                        if type(old_genre) == list:
                            genres = old_genre
                            if genre not in genres:
                                genres.append(genre)
                        if len(genres) == 0:
                            genres = old_genre
                        data['genre'] = genres

                        f = open(i.filename, "wb")
                        pickle.dump(data, f)
                        f.close()

                        print "saved " + title + " twice."

                next_link = root.cssselect(
                    "#selectedgenre ul.paginate a.paginate-more")
                if len(next_link) > 0:
                    get_games(next_link[0].get("href"))

            get_games(url + "&letter=" + c)

    except:
        traceback.print_exc()
        gmail.send("exception!", "*****@*****.**")
        ipdb.set_trace()
Code Example #29
File: items.py, Project: tipith/ToriScraper
    def check_for_alarms(self):
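        # Match newly scraped items against the stored alarms (regex checks on
        # description and location plus min/max price bounds) and email each
        # matching user at most once per item.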
        if not self.db or not len(self.items):
            return
        alarms = self.db.get_alarms()
        if not alarms:
            return
        alarms_sent = {}

        for alarm in alarms:
            if 'SearchPattern' in alarm and alarm['SearchPattern']:
                alarm['RegexSearchPattern'] = re.compile(alarm['SearchPattern'], re.IGNORECASE)
            if 'Location' in alarm and alarm['Location']:
                alarm['RegexLocation'] = re.compile(alarm['Location'], re.IGNORECASE)

        self.logger.debug('alarms: {}'.format(alarms))
        for item in self.items:
            for alarm in alarms:
                description_ok = False if 'SearchPattern' in alarm and alarm['SearchPattern'] else True
                location_ok = False if 'Location' in alarm and alarm['Location'] else True
                maxprice_ok = False if 'MaxPrice' in alarm and alarm['MaxPrice'] else True
                minprice_ok = False if 'MinPrice' in alarm and alarm['MinPrice'] else True

                if 'SearchPattern' in alarm and alarm['SearchPattern']:
                    if alarm['RegexSearchPattern'].match(item.description):
                        description_ok = True

                if description_ok and 'Location' in alarm and alarm['Location']:
                    if alarm['RegexLocation'].match(item.location):
                        location_ok = True

                if description_ok and location_ok and isinstance(item.price, int):
                    if 'MaxPrice' in alarm and alarm['MaxPrice'] and item.price < alarm['MaxPrice']:
                        maxprice_ok = True
                    if 'MinPrice' in alarm and alarm['MinPrice'] and item.price > alarm['MinPrice']:
                        minprice_ok = True

                self.logger.debug('description {0: <20} {1: <20} item value {2: <20}'.format(
                    alarm['SearchPattern'] if alarm['SearchPattern'] else 'None',
                    'passed' if description_ok else 'failed',
                    item.description))
                self.logger.debug('location    {0: <20} {1: <20} item value {2: <20}'.format(
                    alarm['Location'] if alarm['Location'] else 'None',
                    'passed' if location_ok else 'failed',
                    item.location))
                self.logger.debug('maxprice    {0: <20} {1: <20} item value {2: <20}'.format(
                    alarm['MaxPrice'] if alarm['MaxPrice'] else 'None',
                    'passed' if maxprice_ok else 'failed',
                    item.price if item.price else 'None'))
                self.logger.debug('minprice    {0: <20} {1: <20} item value {2: <20}'.format(
                    alarm['MinPrice'] if alarm['MinPrice'] else 'None',
                    'passed' if minprice_ok else 'failed',
                    item.price if item.price else 'None'))

                if all([description_ok, location_ok, maxprice_ok, minprice_ok]):
                    if item.toriid not in alarms_sent or (
                            item.toriid in alarms_sent and alarm['UserId'] not in alarms_sent[item.toriid]):
                        email = self.db.get_email(alarm['UserId'])
                        if email:
                            self.logger.info(
                                'alarm {} for "{}, {} eur"'.format(email, item.description, item.price))
                            gmail.send(email, 'Tori.fi: {}, {}'.format(item.description, item.price),
                                       item.toriurl, None)
                            self.db.store_item_alarm(alarm['UserId'], item)
                        else:
                            self.logger.info('alarm found "{}, {} eur"'.format(item.description, item.price))
                        alarms_sent.setdefault(item.toriid, []).append(alarm['UserId'])
                    else:
                        self.logger.info('alarm already sent to UserId {} for "{}, {} eur"'.format(alarm['UserId'],
                                                                                                   item.description,
                                                                                                   item.price))
Code Example #30
def get_everything(gamespot, iphone):
  global next_id
  try:
    f = get_url(gamespot+iphone+page_url(0))
    page = f.read()
    #ipdb.set_trace()
    #page = page.replace("gs:product", "div")
    #page = page.replace("gs:buy-price", "div")
    #page = page.replace("gs:buy-button", "div")
    root = html.fromstring(page)

    num_pages = int(root.cssselect("ul.pages li.last a")[0].text_content())

    for page_num in range(1685, num_pages):
      sleep(5)
      print "getting page " + str(page_num)
      f = get_url(gamespot + iphone + page_url(page_num))
      page = html.parse(f)
      games = page.getroot().cssselect('#filter_results div.body table tbody tr')

      for game in games:
        #ipdb.set_trace()
        try:
          title = unicode(game.cssselect('th a')[0].text_content())

          href = game.cssselect('th a')[0].get('href')
          if href.find("http://www.gamespot.com") == -1:
            href = "http://www.gamespot.com" + href

          upc = html.tostring(game.cssselect('td')[0])

          platform = game.cssselect('td')[1].text_content()
          #genre_url = game.cssselect('td.genre a')[0].get('href')

          genre = game.cssselect('td')[2].text_content()

          score = game.cssselect('td')[3].text_content()

          release_date = game.cssselect('td')[4].text_content()

          s = {}
          s["title"] = unicode(title)
          s["href"] = href
          s["upc"] = upc
          s["platform"] = platform
          s["genre"] = genre
          s["score"] = score
          s["release_date"] = release_date

          """
          metadata, gamespot_review, user_reviews = get_metadata_and_reviews(href)
          s = {}
          s["title"] = title
          s["href"] = href
          s["upc"] = upc
          s["platform"] = platform
          s["genre"] = genre
          s["score"] = score
          s["release_date"] = release_date
          s["metadata"] = metadata
          s["gamespot_review"] = gamespot_review
          s["user_reviews"] = user_reviews
          """
          
          # if we already found this game, add the new title to the file about it
          prev = GameData.query.filter_by(href=href).all()
          if len(prev) > 0:
            f = open(prev[0].filename, "rb")
            try:
              derp = pickle.load(f)
            except EOFError as e: # basically we opened this file and crashed
              f.close()
              # so recreate it 
              # copypasta of logic below
              next_id += 1
              while os.path.exists(str(next_id)+".txt") == True:
                print "incremented!"
                next_id += 1
              f = open(str(next_id) + ".txt", "wb")
              pickle.dump(s, f)
              f.close()
              continue

            old_title = derp['title']
            titles = []
            if type(old_title) == str or type(old_title) == unicode:
              if old_title == title: # if we've already gotten this title, continue
                continue
              titles.append(old_title)
              titles.append(title)
            if type(old_title) == list:
              for t in old_title:
                if t == title: # if we've already gotten this title, we should just move on
                  continue
              titles = old_title
              titles.append(title)
            derp['title'] = titles
            f.close()
            f = open(prev[0].filename, "wb")
            pickle.dump(derp, f)
            f.close()
            continue

          next_id += 1
          while os.path.exists(str(next_id)+".txt") == True:
            print "incremented!"
            next_id += 1
          f = open(str(next_id) + ".txt", "wb")
          pickle.dump(s, f)
          f.close()

          c = GameData(href=href, filename=str(next_id) + ".txt", page_num=page_num)
          session.commit()
        except:
          traceback.print_exc()
          gmail.send("exception!", "*****@*****.**")
          ipdb.set_trace()


  except Exception as e:
    traceback.print_exc()
    gmail.send("exception!", "*****@*****.**")
    ipdb.set_trace()
Code Example #31
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
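    # Full train/validation/test loop for a graph classification model: logs to
    # TensorBoard, checkpoints every epoch, and writes/emails the final results.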
    t0 = time.time()
    per_epoch_time = []

    DATASET_NAME = dataset.name

    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print(
                "[!] Adding graph self-loops for GCN/GAT models (central node trick)."
            )
            dataset._add_self_loops()

    trainset, valset, testset = dataset.train, dataset.val, dataset.test

    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']

    # Write the network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write(
            """Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n"""
            .format(DATASET_NAME, MODEL_NAME, params, net_params,
                    net_params['total_param']))

    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)

    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device == 'cuda':
        torch.cuda.manual_seed(params['seed'])

    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))
    print("Number of Classes: ", net_params['n_classes'])

    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)

    optimizer = optim.Adam(model.parameters(),
                           lr=params['init_lr'],
                           weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='min',
        factor=params['lr_reduce_factor'],
        patience=params['lr_schedule_patience'],
        verbose=True)

    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []

    # batching exception for Diffpool
    drop_last = True if MODEL_NAME == 'DiffPool' else False

    train_loader = DataLoader(trainset,
                              batch_size=params['batch_size'],
                              shuffle=True,
                              drop_last=drop_last,
                              collate_fn=dataset.collate)
    val_loader = DataLoader(valset,
                            batch_size=params['batch_size'],
                            shuffle=False,
                            drop_last=drop_last,
                            collate_fn=dataset.collate)
    test_loader = DataLoader(testset,
                             batch_size=params['batch_size'],
                             shuffle=False,
                             drop_last=drop_last,
                             collate_fn=dataset.collate)

    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs'])) as t:
            for epoch in t:

                t.set_description('Epoch %d' % epoch)

                start = time.time()

                epoch_train_loss, epoch_train_acc, optimizer = train_epoch(
                    model, optimizer, device, train_loader, epoch)
                epoch_val_loss, epoch_val_acc = evaluate_network(
                    model, device, val_loader, epoch)

                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)

                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                writer.add_scalar('learning_rate',
                                  optimizer.param_groups[0]['lr'], epoch)

                _, epoch_test_acc = evaluate_network(model, device,
                                                     test_loader, epoch)
                t.set_postfix(time=time.time() - start,
                              lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss,
                              val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc,
                              val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)

                per_epoch_time.append(time.time() - start)

                # Saving checkpoint
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                torch.save(model.state_dict(),
                           '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))

                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch - 1:
                        os.remove(file)

                scheduler.step(epoch_val_loss)

                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR EQUAL TO MIN LR SET.")
                    break

                # Stop training after params['max_time'] hours
                if time.time() - t0 > params['max_time'] * 3600:
                    print('-' * 89)
                    print(
                        "Max_time for training elapsed {:.2f} hours, so stopping"
                        .format(params['max_time']))
                    break

    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')

    _, test_acc = evaluate_network(model, device, test_loader, epoch)
    _, train_acc = evaluate_network(model, device, train_loader, epoch)
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time() - t0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))

    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  np.mean(np.array(test_acc))*100, np.mean(np.array(train_acc))*100, (time.time()-t0)/3600, np.mean(per_epoch_time)))

    # send results to gmail
    try:
        from gmail import send
        subject = 'Result for Dataset: {}, Model: {}'.format(
            DATASET_NAME, MODEL_NAME)
        body = """Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  np.mean(np.array(test_acc))*100, np.mean(np.array(train_acc))*100, (time.time()-t0)/3600, np.mean(per_epoch_time))
        send(subject, body)
    except:
        pass
Code Example #32
def get_everything(gamespot, iphone):
    global next_id
    try:
        f = get_url(gamespot + iphone + page_url(0))
        page = f.read()
        #ipdb.set_trace()
        #page = page.replace("gs:product", "div")
        #page = page.replace("gs:buy-price", "div")
        #page = page.replace("gs:buy-button", "div")
        root = html.fromstring(page)

        num_pages = int(root.cssselect("ul.pages li.last a")[0].text_content())

        for page_num in range(1685, num_pages):
            sleep(5)
            print "getting page " + str(page_num)
            f = get_url(gamespot + iphone + page_url(page_num))
            page = html.parse(f)
            games = page.getroot().cssselect(
                '#filter_results div.body table tbody tr')

            for game in games:
                #ipdb.set_trace()
                try:
                    title = unicode(game.cssselect('th a')[0].text_content())

                    href = game.cssselect('th a')[0].get('href')
                    if href.find("http://www.gamespot.com") == -1:
                        href = "http://www.gamespot.com" + href

                    upc = html.tostring(game.cssselect('td')[0])

                    platform = game.cssselect('td')[1].text_content()
                    #genre_url = game.cssselect('td.genre a')[0].get('href')

                    genre = game.cssselect('td')[2].text_content()

                    score = game.cssselect('td')[3].text_content()

                    release_date = game.cssselect('td')[4].text_content()

                    s = {}
                    s["title"] = unicode(title)
                    s["href"] = href
                    s["upc"] = upc
                    s["platform"] = platform
                    s["genre"] = genre
                    s["score"] = score
                    s["release_date"] = release_date
                    """
          metadata, gamespot_review, user_reviews = get_metadata_and_reviews(href)
          s = {}
          s["title"] = title
          s["href"] = href
          s["upc"] = upc
          s["platform"] = platform
          s["genre"] = genre
          s["score"] = score
          s["release_date"] = release_date
          s["metadata"] = metadata
          s["gamespot_review"] = gamespot_review
          s["user_reviews"] = user_reviews
          """

                    # if we already found this game, add the new title to the file about it
                    prev = GameData.query.filter_by(href=href).all()
                    if len(prev) > 0:
                        f = open(prev[0].filename, "rb")
                        try:
                            derp = pickle.load(f)
                        except EOFError as e:  # basically we opened this file and crashed
                            f.close()
                            # so recreate it
                            # copypasta of logic below
                            next_id += 1
                            while os.path.exists(str(next_id) +
                                                 ".txt") == True:
                                print "incremented!"
                                next_id += 1
                            f = open(str(next_id) + ".txt", "wb")
                            pickle.dump(s, f)
                            f.close()
                            continue

                        old_title = derp['title']
                        titles = []
                        if type(old_title) == str or type(
                                old_title) == unicode:
                            if old_title == title:  # if we've already gotten this title, continue
                                continue
                            titles.append(old_title)
                            titles.append(title)
                        if type(old_title) == list:
                            for t in old_title:
                                if t == title:  # if we've already gotten this title, we should just move on
                                    continue
                            titles = old_title
                            titles.append(title)
                        derp['title'] = titles
                        f.close()
                        f = open(prev[0].filename, "wb")
                        pickle.dump(derp, f)
                        f.close()
                        continue

                    next_id += 1
                    while os.path.exists(str(next_id) + ".txt") == True:
                        print "incremented!"
                        next_id += 1
                    f = open(str(next_id) + ".txt", "wb")
                    pickle.dump(s, f)
                    f.close()

                    c = GameData(href=href,
                                 filename=str(next_id) + ".txt",
                                 page_num=page_num)
                    session.commit()
                except:
                    traceback.print_exc()
                    gmail.send("exception!", "*****@*****.**")
                    ipdb.set_trace()

    except Exception as e:
        traceback.print_exc()
        gmail.send("exception!", "*****@*****.**")
        ipdb.set_trace()
Code Example #33
File: emails.py, Project: aautio/ezmlm-sync
    def handle(self, email):
        # respond with confirmation email
        confirm_to = cuseregex.search(email).group(4)
        gmail.send(confirm_to)
Code Example #34
from gmail import send
from pytools import pload
import sys


def make_message(info):
    subj = "Weatherbot forecast for %s" % info['station']
    mess = "Forecast valid for %s<br>" % info['fctime']
    mess += "High: %d +/- %d <br> Low: %d +/- %d" % (
        info['high'], info['hconf'], info['low'], info['lconf'])

    return subj, mess


if __name__ == '__main__':

    tag = sys.argv[1]

    recipients = open('/home/thackray/weatherbot/mailinglist.%s' % tag,
                      'r').read().split('\n')

    info = pload('/home/thackray/weatherbot/' + tag + '.fc')

    subj, mess = make_message(info)

    for recip in recipients:
        send(recip, subj, mess)
Code Example #35
def track(previousStatus):
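    # Poll the journal's article status and email a notification whenever it
    # changes from the previously seen status.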
    status = journal.getArticleStatus()
    if status != previousStatus:
        gmail.send(journal.getName(), status)
    return status
Code Example #36
    logger.info('time_next = {}'.format(datetime_string(T_next)))
    logger.info("Going to Sleep for {} seconds...".format(T_next - 40 -
                                                          (T_now)))
    time.sleep(3)

    ACC.py.setup_sleep(T_next - 40 - (T_now + 3))
    ACC.py.go_to_sleep()
    return


try:
    setup()
    config_measurement()
    measure()
    store_to_SD()
    send_file()
    deep_sleep()
except Exception:
    logger.exception('Unknown exception caught for emailing...')
    logging.fclose()

    to = '*****@*****.**'
    subject = 'WG: Exception Report from GCAM{}-ACC{}'.format(iCAM, iACC)
    logger.info('logfile_new={}'.format(logfile_new))
    with open(logfile_new, 'r') as file:
        logs = file.read()
    contents = 'Log file\n--------\n' + logs
    gmail.send(to, subject, contents)
finally:
    deep_sleep_exception()
Code Example #37
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):

    start0 = time.time()
    per_epoch_time = []

    DATASET_NAME = dataset.name

    if MODEL_NAME in ['GCN', 'GAT', 'SGC']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops (central node trick).")
            dataset._add_self_loops()

    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']

    print('The seed is ', params['seed'])
    random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device == 'cuda':
        torch.cuda.manual_seed(params['seed'])

    np.random.seed(params['seed'])
    num_nodes = dataset.train_mask.size(0)
    index = torch.tensor(np.random.permutation(num_nodes))
    print('index:', index)
    train_index = index[:int(num_nodes * 0.6)]
    val_index = index[int(num_nodes * 0.6):int(num_nodes * 0.8)]
    test_index = index[int(num_nodes * 0.8):]

    train_mask = index_to_mask(train_index, size=num_nodes)
    val_mask = index_to_mask(val_index, size=num_nodes)
    test_mask = index_to_mask(test_index, size=num_nodes)

    train_mask = train_mask.to(device)
    val_mask = val_mask.to(device)
    test_mask = test_mask.to(device)

    labels = dataset.labels.to(device)

    # Write network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write(
            """Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n"""
            .format(DATASET_NAME, MODEL_NAME, params, net_params,
                    net_params['total_param']))

    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)

    print("Training Nodes: ", train_mask.int().sum().item())
    print("Validation Nodes: ", val_mask.int().sum().item())
    print("Test Nodes: ", test_mask.int().sum().item())
    print("Number of Classes: ", net_params['n_classes'])

    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)

    optimizer = optim.Adam(model.parameters(),
                           lr=params['init_lr'],
                           weight_decay=params['weight_decay'])
    # scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
    #                                                 factor=params['lr_reduce_factor'],
    #                                                 patience=params['lr_schedule_patience'],
    #                                                 verbose=True)

    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []

    graph = dataset.graph
    nfeat = graph.ndata['feat'].to(device)
    efeat = graph.edata['feat'].to(device)
    norm_n = dataset.norm_n.to(device)
    norm_e = dataset.norm_e.to(device)

    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs'])) as t:
            best_val_acc = 0
            for epoch in t:

                t.set_description('Epoch %d' % epoch)

                start = time.time()

                epoch_train_loss, epoch_train_acc, optimizer = train_epoch(
                    model, optimizer, device, graph, nfeat, efeat, norm_n,
                    norm_e, train_mask, labels, epoch)

                epoch_val_loss, epoch_val_acc = evaluate_network(
                    model, graph, nfeat, efeat, norm_n, norm_e, val_mask,
                    labels, epoch)

                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)

                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                writer.add_scalar('learning_rate',
                                  optimizer.param_groups[0]['lr'], epoch)

                _, epoch_test_acc = evaluate_network(model, graph, nfeat,
                                                     efeat, norm_n, norm_e,
                                                     test_mask, labels, epoch)
                t.set_postfix(time=time.time() - start,
                              lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss,
                              val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc,
                              val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)

                per_epoch_time.append(time.time() - start)

                # Saving checkpoint
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                torch.save(model.state_dict(),
                           '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                if best_val_acc < epoch_val_acc:
                    best_val_acc = epoch_val_acc
                    torch.save(model.state_dict(),
                               '{}.pkl'.format(ckpt_dir + "/best"))

                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    if file[-8:] == 'best.pkl':
                        continue
                    else:
                        epoch_nb = file.split('_')[-1]
                        epoch_nb = int(epoch_nb.split('.')[0])
                        if epoch_nb < epoch - 1:
                            os.remove(file)

                #scheduler.step(epoch_val_loss)

                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    optimizer.param_groups[0]['lr'] = params['min_lr']
                    #print("\n!! LR SMALLER OR EQUAL TO MIN LR THRESHOLD.")
                    #break

                # Stop training after params['max_time'] hours
                if time.time() - start0 > params['max_time'] * 3600:
                    print('-' * 89)
                    print(
                        "Max_time for training elapsed {:.2f} hours, so stopping"
                        .format(params['max_time']))
                    break

    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')

    model.load_state_dict(torch.load('{}.pkl'.format(ckpt_dir + "/best")))
    _, test_acc = evaluate_network(model, graph, nfeat, efeat, norm_n, norm_e,
                                   test_mask, labels, epoch)
    _, val_acc = evaluate_network(model, graph, nfeat, efeat, norm_n, norm_e,
                                  val_mask, labels, epoch)
    _, train_acc = evaluate_network(model, graph, nfeat, efeat, norm_n, norm_e,
                                    train_mask, labels, epoch)
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time() - start0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))

    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  test_acc, train_acc, (time.time()-start0)/3600, np.mean(per_epoch_time)))

    # send results to gmail
    try:
        from gmail import send
        subject = 'Result for Dataset: {}, Model: {}'.format(
            DATASET_NAME, MODEL_NAME)
        body = """Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  test_acc, train_acc, (time.time()-start0)/3600, np.mean(per_epoch_time))
        send(subject, body)
    except:
        pass

    return val_acc, test_acc
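Note: the gmail.send helper used throughout these examples is a project-local wrapper whose source is not shown here. A minimal smtplib-based sketch of what such a wrapper might look like is given below; the module name, addresses, and credentials are placeholders rather than anything taken from the original projects. The three-argument form seen in example #40 would simply pass the receiver explicitly instead of relying on a default.

# gmail.py -- hypothetical minimal implementation of the send() helper used above;
# sender, password, and default recipient are placeholders.
import smtplib
from email.mime.text import MIMEText

SENDER = 'sender@example.com'         # placeholder address
PASSWORD = 'app-password'             # placeholder app password
DEFAULT_TO = 'recipient@example.com'  # placeholder address


def send(subject, body, to=DEFAULT_TO):
    msg = MIMEText(body)
    msg['Subject'] = subject
    msg['From'] = SENDER
    msg['To'] = to

    server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    try:
        server.login(SENDER, PASSWORD)
        server.sendmail(SENDER, [to], msg.as_string())
    finally:
        server.quit()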
Code example #38
def get_everything(url):
  try:
    for c in [chr(i) for i in range(ord('A'), ord('Z')+1)] + ['*']: # for every letter from A to Z & the asterisk
      def get_games(url):
        f = get_url(url)
        page = html.parse(f)
        root = page.getroot()
        games_list = root.cssselect("#selectedcontent div.column li a")
        genre = root.cssselect("#title ul.breadcrumb li a")
        genre = unicode(genre[-1].text_content())
        for g in games_list:
          title = unicode(g.text_content())
          href = g.get("href")
          num_existing = ItunesGame.query.filter_by(href=href)
          if num_existing.count() == 0:
            # store data about this game to a file
            global next_id
            next_id += 1
            while os.path.exists(str(next_id) + ".txt"):
              next_id += 1
            f = open(str(next_id) + ".txt", "wb")
            s = {}
            s['title'] = title
            s['genre'] = genre
            pickle.dump(s, f)
            f.close()
            i = ItunesGame(href=href, filename=str(next_id)+".txt")
            session.add(i)  # ensure the new record is attached to the session before commit
            session.commit()
            print "saved " + title
          else:
            # add data about this game to the file
            i = num_existing.first()
            f = open(i.filename, "rb")
            data = pickle.load(f)
            f.close()

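            # merge the newly scraped title/genre with what was stored earlier,
            # promoting a single stored value to a list whenever the values differ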
            old_title = data['title']
            titles = []
            if type(old_title) in [str, unicode]:
              if old_title != title:
                titles.append(old_title)
                titles.append(title)
            if type(old_title) == list:
              titles = old_title
              if title not in titles:
                titles.append(title)
            if len(titles) == 0:
              titles = old_title
            data['title'] = titles

            old_genre = data['genre']
            genres = []
            if type(old_genre) in [str, unicode]:
              if old_genre != genre:
                genres.append(old_genre)
                genres.append(genre)
            if type(old_genre) == list:
              genres = old_genre
              if genre not in genres:
                genres.append(genre)
            if len(genres) == 0:
              genres = old_genre
            data['genre'] = genres

            f = open(i.filename, "wb")
            pickle.dump(data, f)
            f.close()

            print "saved " + title + " twice."

        next_link = root.cssselect("#selectedgenre ul.paginate a.paginate-more")
        if len(next_link) > 0:
          get_games(next_link[0].get("href"))

      get_games(url + "&letter=" + c)

  except:
    traceback.print_exc()
    gmail.send("exception!", "*****@*****.**")
    ipdb.set_trace()
Code example #39
def return_gamespot_review(url, just_return_review=False):
    try:
        f = get_url(url)

        review = ""
        comments = ""
        gamespot_score = ""
        gamespot_score_word = ""
        metacritic_score = ""
        metacritic_reviews = ""
        metacritic_reviews_link = ""
        ret = {}

        page = html.parse(f)
        root = page.getroot()

        review = []
        review.append(html.tostring(root.cssselect("#main")[0]))
        #print review[0]

        if just_return_review:
            return review[0]

        # check if review has multiple pages
        if len(root.cssselect("#main .pageNav")) > 0:
            # get the number of pages to scrape
            review_links = root.cssselect("#main .pageNav .pages li a")
            for r in review_links:
                review.append(
                    return_gamespot_review("http://www.gamespot.com" +
                                           r.get("href"),
                                           just_return_review=True))

        gamespot_score = root.cssselect("#side")[0].cssselect(
            "li.editor_score span.data")[0].text_content()
        gamespot_score_word = root.cssselect("#side")[0].cssselect(
            "li.editor_score span.scoreword")[0].text_content()
        if root.cssselect("#side")[0].cssselect(
                "li.review_score span.more")[0].text_content() != "No Reviews":
            #print "Metacritic reviews found"
            metacritic_score = root.cssselect("#side")[0].cssselect(
                "li.review_score span.scoreWrap a")[0].text_content()
            metacritic_reviews = root.cssselect("#side")[0].cssselect(
                "li.review_score span.more span")[0].text_content()
            metacritic_reviews_link = root.cssselect("#side")[0].cssselect(
                "li.review_score span.scoreWrap a")[0].get("href")
        else:
            #print "No metacritic reviews"
            metacritic_score = "No Reviews"
            metacritic_reviews = "No Reviews"
            metacritic_reviews_link = "No Reviews"

        comments = root.cssselect("ul#comments_list li.comment")
        comments = [html.tostring(c) for c in comments]
        # check to see if there are more pages of comments
        if len(root.cssselect("#post_comment .pagination")) > 0:
            # get number of comments
            nav = root.cssselect("#post_comment .pagination")[0]
            num_pages = int(
                nav.cssselect("ul.pages li.last a")[0].text_content())
            for i in range(num_pages - 1):
                link = nav.cssselect(".page_flipper a")[0]

                # parse the parameters for the comments pagination manually
                rel = str(link.get("rel"))
                j = rel.find(" nofollow")
                rel = rel[0:j]
                rel = rel.replace("{", "")
                rel = rel.replace("}", "")
                rel = rel.replace("'", "")
                rel = rel.split(",")
                params = {}
                for r in rel:
                    r = r.split(":")
                    params[r[0]] = r[1]
                params = urllib.urlencode(params)

                href = "http://www.gamespot.com/pages/ajax/load_comments.php?page=" + str(
                    i + 1)
                try:
                    f = urllib.urlopen(href, params)
                except:
                    traceback.print_exc()
                    ipdb.set_trace()

                #ipdb.set_trace()
                response = json.loads(f.read())
                new_comments = html.fromstring(response['template'])
                for c in new_comments.cssselect("ul#comments_list li.comment"):
                    comments.append(html.tostring(c))
        """
    print review
    print gamespot_score
    print gamespot_score_word
    print metacritic_score
    print metacritic_reviews
    print metacritic_reviews_link
    print comments
    """

        #ipdb.set_trace()
        #gamespot_score = page.cssselect("#id.

        ret['review'] = review
        ret['comments'] = comments
        ret['gamespot_score'] = gamespot_score
        ret['gamespot_score_word'] = gamespot_score_word
        ret['metacritic_score'] = metacritic_score
        ret['metacritic_reviews'] = metacritic_reviews
        ret['metacritic_reviews_link'] = metacritic_reviews_link
        #@TODO parse gamespot review & comments

        return ret

    except:
        traceback.print_exc()
        gmail.send("exception!", "*****@*****.**")
        ipdb.set_trace()

    return ret
Code example #40
File: tasks.py Project: xiaoz2020/gmission
def send_email(subject, body, receiver):
    logger.info('send email [%s] to: %s', subject, receiver)
    gmail.send(subject, body, receiver)
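A call site for this helper might look like the following; the recipient address is a placeholder, and if the function is actually registered as an asynchronous task in the project it would normally be dispatched through the task queue rather than called directly.

send_email('Welcome', 'Thanks for signing up.', 'user@example.com')  # placeholder recipient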