Example #1
import subprocess
from datetime import datetime

import pause


def main():

    while True:
        print("----------------------------------", flush=True)
        print("Running scan...", flush=True)
        #returned value format
        #{"current_price" : currentPrice, "last_updated" : last_updated, "next_update" : nextUpdate}
        process = subprocess.run(["python", "getPrice.py"],
                                 capture_output=True,
                                 text=True)
        returnedValues = eval(process.stdout)
        #the date returned is unix time and needs to be converted
        next_update = datetime.fromtimestamp(returnedValues["next_update"])
        last_updated = datetime.fromtimestamp(returnedValues["last_updated"])
        print("Laestrite Ore: " + str(returnedValues["current_price"]),
              flush=True)
        print("Last update: " + last_updated.strftime("%Y/%m/%d") + ", " +
              last_updated.strftime('%H:%M:%S'),
              flush=True)
        print("Pausing until: " + next_update.strftime("%Y/%m/%d") + ", " +
              next_update.strftime('%H:%M:%S'),
              flush=True)
        pause.until(next_update)
        # pausing for 2 minutes to give the site time to refresh
        pause.minutes(2)
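A note on Example #1: parsing the helper script's stdout with eval() will execute arbitrary code if that output is ever malformed or hostile. A minimal, safer sketch, assuming getPrice.py is adjusted to print a JSON object with the same keys (a hypothetical change, not part of the original project):

import json
import subprocess
from datetime import datetime

import pause

def run_scan_once():
    # getPrice.py is assumed (hypothetically) to print JSON such as:
    # {"current_price": 123, "last_updated": 1700000000, "next_update": 1700003600}
    process = subprocess.run(["python", "getPrice.py"], capture_output=True, text=True)
    values = json.loads(process.stdout)  # parses data only, unlike eval()
    next_update = datetime.fromtimestamp(values["next_update"])
    print("Laestrite Ore:", values["current_price"], flush=True)
    pause.until(next_update)  # block until the site's next refresh time
    pause.minutes(2)          # small buffer so the data has actually refreshed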
Example #2
    def test_minutes(self):
        """ test_minutes
        Test 1 minute delay
        """
        now = time.time()
        pause.minutes(1)
        end = time.time()

        # number of whole minutes elapsed; should be exactly 1
        diff = int((end - now) / 60)
        self.assertEqual(diff, 1)
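For reference, pause.minutes(n) is the convenience form of waiting for an absolute deadline; the library also accepts a unix timestamp or datetime via pause.until(). A tiny sketch of the equivalence the test above relies on (timings are approximate, so allow some slack):

import time

import pause

start = time.time()
pause.minutes(1)              # sleep for roughly one minute
# about the same effect, phrased as an absolute deadline:
# pause.until(start + 60)     # pause.until() also takes a unix timestamp
elapsed = time.time() - start
assert 59 <= elapsed <= 65    # one minute, give or take scheduling slack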
Example #3
File: my.py Project: wrfly/xyv6
    def __init__(self):
        self.SENDGRID_API_KEY = 'SG.www--www.www'
        while True:
            self.conn = cymysql.connect(host=Config.MYSQL_HOST,
                                        port=Config.MYSQL_PORT,
                                        user=Config.MYSQL_USER,
                                        passwd=Config.MYSQL_PASS,
                                        db=Config.MYSQL_DB,
                                        charset='utf8')
            self.cur = self.conn.cursor()

            self.scan_users()

            self.conn.commit()
            self.cur.close()
            self.conn.close()
            pause.minutes(10)
Example #4
    def investment_loop(self):
        """
        Start the investment loop
        Check the account every so often (default is every 60 minutes) for funds to invest
        The frequency is defined by the 'frequency' value in the ~/.lcinvestor/settings.yaml file
        """
        self.loop = True
        frequency = self.settings.user_settings['frequency']
        while self.loop:

            # Make sure the site is available (network could be reconnecting after sleep)
            attempts = 0
            while not self.lc.is_site_available() and self.loop:
                attempts += 1
                if attempts % 5 == 0:
                    self.logger.warn('LendingClub is not responding. Trying again in 10 seconds...')
                sleep(10)

            # Invest
            self.attempt_to_invest()
            pause.minutes(frequency)
Example #5
    def investment_loop(self):
        """
        Start the investment loop
        Check the account every so often (default is every 60 minutes) for funds to invest
        The frequency is defined by the 'frequency' value in the ~/.lcinvestor/settings.yaml file
        """
        self.loop = True
        frequency = self.settings.user_settings['frequency']
        while self.loop:

            # Make sure the site is available (network could be reconnecting after sleep)
            attempts = 0
            while not self.lc.is_site_available() and self.loop:
                attempts += 1
                if attempts % 5 == 0:
                    self.logger.warn('LendingClub is not responding. Trying again in 10 seconds...')
                sleep(10)

            # Invest
            self.attempt_to_invest()
            pause.minutes(frequency)
Example #6
File: my.py Project: wrfly/xyv6
def update_user():
    # update user plans
    while True:
        pause.minutes(1)
        conn = cymysql.connect(host=Config.MYSQL_HOST,
                               port=Config.MYSQL_PORT,
                               user=Config.MYSQL_USER,
                               passwd=Config.MYSQL_PASS,
                               db=Config.MYSQL_DB,
                               charset='utf8')
        cur = conn.cursor()
        # when a trial-plan (D) user's traffic runs out, downgrade to the free plan (C)
        net_query_sql = 'UPDATE user set plan=\'C\',u=0, money = 0, d=0, transfer_enable=\'3221225472\' where transfer_enable-u-d < 10240 and plan = \'D\';'
        cur.execute(net_query_sql)
        conn.commit()

        # downgrade expired A/B/D/G plans to the free plan
        net_query_sql = 'UPDATE user set plan=\'C\', money=0, u=0, d=0, transfer_enable=\'3221225472\' where vip_end_time < ' + str(
            int(time.time())
        ) + ' and ( plan = \'A\' or plan = \'B\' or plan = \'D\' or plan = \'G\' );'
        cur.execute(net_query_sql)
        conn.commit()

        # for plan B, deduct the monthly fee and reset the traffic counters each month
        net_query_sql = 'UPDATE user set money=money-10, vip_month=vip_month-1, u=0, d=0, transfer_enable=\'32212254720\' where vip_end_time-(3600*24*31*( vip_month - 1 )) < ' + str(
            int(time.time())) + ' and plan = \'B\';'
        cur.execute(net_query_sql)
        conn.commit()

        # update users' online status
        net_query_sql = 'UPDATE user set w_n = 0 where t + 20 < ' + str(
            int(time.time())) + ';'
        cur.execute(net_query_sql)
        conn.commit()

        cur.close()
        conn.close()
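Example #6 builds its SQL by concatenating int(time.time()) into the statement. A hedged sketch of the same expiry update using a bound parameter instead, assuming cymysql follows the DB-API '%s' placeholder style (as PyMySQL does) and reusing the Config settings object from the snippet above:

import time

import cymysql

conn = cymysql.connect(host=Config.MYSQL_HOST, port=Config.MYSQL_PORT,
                       user=Config.MYSQL_USER, passwd=Config.MYSQL_PASS,
                       db=Config.MYSQL_DB, charset='utf8')
cur = conn.cursor()
# downgrade expired A/B/D/G plans to the free plan; the cutoff is passed as a parameter
cur.execute(
    "UPDATE user SET plan='C', money=0, u=0, d=0, transfer_enable='3221225472' "
    "WHERE vip_end_time < %s AND plan IN ('A', 'B', 'D', 'G')",
    (int(time.time()),))
conn.commit()
cur.close()
conn.close()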
Example #7
def submit_tso(tso, ts, output_dir):
    # get params from tso object
    url = tso.create_search_url()
    tso_params = parse_qs(url)
    since_id = tso_params.get('since_id', None)
    since_id = since_id if since_id is None else since_id[0]
    keywords = tso_params.get('?q', None)
    if keywords:
        keywords = set(keywords[0].split(' '))
        keywords = [kw.strip('"') for kw in keywords if kw != 'OR']
        keywords = set(
            keywords)  # just in case stripping '"' created duplicates

    # use defaultdict(list) as each tso might have many pages
    # append all pages to a list and aggregate after
    max_id = defaultdict(list)
    max_date = defaultdict(list)
    min_date = defaultdict(list)
    count = defaultdict(list)
    window_count = 0

    ts.search_tweets(tso)
    try_next = True
    while try_next:
        # parse response
        meta = ts.get_metadata()
        remaining_limit = int(meta.get('x-rate-limit-remaining', 0))
        num_tweets = ts.get_amount_of_tweets()

        # process tweets if there are any
        if num_tweets != 0:
            tweets = ts.get_tweets().get('statuses', [])
            write_tweets(tweets, output_dir)
            # for now only with cashtags
            # todo: hashtags and simple keywords..
            current_max_id = max([t['id'] for t in tweets])  # max id of all
            for kw in keywords:
                kw_tweets = [
                    t for t in tweets if kw in
                    ['$' + i['text'] for i in t['entities']['symbols']]
                ]
                max_id[kw].append(current_max_id)  # max id of the whole tso
                if len(kw_tweets) != 0:
                    max_date[kw].append(
                        max([
                            pd.to_datetime(tweet['created_at'], utc=True)
                            for tweet in kw_tweets
                        ]))
                    min_date[kw].append(
                        min([
                            pd.to_datetime(tweet['created_at'], utc=True)
                            for tweet in kw_tweets
                        ]))
                    count[kw].append(len(kw_tweets))

        if remaining_limit == 0:
            try:
                limit_reset = int(
                    meta.get('x-rate-limit-reset',
                             time.time() +
                             15 * 60)) + 10  # extra sec to be on the safe side
                # convert to correct datetime
                limit_reset_dt = pd.to_datetime(limit_reset,
                                                unit='s',
                                                utc=True)
                limit_reset_dt = limit_reset_dt.tz_convert('Europe/London')
                logging.debug(
                    'Sleeping until {:%H:%M:%S}'.format(limit_reset_dt))
                pause.until(limit_reset)
            except Exception as e:
                logging.warning('limit_reset ERROR')
                logging.warning(str(e))
                logging.warning('Sleep for 15min...')
                # wait the maximum time until next window...
                pause.minutes(15)
            window_count += 1
        # check if there is a next page for the tso
        try:
            try_next = ts.search_next_results()
        except:
            try_next = False

    # aggregate stats for current tso
    tso_stats = []
    for kw in keywords:
        max_id[kw] = max(max_id[kw]) if len(max_id[kw]) != 0 else since_id  #??
        max_date[kw] = max(max_date[kw]) if len(max_date[kw]) != 0 else None
        min_date[kw] = min(min_date[kw]) if len(min_date[kw]) != 0 else None
        count[kw] = sum(count[kw])

        tso_stats.append({
            'keyword': kw,
            'count': count[kw],
            'min_date': (min_date[kw].strftime('%Y-%m-%d %H:%M:%S')
                         if min_date[kw] is not None else None),
            'max_date': (max_date[kw].strftime('%Y-%m-%d %H:%M:%S')
                         if max_date[kw] is not None else None),
            'max_id': max_id[kw],
            'search_date': pd.Timestamp.now().strftime('%Y-%m-%d %H:%M:%S'),
        })

    return tso_stats, window_count
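The rate-limit handling in Examples #7 and #10 boils down to one pattern: read the x-rate-limit-reset header, pause until that unix timestamp, and fall back to a full 15-minute window if the header is missing or malformed. A stripped-down sketch of just that pattern:

import time
import logging

import pause

def wait_for_rate_limit(meta):
    """Pause until the rate-limit window resets, given the response metadata."""
    try:
        # header value is a unix timestamp; add a few seconds to be on the safe side
        reset_at = int(meta.get('x-rate-limit-reset', time.time() + 15 * 60)) + 10
        pause.until(reset_at)
    except Exception as exc:
        logging.warning('could not parse x-rate-limit-reset: %s', exc)
        pause.minutes(15)  # worst case: wait out a full window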
Example #8
            storeData["speed"]["download"] = np.append(
                storeData["speed"]["download"], 0.0)
            storeData["speed"]["upload"] = np.append(
                storeData["speed"]["upload"], 0.0)
            storeData["speed"]["ping"] = np.append(storeData["speed"]["ping"],
                                                   np.nan)
    else:
        try:
            storeData = runSpeedTest(storeData, pingOnly=True)
        except:
            storeData["speed"]["download"] = np.append(
                storeData["speed"]["download"], 0.0)
            storeData["speed"]["upload"] = np.append(
                storeData["speed"]["upload"], 0.0)
            storeData["speed"]["ping"] = np.append(storeData["speed"]["ping"],
                                                   np.nan)

    speedI = speedI + 1
    if speedI == speedCheckInt - 1:
        speedI = 0

    # save data to file
    with open(saveDataFile, 'wb') as fileOut:
        pickle.dump(storeData, fileOut)

    # update graphs

    # pause until required
    pause.minutes(hubCheckDeltaT)
Example #9
        links.insert(counter, video_link + ' ' + time)  # use insert() to interleave videos
        
        counter += 2 #counter for mixing video from other 
    counter = 0

#watching videos
link_file = os.path.dirname(sys.argv[0]) + '/watched.txt'
for link in links:
    link_and_duration = link.split(' ')
    # skip videos that have already been watched
    if any(link_and_duration[0] in s for s in open(link_file, "r")):
        print("watched")
    else:
        driver.get(link_and_duration[0])
        duration = link_and_duration[1].split(":") 
        # check whether the video is shorter than an hour
        if len(duration) == 2:
            # prepend 0 hours if it is
            duration.insert(0, '0')
        wr = open(link_file,"a")
        #wr.write(title+"\n")
        wr.write(link + "\n")  # record the link in the watched file
        wr.close()

        # sleep for the duration of the video
        pause.hours(int(duration[0]))
        pause.minutes(int(duration[1]))
        pause.seconds(int(duration[2]))

driver.quit()
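Example #9 issues three separate pauses for the hours, minutes and seconds of a video. A minimal sketch that converts the same 'H:MM:SS' or 'MM:SS' duration string into a single pause.seconds() call:

import pause

def pause_for_duration(duration_str):
    """Sleep for a video duration given as 'H:MM:SS' or 'MM:SS'."""
    parts = [int(p) for p in duration_str.split(':')]
    if len(parts) == 2:   # no hours component
        parts.insert(0, 0)
    hours, minutes, seconds = parts
    pause.seconds(hours * 3600 + minutes * 60 + seconds)

# pause_for_duration('1:02:30')  # waits one hour, two minutes and thirty seconds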
Example #10
def twitter_search(db_file, output_dir, keywords_file):

    ts = TwitterSearch.TwitterSearch(
        consumer_key=twitter_keys.consumer_key,
        consumer_secret=twitter_keys.consumer_secret,
        access_token=twitter_keys.access_token,
        access_token_secret=twitter_keys.access_token_secret)

    start = time.time()
    window_count = 1
    conn = sqlite3.connect(db_file)
    c = conn.cursor()

    if keywords_file:
        keywords = helpers.get_keywords_file(keywords_file)
    else:
        keywords = helpers.get_keywords_sql(db_file)

    pbar = tqdm(keywords)
    for keyword in pbar:
        logging.debug('Getting: ' + keyword)
        # keyword = keyword.replace('/','_')
        pbar.set_description("Processing {:10}".format(keyword))
        pbar.refresh()

        tso = TwitterSearch.TwitterSearchOrder()
        tso.set_include_entities(True)
        tso.set_result_type('recent')
        tso.set_keywords([keyword])

        # only look for tweets since last search..
        c.execute('SELECT max_id FROM latest_search WHERE keyword=?',
                  [keyword])
        fetched = c.fetchone()
        since_id = fetched[0] if fetched is not None else None
        if since_id: tso.set_since_id(since_id)

        ts.search_tweets(tso)

        max_id = []
        max_date = []
        min_date = []
        count = []

        try_next = True
        while try_next:
            # parse response
            meta = ts.get_metadata()
            remaining_limit = int(meta.get('x-rate-limit-remaining', 0))
            num_tweets = ts.get_amount_of_tweets()

            tweets = ts.get_tweets().get('statuses', [])
            helpers.write_tweets(tweets, output_dir)

            if num_tweets != 0:
                max_id.append(max([tweet['id'] for tweet in tweets]))
                max_date.append(
                    max([
                        pd.to_datetime(tweet['created_at'], utc=True)
                        for tweet in tweets
                    ]))
                min_date.append(
                    min([
                        pd.to_datetime(tweet['created_at'], utc=True)
                        for tweet in tweets
                    ]))
                count.append(num_tweets)

            if remaining_limit == 0:
                try:
                    limit_reset = int(
                        meta.get('x-rate-limit-reset',
                                 time.time() + 15 *
                                 60)) + 10  # extra sec to be on the safe side
                    # convert to correct datetime
                    limit_reset_dt = pd.to_datetime(limit_reset,
                                                    unit='s',
                                                    utc=True)
                    limit_reset_dt = limit_reset_dt.tz_convert('Europe/London')
                    pbar.set_description(
                        'Sleeping until {:%H:%M:%S}'.format(limit_reset_dt))
                    pbar.refresh()
                    pause.until(limit_reset)
                    pbar.set_description("Processing %s" % keyword)
                    pbar.refresh()
                    window_count += 1
                except Exception as e:
                    logging.warning('limit_reset ERROR: ' + keyword)
                    logging.warning(str(e))
                    logging.warning('Sleep for 15min...')
                    # wait the maximum time until next window...
                    pbar.set_description("Sleeping for 15 min.")
                    pbar.refresh()

                    pause.minutes(15)

                    pbar.set_description("Processing {:10}".format(keywords))
                    pbar.refresh()
                    window_count += 1

            # check if there is a next page for this search
            try:
                try_next = ts.search_next_results()
            except:
                try_next = False

        # stats and logging for current keyword
        max_id = max(max_id) if len(max_id) != 0 else since_id
        max_date = max(max_date) if len(max_date) != 0 else None
        min_date = min(min_date) if len(min_date) != 0 else None
        count = sum(count)

        search_stats = {
            'keyword': keyword,
            'count': count,
            'min_date': (min_date.strftime('%Y-%m-%d %H:%M:%S')
                         if min_date is not None else None),
            'max_date': (max_date.strftime('%Y-%m-%d %H:%M:%S')
                         if max_date is not None else None),
            'max_id': max_id,
            'search_date': pd.Timestamp.now().strftime('%Y-%m-%d %H:%M:%S'),
        }

        helpers.dict_to_sqlite(search_stats, 'latest_search', db_file)

    # stats and logging for iteration
    end = time.time()
    total_time = round((end - start) / 60)
    iteration_stats = {
        'start_time': pd.to_datetime(start,
                                     unit='s').strftime('%Y-%m-%d %H:%M:%S'),
        'duration_min': total_time,
        'keywords': len(keywords),
        'tweets_got': ts.get_statistics()[1],
        'queries_submitted': ts.get_statistics()[0],
        'windows_used': window_count,
    }
    helpers.dict_to_sqlite(iteration_stats, 'iterations', db_file)

    logging.info('Total number of windows: ' + str(window_count))
    logging.info('Total time (min): ' + str(total_time))
    logging.info('Total tweets got: ' + str(ts.get_statistics()[1]))

    # close db file
    c.close()
    conn.close()
Example #11
    helpers.check_db(args.db_file)

    #
    # Main loop
    #
    counter = 1
    while True:
        logging.info('Started cycle {:d}'.format(counter))
        try:
            twitter_search(db_file=args.db_file,
                           output_dir=args.output_dir,
                           keywords_file=args.keywords_file)
        except TwitterSearch.TwitterSearchException as e:
            logging.warning('TwitterSearchException')
            logging.warning(str(e))
            logging.warning('Pausing for 15 min.')
            pause.minutes(15)
        except Exception as e:
            logging.warning('Something unexpected happened.')
            logging.warning(str(e))
            pause.minutes(5)
        counter += 1
        logging.info('Pausing for 3 min, safe to terminate.')
        logging.info('ctrl+C')
        try:
            pause.minutes(3)
        except:
            logging.info('Bye bye...')
            sys.exit()
        print()
Example #12
def watch_jenkins():
    global TRACKED_INFORMATION
    global IS_JOB_BUILDING
    while True:
        current_time = datetime.datetime.now().time()
        if (SHOULD_PAUSE and (not current_time >= START_WATCH
                              or not current_time <= END_WATCH)):
            print_and_log(
                constants.PAUSING_WATCHDOG_MESSAGE.format(
                    resume_time=str(START_WATCH)), 'INFO')
            pause.minutes(60)
            continue

        iteration_start_time = time.time()
        old_tracked_information = deepcopy(TRACKED_INFORMATION)

        notification_string = ''
        notification_type = ''
        jobs_added_count = 0
        jobs_deleted_count = 0
        jobs_status_change_count = 0

        is_job_building = False

        for repo_name, repo_jobs in TRACKED_INFORMATION.items():
            try:
                repo_url = '%s/api/json/' % TRACKED_INFORMATION[repo_name][
                    'repo_url']
                get_response = get(repo_url)
                jenkins_info = json.loads(
                    get_response.text.encode('utf-8').strip())
                for job in jenkins_info['jobs']:
                    job_name = job['name']
                    TRACKED_INFORMATION[repo_name]['watched_jobs'][
                        job_name] = job
            except ConnectionError as e:
                print_and_log(
                    constants.CONNECT_ERROR_MESSAGE.format(repo_url=repo_url),
                    'ERROR')
                print_and_log(e, 'ERROR')
                continue
            except ValueError as e:
                print_and_log(
                    constants.VALUE_ERROR_MESSAGE.format(repo_url=repo_url),
                    'ERROR')
                print_and_log(e, 'ERROR')
                continue
            current_repo_jobs = TRACKED_INFORMATION[repo_name]['watched_jobs']
            old_repo_jobs = old_tracked_information[repo_name]['watched_jobs']
            for old_repo_job_name, old_repo_job_information in old_repo_jobs.items():
                full_job_name = repo_name + '/' + old_repo_job_name
                if old_repo_job_name not in current_repo_jobs:
                    # Job was deleted from the repo
                    if (not notification_type == constants.STATUS_NOTIF_TYPE
                            and
                            not notification_type == constants.ADD_NOTIF_TYPE):
                        notification_type = constants.DELETE_NOTIF_TYPE  # Delete notification has lowest priority
                        if (not notification_string
                                and not jobs_deleted_count):
                            notification_string = constants.JOB_WAS_DELETED.format(
                                job_name=full_job_name)
                    jobs_deleted_count += 1
                    log_information(
                        constants.JOB_WAS_DELETED.format(
                            job_name=full_job_name))

            for job_name, job_information in current_repo_jobs.items():
                full_job_name = repo_name + '/' + job_name
                current_job_name = job_information['name']
                current_job_color = job_information['color']
                current_job_url = job_information['url']
                current_repo = TRACKED_INFORMATION
                if ('anime' in current_job_color):
                    is_job_building = True
                if current_job_name not in old_repo_jobs:
                    # Job was added to repo
                    if (not notification_type == constants.STATUS_NOTIF_TYPE):
                        notification_type = constants.ADD_NOTIF_TYPE  # Add has second highest priority
                        if (not notification_string and not jobs_added_count):
                            notification_string = constants.JOB_WAS_ADDED.format(
                                job_name=full_job_name)
                    jobs_added_count += 1
                    log_information(
                        constants.JOB_WAS_ADDED.format(job_name=full_job_name))
                elif (current_job_color != old_repo_jobs[job_name]['color']):
                    # Job changed status
                    notification_type = constants.STATUS_NOTIF_TYPE  # Status has highest priority
                    if (not notification_string
                            and not jobs_status_change_count):
                        notification_string = constants.JOB_CHANGED_STATUS.format(
                            job_name=full_job_name,
                            old_status=old_repo_jobs[job_name]['color'],
                            new_status=current_job_color)
                    jobs_status_change_count += 1
                    log_information(
                        constants.JOB_CHANGED_STATUS.format(
                            job_name=full_job_name,
                            old_status=old_repo_jobs[job_name]['color'],
                            new_status=current_job_color))

        if (notification_type):
            if (not notification_string
                    and notification_type == constants.DELETE_NOTIF_TYPE):
                notification_string = constants.MULTIPLE_DELETES.format(
                    count=jobs_deleted_count)
            elif (not notification_string
                  and notification_type == constants.ADD_NOTIF_TYPE):
                notification_string = constants.MULTIPLE_ADDITIONS.format(
                    count=jobs_added_count)
            elif (not notification_string
                  and notification_type == constants.STATUS_NOTIF_TYPE):
                notification_string = constants.MULTIPLE_STATUS_CHANGES.format(
                    count=jobs_status_change_count)
            notification.notify(title='Jenkins Watchdog',
                                message=notification_string,
                                app_icon=None,
                                timeout=10)
        # stopped_time = datetime.datetime.now().time()
        if not is_job_building:
            print('INFO:%s - No jobs building. Sleeping for 5 minutes.' %
                  get_formatted_time())
            pause.minutes(5)
        else:
            print('INFO:%s - Jobs are building. Sleeping for 1 minute.' %
                  get_formatted_time())
            pause.minutes(1)
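watch_jenkins sleeps a fixed 60 minutes whenever the current time falls outside the watch window. A hedged alternative sketch that pauses until the configured start time instead, assuming START_WATCH is a datetime.time as in the snippet above (pause_until_watch_window is a hypothetical helper, not part of the original script):

import datetime

import pause

def pause_until_watch_window(start_watch):
    """Sleep until the next occurrence of the start-of-watch time."""
    now = datetime.datetime.now()
    resume_at = datetime.datetime.combine(now.date(), start_watch)
    if resume_at <= now:  # today's start time has already passed
        resume_at += datetime.timedelta(days=1)
    pause.until(resume_at)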