Example #1
 def test_24_special_d(self):
     """Removal All Specials"""
     tab = CronTab(tabfile=os.path.join(TEST_DIR, 'data', 'specials.tab'))
     tab.remove_all()
     self.assertEqual(len(list(tab)), 0)
Example #2
    else:
        print 'get_page_json(url="%s", offset="%s") returned status code %s' % (
            r.url, offset, r.status_code)


# Make sure the event time representation is in Denver timezone
def return_mountain_time(timestamp):
    return datetime.fromtimestamp(
        timestamp / 1000, tz=timezone('US/Mountain')).strftime('%A %b %d %X')


if __name__ == '__main__':

    # Create or load the cron file (just a test file for now)
    try:
        crontab = CronTab(tabfile=CRONFILE)
        #crontab = CronTab(user='******')
    except:
        print("Couldn't load cron file:", CRONFILE)

    # Get events for this month (offset 0) and next month (offset +1)
    # to make sure we have a good list of streaming events
    for offset in [0, 1]:
        # get_page_json returns a list: the crafted URL as a string and the response as a json object
        called_url, response = get_page_json(SERVICES_URL, offset)
        # print 'called get_page_json with URL: %s' % called_url
        # Find events with a "Services" category. We'll set up streaming for all of these.
        # Build a list of all _future_ RecordableEvent objects
        try:
            for event in response['items']:
                if 'Stream' in event['categories']:
Example #3
1: 59 23 * * * /home/oracle/scripts/alert_log_archive.sh >/dev/null 2>&1
Runs the script /home/oracle/scripts/alert_log_archive.sh every day at 23:59.
2: */5 * * * * /home/oracle/scripts/monitoring_alert_log.sh >/dev/null 2>&1
Runs the script /home/oracle/scripts/monitoring_alert_log.sh every 5 minutes.
3: 0 20 * * 1-5 mail -s "**********" [email protected] < /tmp/maildata
Sends a mail to [email protected] at 20:00 every day, Monday through Friday.
..............................................
About >/dev/null 2>&1:
0 is standard input (the keyboard),
1 is standard output,
2 is standard error.
'''
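
# A minimal sketch, assuming python-crontab is available and the script path quoted
# in entry 1 above exists: create that 23:59 daily entry programmatically instead of
# editing the crontab by hand.
from crontab import CronTab

demo_cron = CronTab(user=True)
demo_job = demo_cron.new(
    command='/home/oracle/scripts/alert_log_archive.sh >/dev/null 2>&1')
demo_job.setall('59 23 * * *')  # 23:59 every day, matching entry 1
demo_cron.write()  # installs the entry into the current user's crontab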

# Stock scraper:

gupiao_cron = CronTab(user=True)  # cron that launches the stock program on a schedule
gupiao_job = gupiao_cron.new(
    command=
    'cd /data/gupiao && python2.7 /data/gupiao/main_gp.py > /data/mylog.log 2>&1'
)  # the command to execute
gupiao_job.setall('* * * * *')  # run every minute
gupiao_job.set_comment("gupiao_0.1")
gupiao_job.enable()
gupiao_cron.write()

gupiao_init_cron = CronTab(user=True)  # cron for the scheduled initialisation job
gupiao_init_job = gupiao_init_cron.new(
    command=
    'cd /data/gupiao && python2.7 /data/gupiao/gupiao/spiders/__init__.py > /data/mylog_sendeamil_init.log 2>&1'
)
gupiao_init_job.setall('*/5 * * * *')  # run every 5 minutes
Example #4
from crontab import CronTab
# cron = CronTab('root')
cron = CronTab(tab="""
  * * * * * command
""")
job = cron.new(command='python test.py')  
job.minute.every(1)
job.enable()
print job.is_valid()
print job.is_enabled()
cron.write()  
Example #5
 def schedule_job_check(self, job, expected):
     # Schedules a cron job to check for completed tasks and for employees out of hours
     cron = CronTab(user=config.cron['user'])
     job = cron.new(command=f'python3 check_jobs.py {expected} {self.name}')
     job.setall(datetime.today() + timedelta(hours=expected))
     cron.write()
Example #6
 def test_bad_crontabs(self):
     self.assertRaises(ValueError, lambda: CronTab('*'))
     self.assertRaises(ValueError, lambda: CronTab('* *'))
     self.assertRaises(ValueError, lambda: CronTab('* * *'))
     self.assertRaises(ValueError, lambda: CronTab('* * * *'))
     self.assertRaises(ValueError, lambda: CronTab('* * * * * * * *'))
     self.assertRaises(ValueError, lambda: CronTab('-1 * * * *'))
     self.assertRaises(ValueError, lambda: CronTab('* mon-tue * * *'))
     self.assertRaises(ValueError, lambda: CronTab('* * * feb-jan *'))
     self.assertRaises(ValueError, lambda: CronTab('* * * * L'))
     self.assertRaises(ValueError, lambda: CronTab('* * * L *'))
     self.assertRaises(ValueError, lambda: CronTab('* L * * *'))
     self.assertRaises(ValueError, lambda: CronTab('L * * * *'))
     self.assertRaises(ValueError, lambda: CronTab('* 1, * * *'))
     self.assertRaises(ValueError, lambda: CronTab('60 * * * *'))
     self.assertRaises(ValueError, lambda: CronTab('* 25 * * *'))
     self.assertRaises(ValueError, lambda: CronTab('* * 32 * *'))
     self.assertRaises(ValueError, lambda: CronTab('* * * 13 *'))
     self.assertRaises(ValueError, lambda: CronTab('* * * * 9999'))
Example #7
 def __init__(  # pylint: disable=super-init-not-called
         self,
         inst_data=None,
 ):
     self._inst_data = inst_data
     self._cron_tab = CronTab(user=True)
Example #8
import os
import sys

from crontab import CronTab

username = sys.argv[1]
entrypoint_path = os.getcwd()
my_cron = CronTab(user=username)
path_comm = 'PATH=/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin:$PATH'
docker_start_comm = 'cd {} && eval $(docker-machine env)'.format(
    entrypoint_path)
run_scraper = 'python entrypoint.py -c parse >/tmp/script_output.log 2>/tmp/err.log'
cron_comm = """{} && {} && {}""".format(path_comm, docker_start_comm,
                                        run_scraper)
job = my_cron.new(command=cron_comm)
job.minute.on(0)
job.enable()
my_cron.write()
Example #9
#!/usr/bin/env python

import datetime
import time
import sys
sys.path.insert(0, '/home/pi/adhan/crontab')

from praytimes import PrayTimes
PT = PrayTimes()

from crontab import CronTab
system_cron = CronTab(user='******')

now = datetime.datetime.now()
strPlayFajrAzaanMP3Command = 'omxplayer -o local /home/pi/adhan/Adhan-fajr.mp3 > /dev/null 2>&1'
strPlayAzaanMP3Command = 'omxplayer -o local /home/pi/adhan/Adhan-Makkah.mp3 > /dev/null 2>&1'
strUpdateCommand = 'python /home/pi/adhan/updateAzaanTimers.py >> /home/pi/adhan/adhan.log 2>&1'
strClearLogsCommand = 'truncate -s 0 /home/pi/adhan/adhan.log 2>&1'
strJobComment = 'rpiAdhanClockJob'

#Set latitude and longitude here
#--------------------
lat = 42.288788
long = -71.551678

#Set calculation method, utcOffset and dst here
#By default system timezone will be used
#--------------------
PT.setMethod('ISNA')
utcOffset = -(time.timezone / 3600)
isDst = time.localtime().tm_isdst
Example #10
from crontab import CronTab

my_cron = CronTab(user='******')

my_job = my_cron.new(
    command=
    'python3 /home/saketh/Desktop/TVLK/CompanyCodingTests/CoinSwitch/get_data.py',
    comment='saketh_cron')
my_job.minute.every(1)
my_cron.write()
Example #11
#!/usr/bin/env python3
from crontab import CronTab

# Init cron.
cron = CronTab(user="******")
cron.remove_all()

# Add new cron job.
job = cron.new(command="/home/pi/piot/Week4/04_timesense.py")

# Job settings.
job.minute.every(1)
cron.write()
Example #12
"""This file is made to install crontabs automatically for Gravimetrics """

from crontab import CronTab

# Run as this user; generally this will be pi
gravi_cron = CronTab(user='******')

# this is the command to be run by this cron job
cmd = 'python3 /gravi_control/gravi.py'
comment = 'Gravimetrics main script'

# Create the cron job to run during hours 9 and 21
cron_job = gravi_cron.new(cmd, comment)
cron_job.hour.on(9, 21)

# Commit the job
gravi_cron.write()

# Show our changes
print(gravi_cron.render())
Example #13
 def setUp(self):
     self.crontab = CronTab(tabfile=os.path.join(TEST_DIR, 'data', 'test.tab'))
Example #14
 def test_24_special_w(self):
     """Write Specials"""
     tab = CronTab(tabfile=os.path.join(TEST_DIR, 'data', 'specials.tab'))
     self.assertEqual(tab.render(), """@hourly hourly\n@daily daily\n@weekly weekly\n""")
     self.assertEqual(len(list(tab)), 3)
Example #15
from crontab import CronTab

cron = CronTab(tabfile='/var/spool/cron/hulee', user='******')

print "print1"
for line in cron.lines:
    print line

print "new"
cron.new(command='/foo/bar', comment='SomeID')

#print "print2"
#job = cron[0]
#print job

#print "remove"
#cron.remove( job )

print "print3"
for line in cron.lines:
    print line

#print "remove"
#cron.remove_all()

print "write"
cron.write()

print "print4"
for line in cron.lines:
    print line
Example #16
import os

import dotenv

from crontab import CronTab

dotenv.load_dotenv()
my_cron = CronTab(user=os.getenv('user'))
for job in my_cron:
    print(job)
scraper_job = my_cron.new(command=os.getenv('python_location') + " " +
                          os.getenv('scraper_location'))
scraper_job.hour.every(1)
alert_job = my_cron.new(command=os.getenv('python_location') + " " +
                        os.getenv('alert_location'))
alert_job.minute.every(30)
my_cron.write()
Example #17
from crontab import CronTab

my_cron = CronTab(user='******')
my_cron.remove_all()
job = my_cron.new(
    command=
    'sudo /usr/bin/python /scrapy/python-spider-test/begin.py > /temp.log')
job.minute.every(10)
my_cron.write()
job2 = my_cron.new(
    command=
    'sudo /usr/bin/python /scrapy/python-spider-test/sendMail.py > /temp.log')
job2.minute.every(15)
my_cron.write()
Example #18
import os
from crontab import CronTab
from config import cronTiming
from config import *
from db import *

for mac in macAddresses:
    reset(mac)

db.dump()

cron = CronTab(user=True)
dirName = os.path.dirname(__file__)
dirName = os.path.realpath(dirName)
scriptPath = dirName + '/refresh.py'
job = cron.new(command='python3 ' + scriptPath, comment='plant-alert')
job.setall(cronTiming)

for job in cron:
    print(job)

cron.write()
Example #19
 def _run_impossible(self, crontab, now):
     ct = CronTab(crontab)
     delay = ct.next(now, default_utc=True)
     assert delay is None, (crontab, delay, now,
                            now + datetime.timedelta(seconds=delay))
Example #20
#!venv/bin/python

from crontab import CronTab

user_cron = CronTab(user='******')

job = user_cron.new(
    command='/home/aasoliz/Documents/Other/Commands/py/bitStats/job.sh')

job.minute.on(0)
job.day.every(1)

user_cron.write_to_user(user='******')
print user_cron.render()

assert job.is_valid()
Example #21
def clear_all_tasks():
    os_user = getpass.getuser()
    cron_tabs = CronTab(user=os_user)
    cron_tabs.remove_all()
    cron_tabs.write()
Example #22
 def generate(self):
     #Todo - allow host to be set as a parameter
     host = "localhost"
     #Get port info from the uri in case a non-standard mgmt port is used
     splunkd_uri = self._metadata.searchinfo.splunkd_uri
     port = splunkd_uri.split(":")[-1]
     
     #Owner will be set to whoever ran the search
     owner = self._metadata.searchinfo.owner
     app= self._metadata.searchinfo.app
     
     #Get token to authenticate to API to rerun searches
     token=self._metadata.searchinfo.session_key
     
     #Use the rerun command's earliest and latest as the outage period; this way it can be set by the time picker instead of as parameters
     outageStart = self._metadata.searchinfo.earliest_time
     outageEnd = self._metadata.searchinfo.latest_time
     
     # Get the rerun command search id - this is because Splunk was not killing the python script when search was cancelled
     # Use this to monitor the status of the search and if it is no longer "Running" exit the script
     rerunSid = self._metadata.searchinfo.sid
     
     #Compile regex to find searches
     filter = re.compile(self.regex)
     
     #Try to connect to Splunk API
     self.logger.info("[RERUN CMD]: Connecting to Splunk API...")
     try:
         #service = client.connect(host=host, port=port, token=token, owner=owner, app=app)
         service = client.connect(host=host, port=port, token=token)
         self.logger.info("[RERUN CMD]: Connected to Splunk API successfully")
     except Exception as e:
         self.logger.error("[RERUN CMD]: {}".format(e.msg))
         
     #Splunk is not stopping the script, so ping the sid from here and stop the script if it is cancelled by the user
     #Todo - look into getting specific job info based on sid instead of using a for statement
     for job in service.jobs:
         if job.sid == rerunSid:
             rerunJob = job
             self.logger.debug(job.state)        
     #If for some reason the script can't find the search that triggered it
     if not rerunJob:
         self.logger.error("[RERUN CMD]: Rerun Job SID not found exiting...")
         sys.exit(1)
     
     # Main loop to find and rerun searches
     for search in service.saved_searches:
        # Does not rerun disabled searches
        if filter.search(search.name) and search.is_scheduled=="1" and search.disabled=="0":
             #Parse the Splunk cron schedule for the found search
             ct = CronTab(search['content']['cron_schedule'])
             
             #Get earliest and latest pattern for search
             dispatch_earliest = search['content']['dispatch.earliest_time']
             dispatch_latest = search['content']['dispatch.latest_time']
             
             # Start with runTime equal to outageStart; crontab will be used to advance this to the next time the
             # scheduled search would have run before rerunning
             runTime=outageStart
             while True:
                 # Check to see if the search has been cancelled by user
                 rerunJob.refresh()
                 if rerunJob.state.content.dispatchState!="RUNNING":
                     sys.exit()
                 
                 # Get next scheduled run time, and break if greater than outageEnd
                 runTime = runTime + ct.next(now=runTime,default_utc=False)
                 if runTime > outageEnd or rerunJob.state.content.dispatchState!="RUNNING":
                     self.logger.error(rerunJob.state.content.dispatchState)
                     break
                 
                 #Get new earliest and latest based on new search run time
                 earliest = self.getTimeRange(dispatch_earliest,runTime)
                 latest = self.getTimeRange(dispatch_latest,runTime)
                 
                 # Set search parameters and run search
                 kwargs_block = {'dispatch.earliest_time':earliest, "dispatch.latest_time":latest, "trigger_actions":self.trigger}
                 job = search.dispatch(**kwargs_block)
                 time.sleep(0.25)
                 #Couldn't pass blocking argument, so sleep until isDone
                 while job['isDone']!="1":
                     self.logger.debug("[RERUN CMD]: Percent {}".format(job['doneProgress']))
                     time.sleep(1)
                     job.refresh()
                 message = "{} ran successfully for scheduled time {}".format(search.name,runTime)
                 self.logger.info("[RERUN CMD]: {}".format(message))
                 #Return results
                 yield {"_time":time.time(), "Message":message,"Search":search.name, "MissedRunTime":runTime, "MissedEarliest":earliest,"MissedLatest":latest, "TriggerActions":self.trigger,"Finished":job['isDone'],"CompletionPercentage":float(job['doneProgress'])*100,"ScanCount":job['scanCount'],"EventCount":job['eventCount'],"ResultCount": job['resultCount']}
Example #23
def del_rec_cron(user, name, minutes, hours, dom, month, dow):
    my_cron = CronTab(user=user)
    for job in my_cron:
        if job.comment == str(name):
            my_cron.remove(job)
            my_cron.write()
Example #24
def get_crontab(crontab_args):
    crontab_entry = " ".join(crontab_args[:5])
    CronTab(crontab_entry)
    return crontab_entry
Example #25
from crontab import CronTab
cron = CronTab(user='******')
job = cron.new(command='/usr/bin/python /home/pi/Documents/updater.py')
job.minute.on(0)
job.hour.on(0)
cron.write()
Example #26
    #
    # Time-lapse capture values section end

    # Create a lock file
    file=open("/tmp/"+lockfile,"w")
    file.write("#"+curdir)
    file.close()

    # Delete old TLID file if found
    try:
        os.remove(project_dir+"/"+idfile)
    except OSError:
        pass
    
    user_cron = CronTab(user=user)
    print("Old crontab:")
    for job in user_cron:
        print(job)
    
    # Clear user crontab
    user_cron.remove_all()
    print("User "+user+" crontab cleared")
    print("")
    
    # New crontab job
    timestr,cmd=cronstr(int(interval_min+interval_h*60))
    job = user_cron.new(cmd)
    job.setall(timestr)
    user_cron.write()
    
Example #27
#!/usr/bin/env python3
from crontab import CronTab

#Init cron
cron = CronTab(user='******')

#Add new cron job
job = cron.new(command='/home/pi/A01/3ii_pushbullet.py')

#Job settings set to run every hour
job.hour.every(1)
cron.write()
Example #28
def createCron():
    path = os.path.abspath(__file__)
    tsc_cron = CronTab(user=os.getlogin())
    job = tsc_cron.new(command="%s %s" % ('$(which python3)', path), comment="Teamspaceschoice weekly scheduler")
    job.setall('0 12 * * 5')
    tsc_cron.write()
Example #29
def main():
    print('Welcome to the ScrapeBot setup')

    config = get_config()
    instance_name = check_minimal_config(config)

    print('Continuing to the database')
    print('- connecting to ' +
          config.get('Database', 'host', fallback='localhost'))
    try:
        engine = get_engine(config)
        base.metadata.create_all(engine)
        db = get_db(engine)
    except:
        print('- uh, there is a problem with connecting to your database ...')
        exit(3)
    print('- read tables: ' + ', '.join(base.metadata.tables.keys()))
    users = db.query(User).order_by(User.created).all()
    user = None
    if len(users) == 0:
        print(
            '- the database currently does not contain any users, so we will create a default one'
        )
        username = read_forcefully('- what name should this user listen to',
                                   'root')
        email = read_forcefully('- and what is this user\'s email address')
        user = create_user(db, username, email)
    else:
        print('- one or many users available')
        user = db.query(User).filter(User.name == 'root').first()
        if user is None:
            user = users[0]

    while read_bool_forcefully('Do you want to create another user'):
        username = read_forcefully('- what name should this user listen to')
        email = read_forcefully('- and what is this user\'s email address')
        create_user(db, username, email)

    print('Checking this instance')
    this_instance = db.query(Instance).filter(Instance.name == instance_name)
    print('- it is called ' + instance_name)
    if this_instance.count() == 0:
        db.add(Instance(name=instance_name, owner_uid=user.uid))
        db.commit()
        print('- instance newly registered and ascribed to user "' +
              user.name + '"')
    else:
        print(
            '- instance name already registered, meaning that it has been used elsewhere'
        )
        if read_bool_forcefully('- is this on purpose'):
            print('- okay, fair enough, proceeding ...')
        else:
            instance_name = read_forcefully(
                '- so how should this instance be called')
            config.add_value('Instance', 'Name', instance_name)
            config.write()
            print('- alright, updated "config.ini"')
            db.add(Instance(name=instance_name, owner_uid=user.uid))
            db.commit()
            print('- instance newly registered and ascribed to user "' +
                  user.name + '"')
    print('- browser-wise this instance will use ' +
          config.get('Instance', 'browser', fallback='Firefox'))

    print('Finishing up')
    print('- instance should be ready to use')
    print('- to run it once, use the script "scrapebot.py"')
    if platform.system() == 'Linux':
        print(
            '- to run it regularly and since you are using Linux, I recommend a cronjob'
        )
        os_user = getpass.getuser()
        if read_bool_forcefully('- install cronjob for ' + os_user + ' now'):
            cron = CronTab(user=os_user)
            cron.remove_all(comment='ScrapeBot // ' + instance_name)
            cronjob = cron.new(command='cd ' + os.getcwd() + ' && ' +
                               sys.executable +
                               ' scrapebot.py >> scrapebot_cron.log',
                               comment='ScrapeBot // ' + instance_name)
            cronjob.minute.every(2)
            cron.write()
    else:
        print(
            '- to run it regularly (which is what you want), you may want to use Windows Task Scheduler or the like'
        )
    print('---------')
    print(
        'Thanks for using; please direct any questions and pull requests to https://github.com/marhai/scrapebot'
    )
    db.close()
Example #30
 def test_24_special_r(self):
     """Read Specials"""
     tab = CronTab(tabfile=os.path.join(TEST_DIR, 'data', 'specials_enc.tab'))
     self.assertEqual(tab.render(), """@hourly hourly\n@daily daily\n@daily midnight\n@weekly weekly\n@reboot reboot\n""")
     self.assertEqual(len(list(tab)), 5)