def schedule():
    """Read the configured jobs, register each with a blocking cron
    scheduler, and run it until interrupted."""
    click.echo('Scheduling jobs...')
    configured = get_jobs()
    cron_scheduler = BlockingScheduler(timezone=timezone(TIMEZONE))
    for name in configured:
        try:
            args, kwargs, cron = get_job_execution_info(name)
        except JobConfException as exc:
            # A misconfigured job is skipped; the rest are still scheduled.
            click.secho(str(exc), err=True, fg='red')
            continue
        except JobFatalException as exc:
            # A fatal configuration problem aborts scheduling entirely.
            click.secho(str(exc), err=True, fg='red')
            return
        cron_scheduler.add_job(
            execute_wrapper,
            args=args,
            kwargs=kwargs,
            trigger=CronTrigger.from_crontab(cron),
            replace_existing=True,
            id=name,
        )
        click.secho(f'- cron "{cron}" set for job "{name}".', fg='green')
    cron_scheduler.print_jobs()
    click.secho('Scheduler started!', fg='green')
    click.echo('Press Ctrl+C to exit')
    try:
        cron_scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        click.echo('Bye')
def setUp(self):
    """Create a local 'test' job so the upload tests have something to use."""
    # The library under test can leave SSL sockets unclosed; silence the
    # resulting ResourceWarning noise in the test output.
    warnings.filterwarnings(
        "ignore",
        category=ResourceWarning,
        message="unclosed.*<ssl.SSLSocket.*>",
    )
    # Generating the job here also exercises generate_job to some extent.
    jobs.generate_job('test', 'test', dry_run=False)
    self.jobs = jobs.get_jobs(job_type='test')
def get_job_form():
    """Render jobReturn.html for the job id posted in the 'jsonData' field.

    Responds with 404 when the id does not map to a known job.
    """
    job_dict = jobs.get_jobs()
    jid = request.form.get('jsonData')
    try:
        # KeyError is the only expected failure: an unknown job id.
        # (The original bare ``except:`` also swallowed template-rendering
        # errors and even KeyboardInterrupt, masking real bugs as 404s.)
        strJob = json.dumps(job_dict[jobs._generate_job_key(jid)])
    except KeyError:
        return abort(404)
    jsonData = json.loads(strJob)
    return render_template('jobReturn.html', jsonData=jsonData, data=strJob)
def update_cpu_speeds(self):
    """Rebuild the per-instance menu entries from the current job list and
    refresh the indicator label.

    The label shows each job's type, parenthesised while still SUBMITTED.
    """
    label = 'failed'
    try:
        jobslist = jobs.get_jobs(config)
        label = ''
        # Remove the menu entries built on the previous refresh.
        for item in self.instance_items:
            self.menu.remove(item)
        self.instance_items.clear()
        # NOTE(review): nesting reconstructed from a whitespace-mangled
        # source; confirm the menu items belong only to the non-SUBMITTED
        # branch.
        for job in jobslist:
            if label != '':
                label += ' '
            if job['status'] in ['SUBMITTED']:
                label += '(' + job['type'] + ')'
                # Fast one-time poll while we wait for the job to start.
                GLib.timeout_add_seconds(10, self.handler_poll_onetime)
            else:
                label += job['type']
                item = Gtk.MenuItem()
                item.set_label('ssh to %s' % job['image'])
                item.connect("activate", self.handler_instance_ssh)
                item.target_image = job['image']
                item.job_number = job['number']
                item.show()
                self.menu.insert(item, 0)
                self.instance_items.append(item)
                item = Gtk.MenuItem()
                item.set_label('kill %s' % job['image'])
                item.connect("activate", self.handler_instance_kill)
                item.target_image = job['image']
                item.job_number = job['number']
                item.show()
                self.menu.insert(item, 0)
                self.instance_items.append(item)
    except Exception:
        # Was a bare ``except:`` on the inner handler, which would also
        # trap KeyboardInterrupt/SystemExit; narrowed to Exception.
        label = 'exception occurred'
        try:
            print(traceback.format_exc())
        except Exception:
            print('exception in exception :-P')
    self.ind.set_label(label, "")
def update_cpu_speeds(self):
    """Refresh the indicator label and rebuild the instance menu from the
    job list returned by jobs.get_jobs(config)."""
    label = 'failed'
    try:
        current_jobs = jobs.get_jobs(config)
        label = ''
        # Clear out the entries added on the previous pass.
        for entry in self.instance_items:
            self.menu.remove(entry)
        self.instance_items.clear()
        # NOTE(review): indentation was lost in this source; the item
        # construction is assumed to apply only to non-SUBMITTED jobs.
        for job in current_jobs:
            if label != '':
                label += ' '
            if job['status'] in ['SUBMITTED']:
                label += '(' + job['type'] + ')'
                # Poll again quickly while waiting for the job to start.
                GLib.timeout_add_seconds(10, self.handler_poll_onetime)
            else:
                label += job['type']
                entry = Gtk.MenuItem()
                entry.set_label('ssh to %s' % job['image'])
                entry.connect("activate", self.handler_instance_ssh)
                entry.target_image = job['image']
                entry.job_number = job['number']
                entry.show()
                self.menu.insert(entry, 0)
                self.instance_items.append(entry)
                entry = Gtk.MenuItem()
                entry.set_label('kill %s' % job['image'])
                entry.connect("activate", self.handler_instance_kill)
                entry.target_image = job['image']
                entry.job_number = job['number']
                entry.show()
                self.menu.insert(entry, 0)
                self.instance_items.append(entry)
    except Exception:
        # Narrowed from a bare ``except:``; log the traceback instead of
        # silently swallowing it.
        label = 'exception occurred'
        try:
            print(traceback.format_exc())
        except Exception:
            print('exception in exception :-P')
    self.ind.set_label(label, "")
def get_post_jobs():
    """
    Request handler for /jobs/ path.

    GET: returns the list of scheduled Jobs
        Args: None
        Returns: PagedJobs (str): A list of Jobs with paging metadata as json
    POST: adds one or more scheduled Jobs to the jobs list
        Args: jobs (list): the Jobs to be added as a list
        Returns:
            Job Ids (list): new job ids in received order if response
                status >= 200 and <= 499
            Error (str): json error if response status >= 500
    """
    if request.method == 'POST':
        jobs_data = request.json
        try:
            return _make_response(response=jobs.add_jobs(jobs_data))
        except api_error.BadRequestError as e:
            # Project exception type carries a .message attribute.
            return _make_error(400, e.message)
        except Exception as e:
            # BUG FIX: plain Exception has no .message in Python 3 --
            # e.message raised AttributeError inside the handler.
            return _make_error(500, str(e))
    else:
        start_pos = request.args.get('start_pos', None)
        item_count = request.args.get('item_count', None)
        name = request.args.get('name', None)
        try:
            return _make_response(response=jobs.get_jobs(
                start_pos=start_pos, item_count=item_count, name=name))
        except api_error.BadRequestError as e:
            return _make_error(400, e.message)
        except Exception as e:
            # Same fix as the POST branch: use str(e), not e.message.
            return _make_error(500, str(e))
def get_jobs():
    """Return the current job list serialized as a JSON string."""
    all_jobs = jobs.get_jobs()
    return json.dumps(all_jobs)
def _scrape_contests(limit=None):
    """Scrape stopstalk.com/contests and format the first *limit* rows.

    Each contest's non-empty cell texts and its link are joined one per
    line, exactly as the three previously duplicated inline scrapers did.
    limit=None formats every row.
    """
    URL = 'https://www.stopstalk.com/contests'
    content = requests.get(URL)
    soup = BeautifulSoup(content.text, 'html.parser')
    # Use dictionary to pass key : value pair
    contentTable = soup.find('table', {"class": "centered bordered"})
    rows = contentTable.find_all('tr')
    lo = []
    count = 0
    for row in rows[1:]:  # skip the header row
        lo.append("##")
        columns = row.find_all('td')
        for column in columns:
            if column.get_text() != "":
                lo.append((column.get_text()).strip() + "@@")
        # Column 4 holds the anchor with the contest link.
        lo.append((columns[4].find('a')['href']).strip())
        count += 1
        if limit is not None and count == limit:
            break
    result = ""
    for r in lo:
        for each_contest in r.split("##"):
            for attr in each_contest.split("@@"):
                result += attr + "\n"
    return result


def get_codery_result(codery_keywords: str) -> str:
    """Dispatch a @Codery command string to the matching sub-handler.

    Returns the reply text for a recognised command, the help text for
    'help' or empty/None input, and None (implicitly, as before) for a
    command that matches no branch.
    """
    help_message = "*Help for Codery* : \n\n" \
        "The bot responds to messages starting with @Codery.\n\n" \
        "`@Codery contests` will return top Contests today, their dates, time left and the links to each contest.\n" \
        "`@Codery top contest` also returns the top Contest result.\n" \
        "`@Codery trending` returns the top trending problems across all programming platforms.\n" \
        "`@Codery dictionary <search term>` returns the meaning of that word in an instant.\n" \
        "`@Codery jokes` keeps your morale boosted with programming jokes.\n" \
        "`@Codery jobs <searchword>` returns the top jobs for that search word.\n" \
        "`@Codery news <keyword>` returns the news for that key word.\n" \
        "`@Codery man <function>` returns the user manual of that function.\n" \
        "`@Codery top <n> contests` will return n number of top contests at that time.\n \n" \
        "Example:\n" \
        " * @Codery contests\n" \
        " * @Codery top contest\n" \
        " * @Codery jokes\n" \
        " * @Codery top 7 contests\n" \
        " * @Codery dictionary computer\n" \
        " * @Codery search code\n" \
        " * @Codery jobs python\n" \
        " * @Codery jobs java\n" \
        " * @Codery trending\n" \
        " * @Codery man execvp\n" \
        " * @Codery news corona"
    # Guard early: the original called .strip() first, so None crashed with
    # AttributeError and its trailing ``is None`` elif was unreachable.
    if codery_keywords is None or codery_keywords.strip() == '':
        return help_message
    codery_keywords = codery_keywords.strip()
    codery_keywords_list = codery_keywords.split(" ")
    keyword = codery_keywords_list[0]
    if codery_keywords == 'help':
        return help_message
    elif keyword == "todo":
        return todo.get_todo_response(codery_keywords, CoderyHandler)
    elif keyword == "jobs":
        return jobs.get_jobs(codery_keywords, CoderyHandler)
    elif keyword == "leaderboard":
        return leaderboard.get_leaderboard()
    elif keyword == "trending":
        return trendingproblems.get_problems()
    elif keyword == "search" or keyword == "dictionary":
        return dictionary.get_dictionary_response(codery_keywords,
                                                  CoderyHandler)
    elif keyword == "courses" or keyword == "course":
        return courses.get_courses(codery_keywords, CoderyHandler)
    elif keyword == "jokes" or keyword == "joke":
        return geekjokes.get_joke(codery_keywords, CoderyHandler)
    elif keyword == "calculator":
        return "The answer is" + calculator.get_calculator_response(
            codery_keywords, CoderyHandler)
    elif keyword == "news":
        return news.get_news_response(codery_keywords, CoderyHandler)
    # return a list of top contests
    elif codery_keywords == 'contests':
        return ("The top contests and hackathons of today are \n"
                + _scrape_contests())
    elif codery_keywords == 'top contest':
        return _scrape_contests(limit=1)
    # to return a list of n top contests
    elif len(codery_keywords_list) == 3:
        if codery_keywords_list[0] == "top" and \
                codery_keywords_list[2] == "contests":
            n = int(codery_keywords_list[1])
        else:
            # BUG FIX: the original evaluated ``help_message`` without
            # returning it, then crashed with NameError on the undefined
            # row limit below.
            return help_message
        return _scrape_contests(limit=n)
df['tuple'] = df.doc.apply(process_doc) df['token'] = df.tuple.apply(get_tokens) df['lemma'] = df.tuple.apply(get_lemmas) # df = df.filter(['review_id', 'tip_id', 'tokens', 'lemmas']) # Test with Dask. return df ###################### ###Helper Functions### ###################### # None # if __name__ == "__main__": main_logger = logging.getLogger(__name__+" Token Fixer") num_jobs = len(get_jobs('retoken')) # No module creates retoken jobs. Manually create these. bucket = get_bucket() for i in range(num_jobs): # Get a job and read out the datapath get_jobs(job_type='retoken') current_job = pop_current_job() asset = read_job(current_job)['File'] main_logger.info('Running job {}. Read file {}'.format(current_job, asset)) # Load the data datapath = download_data(asset) data = load_data(datapath)
def add_other_schedules(self):
    """Register a Schedule entry for every configured job."""
    for job_spec in get_jobs():
        entry = Schedule(
            job_spec['name'],
            job_spec['job_class'],
            job_spec['minute'],
            job_spec['hour'],
            job_spec['args'],
        )
        self.add_schedule(entry)
# return tables[table_name] # raise NameError('Tablename not found. Aborting.') # tables = { # 'business': 'businesses', # 'user': '******', # 'checkin': 'checkins', # 'photo': 'photos', # 'tip': 'tips', # 'review': 'reviews', # } if __name__ == "__main__": write_logger = logging.getLogger(__name__ + ' DB-writer') num_jobs = len(get_jobs('post')) for i in range(num_jobs): # Get a job and read out the datapath current_job = pop_current_job() asset = read_job(current_job)['file'] tablename = read_job(current_job)['tablename'] write_logger.info('Running job {}. Read file {}'.format( current_job, asset)) # Load the data datapath = download_data(asset) data = load_data(datapath) # Build query package package = df_to_query(df=data, tablename=tablename)
return tuple[0] def get_subjectivity(tuple): return tuple[1] def add_sentiment(df): df['sentiment'] = df.text.apply(process_text) df['polarity'] = df.sentiment.apply(get_polarity) df['subjectivity'] = df.sentiment.apply(get_subjectivity) df = df.filter(['review_id', 'tip_id', 'polarity', 'subjectivity']) return df if __name__ == "__main__": main_logger = logging.getLogger(__name__+" Sentiment Adder") num_jobs = len(get_jobs('sentiment')) # No module creates sentiment jobs. Manually create these. for i in range(num_jobs): # Get a job and read out the datapath current_job = pop_current_job() asset = read_job(current_job).get('file') main_logger.info('Running job {}. Read file {}'.format(current_job, asset)) # Load the data datapath = download_data(asset) data = load_data(datapath) sentiment_df = add_sentiment(data) # Write Data to s3 savepath = asset+'_sentiment'
def api_get_jobs():
    """JSON API endpoint: return every known job."""
    current_jobs = get_jobs()
    return jsonify(current_jobs)
def get_job(jid):
    """Return the job identified by *jid* as pretty-printed JSON.

    Responds with 404 when the id does not map to a known job.
    """
    job_dict = jobs.get_jobs()
    try:
        # KeyError (unknown id) is the only expected failure; the original
        # bare ``except:`` also hid serialization bugs as 404s.
        job = job_dict[jobs._generate_job_key(jid)]
    except KeyError:
        return abort(404)
    return json.dumps(job, indent=4)
def create():
    """Generate a synthetic job application.

    GET:  render the creation form, pre-seeded with ``job_id``/``firm``
          taken from the query string.
    POST: fabricate an applicant -- randomized name/demographics, contact
          info, an address and school sampled near the job's zipcode, a
          work history, and availability -- insert it into the ``app`` and
          ``work_hist`` tables, and render the finished application.
    """
    if request.method == "GET":
        job_id = request.args.get("job_id")
        firm = request.args.get("firm")
        return render_template('generate/create.html', job_id=job_id, firm=firm)
    if request.method == 'POST':
        job_id = request.form['job_id']
        race = request.form['race']
        gender = request.form['gender']
        # firm still arrives on the query string, even for the POST.
        firm = request.args.get("firm")
        # Allow for randomized race / gender
        if race == "Random":
            race = np.random.choice(['Black', 'White'], 1)[0]
        if gender == "Random":
            gender = np.random.choice(['Male', 'Female'], 1)[0]
        db = get_db()
        curs = db.cursor()
        error = None
        if not job_id:
            error = 'Job id is required.'
        if error is not None:
            # NOTE(review): execution continues after flashing the error;
            # an early return/redirect here looks intended -- confirm.
            flash(error)
        # Generate name based on data
        existing_names = get_existing_names(job_id)
        firstname, lastname, email = get_name(race, gender, existing_names)
        # Generate contact information (fixed placeholder phone number)
        phone = 4562311231
        # Generate social: pick one SSN at random from the SSNS table
        socials = db.execute('SELECT ssn from SSNS'
                             ' ORDER BY RANDOM()'
                             ' LIMIT 1').fetchone()
        social = [s for s in socials][0]
        # Pull all addresses for state where job is located
        job_state, job_zip = db.execute(
            'SELECT state, zipcode FROM job WHERE id = ?',
            (job_id, )).fetchone()
        potential_addys = db.execute(
            'SELECT id, numb, street, city, region, zip'
            ' FROM addy WHERE region = ?', (job_state, ))
        potential_addys = pd.DataFrame(potential_addys.fetchall())
        # Sample addresses within 1k of jobzip (column 5 is the zipcode)
        close_addys = potential_addys.loc[np.abs(potential_addys[5] -
                                                 job_zip) <= 1000]
        # Fall back to the whole state when the nearby pool is small.
        # NOTE(review): the <= 1000 row threshold looks generous -- confirm
        # it is meant to trigger this often.
        if close_addys.shape[0] <= 1000:
            close_addys = potential_addys
        # Address
        addy_id = close_addys.sample(1).values[0, 0]
        # Pull all the schools from the state
        potential_schls = db.execute(
            'SELECT id, name, state, zip'
            ' FROM schl WHERE state = ?', (job_state, ))
        potential_schls = pd.DataFrame(potential_schls.fetchall())
        # Sample schools within 1k of jobzip (column 3 is the zipcode)
        close_schls = potential_schls.loc[np.abs(potential_schls[3] -
                                                 job_zip) <= 1000]
        if close_schls.shape[0] <= 1000:
            close_schls = potential_schls
        # Actual schl
        schl_id = close_schls.sample(1).values[0, 0]
        # Age -- get random date between 1980 and 2000
        dob = "{}/{}/{}".format(
            np.random.choice(range(1, 13), 1)[0],
            np.random.choice(range(1, 28), 1)[0],
            np.random.choice(range(1980, 2000), 1)[0])
        grad_year = pd.to_datetime(
            dob, format="%m/%d/%Y").year + 18  # Year you turn 18
        # Put into its table
        job_hist = get_jobs(firm, close_addys, dob)
        # Other stuff
        pin = 5494
        over16 = True
        can_prove = True
        # Generate employment history (optional)
        # legal_work = True
        # responsibilities =
        # reason_for_leaving =
        #
        # References (optional, only years_known requires)
        # personal_or_professional =
        # ref_first, ref_last, phone, years_known, street, city, state, zipcode
        #
        # Generate education history
        #
        # Misc
        # relatives = False  # if yes, who
        # referal = False  # if yes, who
        # how_hear = 'KFC Website'
        #
        # Behavior (not required)
        how_many_jobs = len(job_hist)
        ever_terminated = 0
        # explian = ""
        # Generate availability
        hours = 40
        available_all_week = 1
        notice = 'Less than two weeks'
        import datetime
        # Start date roughly one month from today.
        start_date = (datetime.date.today() +
                      datetime.timedelta(1 * 365 / 12)).isoformat()
        # # Physical
        # can_lift_50 = True
        # can_spend_time_on_feet = True
        # specialized_training = ""
        # willing_to_look_ok = True
        # has_transport = True
        # Redirect
        # Tuple order must match the INSERT column list below.
        details = (g.user['id'], job_id, firstname, lastname, gender, race,
                   dob, phone, email, addy_id, hours, ever_terminated,
                   available_all_week, notice, start_date, schl_id,
                   grad_year, social)
        curs.execute(
            'INSERT INTO app (user_id, job_id, firstname, lastname, gender, race, dob, phone, email,'
            ' addy_id, hours, ever_terminated, available_all_week, notice, start_date, schl_id, grad_year, social)'
            ' VALUES ({})'.format(", ".join(["?" for k in details])), details)
        lastrow = curs.lastrowid
        db.commit()
        # Insert job history
        for old_job in job_hist:
            db.execute(
                'INSERT INTO work_hist (app_id, employer, position, addy_id, supervisor, start, end)'
                ' VALUES (?, ?, ?, ?, ?, ?, ?)', (lastrow, ) + old_job)
        db.commit()
        # Dictionary with full details
        full_details = {
            'contact': [
                firstname, lastname, phone,
                "{}+{}@gmail.com".format(email, lastrow),
                get_addy(addy_id)
            ],
            'demos': [gender, race, dob],
            'job_hist': get_job_hist(lastrow),
            'avail': [hours, available_all_week, notice, start_date],
            'schl': [get_schlname(schl_id), get_schladdress(schl_id), grad_year],
            'id': lastrow,
            'job_id': job_id,
            'firm': firm,
            'social': str(social),
        }
        return render_template('generate/show_app.html', details=full_details)
from datetime import datetime from pathlib import Path from flask import Flask, render_template, send_from_directory, abort import config from jobs import get_jobs ERR_STR = 'Error occurred during execution' app = Flask(__name__) notebooks = get_jobs() output_folder = Path(config.get_nb_config()['output_folder']) @app.route('/') def index(): data = [] for (j, j_data) in notebooks.items(): n_data = {} n_data['notebook'] = j n_data['cron'] = j_data['cron'] path = Path(output_folder) / j if path.exists(): runs = sorted([x for x in path.iterdir() if x.suffix == '.html'], reverse=True) n_data['nb_runs'] = len(runs) with runs[0].open() as nfile: n_data['last_status'] = 'error' if ERR_STR in nfile.read(
import jobs
import checkchanges
from os import path
from os.path import join

# Resolve paths relative to this script, not the current working directory.
script_dir = path.dirname(path.realpath(__file__))

# Fetch the job list and report any changes against the saved instance file.
# (Bound to a new name so the ``jobs`` module is not shadowed.)
job_list = jobs.get_jobs()
print(checkchanges.checkChanges(join(script_dir, 'nimbixinstances.txt'),
                                job_list))