def generate_jobs(self):
    jobs = []
    exec_time = self.exec_time
    for i in range(self.iterations):
        jobs.append(
            Job(
                line_task_id=self.id,
                line_id=self.line_id,
                device_id=self.device_id,
                desired_state=1,
                exec_time=exec_time,
                expire_time=exec_time + timedelta(minutes=1),
            )
        )
        jobs.append(
            Job(
                line_task_id=self.id,
                line_id=self.line_id,
                device_id=self.device_id,
                desired_state=0,
                exec_time=exec_time + timedelta(minutes=self.time),
                expire_time=exec_time + timedelta(minutes=self.time + 1),
            )
        )
        exec_time = exec_time + timedelta(minutes=self.time_sleep)
    self.expire_time = exec_time + timedelta(minutes=1)
    self.jobs = jobs
    return self
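# A hedged usage sketch for generate_jobs: the field names (iterations, time,
# time_sleep) come from the function body above, but the LineTask constructor
# itself is an assumption, not taken from the source.
#
#   task = LineTask(id=1, line_id=1, device_id=7,
#                   exec_time=datetime.now(), iterations=2,
#                   time=5, time_sleep=60)
#   task.generate_jobs()
#   # -> task.jobs holds 4 Jobs: ON now, OFF after 5 minutes,
#   #    then the same ON/OFF pair again 60 minutes later.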
def generate_random_jobs(player_cities, current_city_id, count=30):
    """
    Generate a set of random jobs for a city
    :param player_cities: Dict of current player cities
    :param current_city_id: Id of current city the jobs are generated at
    :param count: Number of jobs to generate
    :return: Dict of jobs
    """
    logging.info(
        f'Generating {count} random jobs for city_id: {current_city_id}')
    player_city_ids = [
        city_id for city_id in player_cities.keys()
        if city_id != current_city_id
    ]
    jobs = {}
    for _ in range(count):
        # Pick the destination once so the revenue is calculated for the
        # same city the job is actually destined for.
        destination_city_id = random.choice(player_city_ids)
        job = Job(origin_city_id=current_city_id,
                  destination_city_id=destination_city_id,
                  revenue=calculate_job_revenue(
                      current_city_id, destination_city_id),
                  job_type=random.choice(['P', 'C']))
        jobs[job.id] = job.serialize()
    return jobs
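# Hypothetical call illustrating the contract of generate_random_jobs; the
# city ids and names below are invented for the example.
#
#   player_cities = {1: 'Berlin', 2: 'Paris', 3: 'Madrid'}
#   jobs = generate_random_jobs(player_cities, current_city_id=1, count=5)
#   # Every job originates in city 1 and is destined for city 2 or 3;
#   # the returned dict maps job.id -> job.serialize().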
def test_train_mlengine(self):
    train_job.train_job['name'] = str(uuid.uuid4())
    job = Job(train_job.train_job['name'], train_job.train_job)
    job.type = 'preprocess'
    job.init_temp(str(uuid.uuid4()))
    try:
        logging.info("step1")
        job.init_storage()
        logging.info("step2")
        if not hasattr(job, 'label_file') or job.label_file is None:
            job.label_file = 'corpus/' + job.prep_name + "/object-detection.pbtxt"
        job.init_labels()
        self.assertGreater(len(job.categories), 0)
        logging.info("step3")
        source = json.loads(
            job.download_to_string('corpus/' + job.prep_name + "/job_def.json"))
        job.project_name = source['project_name']
        logging.info("step4")
        updateFileML(job)
        logging.info("step5")
        upload_model(job)
        logging.info("step6")
        upload_packages(job)
        logging.info("step7")
        start_ml_engine(job)
        logging.info("step8")
        history = json.loads(
            job.download_to_string(
                'corpus/' + job.prep_name + "/job_history.json",
            ))
        upload_metadata(job, "training_jobs/" + job.name, history)
    finally:
        job.cleanup()
def statistics_team_jobs(body):
    if "team_id" not in body:
        return
    team = Team.select().where(Team.id == body["team_id"]).first()
    if not team:
        return
    qs = (Job.team == team)
    qs_count = (qs & (Job.status << ["normal", "private", "close"]))
    qs_open = (qs & (Job.status == "normal"))
    jobs_count = Job.select().where(qs_count).count()
    jobs_open = Job.select().where(qs_open).count()
    ts = TeamStatistics.select().where(TeamStatistics.team == team).first()
    if not ts:
        ts = TeamStatistics()
        ts.user = team.user
        ts.team = team
        ts.jobs = 0
        ts.open_jobs = 0
    ts.jobs = jobs_count
    ts.open_jobs = jobs_open
    ts.save()
    return
def post(self):
    account = AdminAccount.get(id=session.get("account_id"))
    if not account:
        return success(res={"error_code": 4001, 'msg': 'Please log in'})
    post = request.form
    job_class = post.get("job_class")
    job_name = post.get("job_name")
    job_department = post.get('job_department')
    job_category = post.get("job_category")
    job_experience = post.get("job_experience")
    education = post.get("education")
    salary_start = post.get("salary_start")
    salary_end = post.get("salary_end")
    location = post.get("location")
    description = post.get("description")
    temptation = post.get("temptation")
    option = post.get("option")
    required = (job_class, job_name, job_department, job_category,
                job_experience, education, description, location, temptation)
    if not all(required):
        return success(res={"error_code": 4001, 'msg': 'Missing required fields'})
    data = {
        'name': job_name,
        'accounts': account,
        'department': job_department,
        'category': int(job_category),
        'experience': int(job_experience),
        'education': int(education),
        'salary_start': int(salary_start),
        'salary_end': int(salary_end),
        'location': location,
        'description': description,
        'classes': int(job_class),
        'temptation': temptation,
    }
    if option == 'update':
        job_id = post.get("job_id")
        rn = Job.update_doc(id=job_id, **data)
    else:
        rn = Job.create(**data)
    if rn:
        return success(res={'id': rn.id})
    else:
        return success(res={'error_code': 4001, 'msg': 'Save failed'})
def job():
    # Example payload:
    # {
    #     "job_title": "full-stack Developer",
    #     "job_skills": [ "React.js", "JavaScript" ]
    # }
    # curl -i -H "Content-Type: application/json" -X POST -d '{"job_title": "FrontEnd Engineer1", "job_skills": [ "HTML2", "C33SS", "JadevaScript", "Sfffass" ]}' http://localhost:5000/api/job
    data = request.json
    new_job = Job(title=data["job_title"])
    for skill_name in data["job_skills"]:
        # Find skill in DB:
        skill = Skill.query.filter_by(name=skill_name).first()
        # Check if skill already in DB:
        exists = skill is not None
        if not exists:
            # Create new skill row in DB
            skill = Skill(name=skill_name)
            db.session.add(skill)
        # Add required skill to job opening
        new_job.skills.append(skill)
    # Store in DB:
    db.session.add(new_job)
    db.session.commit()
    return job_schema.dump(new_job)
def get_job(packet_id):
    job = Job.find_by_id(packet_id)
    if not job:
        return jsonify({'message': 'Job not found'}), 404
    response = job.serialize
    return jsonify({'job': response}), 200
def cycle():
    while True:
        is_popped = False
        for q in qs:
            if q.is_popped or time.time() - q.last_check_at > 2:
                try:
                    jobs = Job.objects(
                        status=JOB_COMMIT,
                        deleted_at=None,
                        queue_type=q.name).order_by('created_at').all()
                    q.is_popped = False
                    for job in jobs:
                        if q.semaphore.get_value() > 0:
                            job.status = JOB_PROCESSING
                            job.save()
                            job_id = job.id
                            q.semaphore.acquire()
                            p = Process(target=worker, args=(job_id, q.semaphore))
                            p.start()
                            q.is_popped = True
                        else:
                            q.is_popped = False
                            break
                except Exception as ex:
                    time.sleep(1)
                    print(ex)
            is_popped |= q.is_popped
        # If none of the queues had pending data, back off before polling again.
        if not is_popped:
            time.sleep(2)
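# cycle() assumes each element of `qs` exposes a name matching Job.queue_type,
# a semaphore that caps concurrent workers, and two bookkeeping fields. A
# minimal sketch of such a queue slot (the class name and constructor are
# assumptions, not the original code):
from multiprocessing import Semaphore


class QueueSlot(object):
    def __init__(self, name, concurrency):
        self.name = name                         # matches Job.queue_type
        self.semaphore = Semaphore(concurrency)  # free worker slots
        self.is_popped = False                   # True while the last poll found work
        self.last_check_at = time.time()         # timestamp of the last poll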
def get(self):
    job_id = request.args.get("job_id")
    if not job_id:
        return abort(404)
    rn = Job.get(id=job_id)
    if not rn:
        return abort(404)
    else:
        data = {
            'id': rn.id,
            'name': rn.name,
            'department': rn.department,
            'category': CATEGORY.get(rn.category),
            'experience': EXPERIENCE.get(rn.experience),
            'education': rn.education,
            'salary_start': rn.salary_start,
            'salary_end': rn.salary_end,
            'location': rn.location,
            'description': rn.description,
            'temptation': rn.temptation,
            'classes': JOB_CLASS.get(rn.classes)
        }
        return render_template('%s/index/job_detail.html' % self.template_base,
                               data=data)
def get(self):
    job_id = request.args.get("job_id")
    if not job_id:
        return abort(404)
    rn = Job.get(id=job_id)
    if not rn:
        return abort(404)
    data = {
        'id': rn.id,
        'name': rn.name,
        'department': rn.department,
        'category': rn.category,
        'experience': rn.experience,
        'education': rn.education,
        'salary_start': rn.salary_start,
        'salary_end': rn.salary_end,
        'location': rn.location,
        'description': rn.description,
        'temptation': rn.temptation,
        'classes': rn.classes
    }
    return render_template('%s/index/edit_job.html' % self.template_base,
                           data=json.dumps(data))
def test_run(self):
    jobs, students, apps = self.get_test_case()

    # Add the test objects to DB
    for obj in jobs + students + apps:
        obj.save()

    # Run the script
    MatchScript(app=self.app).run()

    # Check job objects (use list comprehensions rather than map so the
    # ids are materialized once under Python 3)
    job_ids = [job.id for job in jobs]
    db_jobs = Job.by_ids_dict(job_ids)
    assert db_jobs[jobs[0].id].state == Job.State.FILLED
    assert db_jobs[jobs[1].id].state == Job.State.PARTIALLY_FILLED
    assert db_jobs[jobs[2].id].state == Job.State.UNFILLED

    # Check app objects
    expected_matches = {
        students[0].id: jobs[0].id,
        students[1].id: jobs[1].id,
        students[2].id: jobs[1].id,
        students[3].id: None
    }
    app_ids = [app.id for app in apps]
    db_apps = Application.by_ids(app_ids)
    for app in db_apps:
        if expected_matches[app.student_id] == app.job_id:
            assert app.state == Application.State.MATCHED
        else:
            assert app.state == Application.State.NOT_MATCHED
def delete_job(user, packet_id):
    if not user:
        return jsonify({'message': 'Not authorized'}), 401
    job = Job.find_by_id(packet_id)
    if not job:
        return jsonify({'message': 'Job not found'}), 404
    job.delete_from_db()
    return jsonify({'message': 'success'}), 200
def create_job():
    try:
        job_data = request.json
        job_load = JobSchema().load(job_data)
        job = Job(**job_load)
        job.save()
        job_dump = JobSchema().dump(job)
        return make_response(jsonify(job_dump), 201)
    except ValidationError as err:
        # Malformed input is a client error, not a server error.
        return make_response(jsonify(err.messages), 400)
    except SQLAlchemyError as err:
        logging.error('Error in Job creation: {}'.format(list(err.args)))
        return make_response(
            jsonify({
                'Error Message': 'There was an error, we couldn\'t create the Job'
            }), 500)
def get_jobs():
    # Fetch and return all stored jobs.
    try:
        jobs = Job.get_all()
        jobs_dump = JobSchema(many=True).dump(jobs)
        return make_response(jsonify(jobs_dump))
    except ValidationError as err:
        return make_response(jsonify(err.messages), 500)
def search_job(from_date, to_date, title):
    query = Query()
    query.on_calendar(from_date, to_date)
    query.by_title(title)
    search_result = pd.DataFrame(
        Job.find(collection="job", query=query._query))
    return search_result
def Test_useGenericPreprocessor_ThrowExceptionIfUsedPropertyIsNone_NoneProperty():
    job = Job()
    m = ''
    isCaught = False
    try:
        job.useGenericPreprocessor()
    except ArgumentNotSetError as e:
        isCaught = True
        m += e.argument
    except Exception:
        pass
    m = ('\'useGenericPreprocessor\' caught None properties: '
         + 'the following is not set [{}]'.format(m))
    t.test(isCaught, m)
def _decorate(*args, **kwargs):
    # Keep the argument types simple; no need for anything more complex.
    arguments = json.dumps(dict(args=args, kwargs=kwargs))
    # Persist the task in MongoDB.
    job = Job()
    job.module_name = module_name
    job.function_name = function_name
    job.arguments = arguments
    job.queue_type = queue_key
    job.save()
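# _decorate closes over module_name, function_name and queue_key, so it is
# presumably the inner function of a decorator factory along these lines
# (the factory name `delay_to_queue` is an assumption, not the original code):
#
#   def delay_to_queue(queue_key):
#       def outer(func):
#           module_name = func.__module__
#           function_name = func.__name__
#           def _decorate(*args, **kwargs):
#               ...  # persist the Job as above instead of calling func
#           return _decorate
#       return outer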
def test_process_all(self):
    for jb in prep_job.jobs:
        job = Job(jb['name'], jb)
        job.type = 'preprocess'
        job.init_temp(str(uuid.uuid4()))
        try:
            job.init_labels()
            job.init_storage()
            job.testcoco = {
                "info": {
                    "description": "COCO 2017 Dataset",
                    "url": "http://cocodataset.org",
                    "version": "1.0",
                    "year": 2018,
                    "contributor": "COCO Consortium",
                    "date_created": "2017/09/01"
                },
                "licenses": [],
                "images": [],
                "categories": [],
                "annotations": [],
            }
            job.traincoco = {
                "info": {
                    "description": "COCO 2017 Dataset",
                    "url": "http://cocodataset.org",
                    "version": "1.0",
                    "year": 2018,
                    "contributor": "COCO Consortium",
                    "date_created": "2017/09/01"
                },
                "licenses": [],
                "images": [],
                "categories": [],
                "annotations": [],
            }
            process_json(job)
            create_label_pbtxt(job)
            create_tf_example(job)
            create_tf_example(job, False)
            delete_staged(job)
            upload_metadata(job)
        finally:
            job.cleanup()
def add_job():
    attributes = request.json
    job = Job(**attributes)
    try:
        session.add(job)
        session.commit()
        response = jsonify({"code": 1, "message": "Successfully added job"})
        return make_response(response, 201)
    except Exception:
        session.rollback()
        response = jsonify({"code": -1, "message": "Job already exists"})
        return make_response(response, 401)
def post(self):
    """
    Executes a prep job to create an image corpus for training.
    Use this method to start a prep job.
    """
    job_def = request.json
    # Hardcode to process the json file from the project folder.
    job_def['process_json'] = True
    job = Job(job_def['name'], job_def)
    job.type = 'preprocess'
    dt = newdt.now()
    job.start_time = int(dt.timestamp() * 1000)
    job.request = {
        'full_path': request.full_path,
        'remote_addr': request.remote_addr,
        'method': request.method
    }
    jb = aug_queue.enqueue(
        preprocess, job, job_timeout=-1, result_ttl=86400, ttl=-1)
    jb.meta['job_def'] = job_def
    dt = newdt.now()
    jb.meta['job_init_time'] = str(int(dt.timestamp() * 1000))
    jb.status = 'Running'
    jb.save_meta()
    json_str = job.to_json_string()
    st = {
        'BUCKET': job.bucket,
        'USE_GCS': job.use_gcs,
        'ACCESS_KEY': access_key,
        'SECRET_KEY': secret_key,
        'S3_URL': s3_url
    }
    storage = Storage(st)
    storage.upload_data(
        json_str,
        'jobs/running/{}_0_preprocess_r_{}.json'.format(str(job.start_time), jb.id),
        contentType='application/json')
    storage.upload_data(
        json_str,
        'jobs/all/{}_0_preprocess_r_{}.json'.format(str(job.start_time), jb.id),
        contentType='application/json')
    return {"status": jb.status, 'job_id': jb.id, 'meta': jb.meta}, 201
def get_jobs():
    query = {}
    request_data = request.args.to_dict()

    # Search by employer id
    if 'employer_id' in request_data:
        query['employer_id'] = ObjectId(request.args.get('employer_id'))

    # Search by company name
    if 'company_name' in request_data:
        company_name = request_data.get('company_name')
        employer = Employer.find_one({'company_name': company_name})
        if employer:
            query['employer_id'] = employer.id
        else:
            query['company_name'] = '-1'

    # Search by location
    if 'location' in request_data:
        query['location'] = request_data.get('location')

    # Execute query (materialize the cursor so it can be iterated again below)
    jobs = list(Job.find(query))
    job_ids = [job.id for job in jobs]

    # Get student applications for those jobs
    app_query = {'job_id': {'$in': job_ids}}

    # If I'm a student, only return my apps
    if g.student:
        app_query['student_id'] = g.user.id

    # Query for applications to those jobs
    apps = Application.find(app_query)

    # Convert to dictionary based on either student or employer context
    if g.student:
        apps = [app.to_dict_student() for app in apps]
    else:
        apps = [app.to_dict_employer() for app in apps]
    apps_by_job_id = {app['job_id']: app for app in apps}

    # Assemble results as dicts
    result = []
    for job in jobs:
        _dict = job.to_dict()
        _dict['application'] = apps_by_job_id.get(str(job.id))
        result.append(_dict)
    return ujson.dumps(result)
def job_view(body):
    if "uuid" not in body or "user_id" not in body:
        return
    job = Job.select().where(Job.job_uuid == body["uuid"]).first()
    if not job or job.user_id != body["user_id"]:
        return
    job.last_view_time = utils.now()
    job.save()
    qs = Proposal.update(is_view=True).where(Proposal.job == job)
    qs.execute()
    return
def run(self):
    Employer.objects.delete()
    employers = self.get_employers()
    for e in employers:
        print("Creating employer %s" % e)
        employer = Employer(**e)
        employer.save()

    Student.objects.delete()
    students = self.get_students()
    experiences = self.get_experiences()
    for i in range(len(experiences)):
        experience_list = experiences[i]
        s = students[i]
        for e in experience_list:
            s.experience = [Experience(**e)]
            s.save()

    educations = self.get_educations()
    for s in students:
        education = choice(educations)
        s.education = [Education(**education)]
        s.save()

    employers = Employer.find({})
    jobs = self.get_jobs()
    for i in range(len(jobs)):
        j = jobs[i]
        e = employers[i]
        j['employer_id'] = e['id']
        job = Job(**j)
        job.save()

    jobs = Job.find({})
    self.save_applications(jobs, students)
def setUp(self):
    self.employer_name = 'Test Employer 1'
    self.job_title = 'Test Job Title 1'
    now = datetime.now()
    term = Term.get_term(now.month)
    location = 'Toronto'
    job_levels = ['Junior', 'Intermediate', 'Senior']
    openings = 10
    applicants = 50
    summary = datamanager.test_summary
    programs = ['MATH-Computer Science', 'ENG-Computer', 'ENG-Civil']
    job_url = 'https://testurl.com'

    importer.import_job(employer_name=self.employer_name,
                        job_title=self.job_title,
                        term=term,
                        location=location,
                        levels=job_levels,
                        openings=openings,
                        applicants=applicants,
                        summary=summary,
                        date=now,
                        programs=programs,
                        url=job_url)

    self.employer_name = self.employer_name.lower()
    self.job_title = self.job_title.lower()
    location = location.lower()

    self.employer = Employer.objects(name=self.employer_name).no_dereference().first()
    self.job = Job.objects(id__in=[job.id for job in self.employer.jobs],
                           title=self.job_title).first()

    self.assertEqual(self.employer.name, self.employer_name)
    self.assertEqual(self.employer.overall.rating, 0.0)
    self.assertEqual(self.employer.overall.count, 0)
    self.assertTrue(len(self.employer.warnings) == 0)
    self.assertTrue(len(self.employer.comments) == 0)
    self.assertEqual(self.job.title, self.job_title)
    self.assertEqual(self.job.url, job_url)
    self.assertEqual(self.job.term, term)
    self.assertEqual(self.job.location[0].name, location)
    self.assertTrue(int(round(self.job.location[0].longitude)) == -79)
    self.assertTrue(int(round(self.job.location[0].latitude)) == 44)
    self.assertEqual(self.job.openings, openings)
    self.assertEqual(self.job.remaining, openings)
    self.assertEqual(self.job.hire_rate.rating, 0.0)
    self.assertEqual(self.job.hire_rate.count, 0)
    self.assertEqual(self.job.applicants[0].applicants, applicants)
    self.assertEqual(self.job.applicants[0].date.year, now.year)
    self.assertEqual(self.job.applicants[0].date.month, now.month)
    self.assertEqual(self.job.applicants[0].date.day, now.day)
    self.assertEqual(set(self.job.levels), set(job_levels))
    self.assertTrue(len(self.job.comments) == 0)
    self.assertEqual(set(self.job.programs), set(programs))
    self.assertFalse(self.job.deprecated)
def tpminerWeatherSetup(logger):
    job = Job(logger=logger, label='TPMiner weather-crash')
    job.algorithm = tpminer
    job.minSupport = 0.15
    job.maxGap = pa.to_timedelta('24:00:00')
    job.dataset = 'datasets/Weather-Crash.csv'
    preprocessor = WeatherCrashPreprocessor(
        'datasets/weather.csv',
        'datasets/Motor_Vehicle_Collisions_-_Crashes.csv',
        logger)
    job.preprocessor = preprocessor
    return job
def test_init_storage(self):
    for jb in prep_job.jobs:
        job = Job(jb['name'], jb)
        job.type = 'preprocess'
        job.init_temp(str(uuid.uuid4()))
        try:
            job.init_labels()
            job.init_storage()
            job.testcoco = {
                "info": {
                    "description": "COCO 2017 Dataset",
                    "url": "http://cocodataset.org",
                    "version": "1.0",
                    "year": 2018,
                    "contributor": "COCO Consortium",
                    "date_created": "2017/09/01"
                },
                "licenses": [],
                "images": [],
                "categories": [],
                "annotations": [],
            }
            job.traincoco = {
                "info": {
                    "description": "COCO 2017 Dataset",
                    "url": "http://cocodataset.org",
                    "version": "1.0",
                    "year": 2018,
                    "contributor": "COCO Consortium",
                    "date_created": "2017/09/01"
                },
                "licenses": [],
                "images": [],
                "categories": [],
                "annotations": [],
            }
        finally:
            job.cleanup()
def client_home_page():
    global jobs_index
    job_type = request.form['job_type']
    address = request.form['address']
    description = request.form['description']
    job = Job(jobs_index, job_type, address, description,
              session.get('username'))
    jobs_index += 1
    global_dict['jobs'].append(job)
    print_jobs()
    return jobs_by_client()
def worker(job_id, semaphore):
    start_at = time.time()
    job = Job.objects(id=job_id).first()
    module_name = job.module_name
    function_name = job.function_name
    func = get_function_from_module_and_function_name(module_name,
                                                      function_name)
    arguments = json.loads(job.arguments)
    args = arguments.get('args')
    kwargs = arguments.get('kwargs')
    results = ''
    try:
        results = func(*args, **kwargs)
        job.finish()
    except Exception as ex:
        job.exception()
        exc = traceback.format_exc()
        job.exceptions = exc
def update_jobs(self):
    for c_data in self.data:
        # If company has jobs
        if c_data['jobtitles']:
            jobs = c_data['jobtitles'].split('|JOB|')
            for job in [json.loads(j) for j in jobs]:
                job = Job(job['title'], job['role_type_id'],
                          c_data['companyid'])
                self.data_storage.jobs.append(job)
                self.data_storage.add_job_to_company(job)
        # If company has job tags
        if c_data['jobtags']:
            job_tags = c_data['jobtags'].split('|TAG|')
            for job_tag in [json.loads(j) for j in job_tags]:
                job_tag = (job_tag['name'], job_tag['id'])
                self.data_storage.add_job_tag(job_tag)
                self.data_storage.add_jobtag_to_company(
                    job_tag, c_data['companyid'])
def job_add(user_id=None, job_db_id=None):
    job_db_obj = storage.get('Job_db', job_db_id)
    if job_db_obj is None:
        abort(404, 'Not found')
    d = datetime.today() - timedelta(days=job_db_obj.date_post)
    print(d)
    new_job = Job(company=job_db_obj.company,
                  position=job_db_obj.position,
                  location=job_db_obj.location,
                  description=job_db_obj.description,
                  user_id=user_id,
                  html_description=job_db_obj.html_description
                  # link=job_db_obj.link,
                  # date_post=d
                  )
    storage.new(new_job)
    storage.save()
    return render_template('job_detail.html', job_db_obj=job_db_obj,
                           descrip=Markup(job_db_obj.html_description))
def test_train_mlengine_copy(self):
    train_job.train_job['name'] = str(uuid.uuid4())
    job = Job(train_job.train_job['name'], train_job.train_job)
    job.type = 'preprocess'
    job.init_temp(str(uuid.uuid4()))
    try:
        logging.info("step1")
        job.init_storage()
        logging.info("step2")
        # Compare strings with equality, not identity (`is not ''` is a bug).
        if hasattr(job, 'source_training') and job.source_training != '':
            sjd = json.loads(
                job.download_to_string('training_jobs/' + job.source_training +
                                       "/job_def.json"))
            job.num_train_steps += sjd['num_train_steps']
            job.model = sjd['model']
            st = 'training_jobs/{}/'.format(job.source_training)
            dt = 'training_jobs/{}/'.format(job.name)
            job.copy_folder(st, dt)
            job.delete_cloud_file('{}{}'.format(dt, "job_def.json"))
            job.delete_cloud_file('{}{}'.format(dt, "job_history.json"))
        logging.info("step3")
        if not hasattr(job, 'label_file') or job.label_file is None:
            job.label_file = 'corpus/' + job.prep_name + "/object-detection.pbtxt"
        job.init_labels()
        self.assertGreater(len(job.categories), 0)
        logging.info("step4")
        source = json.loads(
            job.download_to_string('corpus/' + job.prep_name + "/job_def.json"))
        job.project_name = source['project_name']
        logging.info("step5")
        updateFileML(job)
        logging.info("step6")
        upload_model(job)
        logging.info("step7")
        upload_packages(job)
        logging.info("step8")
        start_ml_engine(job)
        logging.info("step9")
        history = json.loads(
            job.download_to_string(
                'corpus/' + job.prep_name + "/job_history.json",
            ))
        upload_metadata(job, "training_jobs/" + job.name, history)
    finally:
        job.cleanup()
def comment(job_id):
    data = json.loads(flask.request.data)
    comment_text = data['text']
    comment_date = dateutil.parser.parse(data['date'])
    salary = data['salary'] or 0
    rating = (float(data['rating']) / 5) or 0
    if job_id is not None and comment_text:
        job = Job.objects(id=job_id).first()
        if not job:
            return render_template('404.html')
        logger.info(COMPONENT, 'Adding comment for job: {}'.format(job_id))
        new_comment = Comment(comment=comment_text,
                              date=comment_date,
                              salary=salary,
                              crawled=False,
                              rating=AggregateRating(rating=rating, count=1))
        job.update(push__comments=new_comment)
    return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}
def update_job(**kwargs):
    """Update job.

    Keyword arguments:
    id -- Job ID
    summary -- Job summary
    location -- Location job was advertised
    programs -- Programs the job is specified for
    levels -- Levels job is intended for [Junior, Intermediate, Senior]
    openings -- Number of job openings
    index -- Boolean to indicate whether to index or not (default True)
    """
    summary = kwargs['summary']
    location = kwargs['location'].lower()
    levels = kwargs['levels']

    programs = []
    for program in kwargs['programs']:
        uw_program = Program.get_program(program)
        if uw_program:
            programs.append(uw_program)
        else:
            logger.error(COMPONENT, 'Error processing program: {}'.format(program))

    openings = 0
    try:
        if kwargs['openings']:
            openings = int(kwargs['openings']) or 0
    except Exception:
        pass

    index = False
    # Look up the key by name; `if index in kwargs` tested whether the *value*
    # False was a key, which is never the intent.
    if 'index' in kwargs:
        index = kwargs['index']

    job = Job.objects(id=kwargs['id']).first()

    remaining = job.openings

    # Job posting has decreased, some positions filled up
    if openings < job.openings:
        remaining = openings

    filtered_summary = engine.filter_summary(summary)
    summary_keywords = engine.get_keywords(filtered_summary, programs)

    filtered_summary_compare = re.sub(r'\W+', '',
                                      filtered_summary.lower().strip()).strip()
    job_summary_compare = re.sub(r'\W+', '', job.summary.lower().strip()).strip()

    employer = Employer.objects(jobs=kwargs['id']).first()

    # Job summary is not the same. In this case the employer most likely changed the job
    if not filtered_summary_compare == job_summary_compare:
        if openings >= 1:
            logger.info(COMPONENT,
                        'Job: {}: different summary detected, deprecating and creating new job..'
                        .format(kwargs['id']))
            job.update(set__deprecated=True)

            location = Location(name=location)
            keywords = [Keyword(keyword=k['keyword'], types=k['types'])
                        for k in summary_keywords]

            # Assume new job so number of remaining positions is same as openings
            new_job = Job(title=job.title, summary=filtered_summary, year=job.year,
                          term=job.term, location=[location], openings=openings,
                          remaining=openings, levels=levels, programs=programs,
                          url=job.url, keywords=keywords)
            new_job.save()

            employer.update(push__jobs=new_job)
            if index:
                elastic.delete_employer_waterlooworks(employer)
                elastic.delete_job_waterlooworks(employer, job)
                elastic.index_employer_waterlooworks(employer)
                elastic.index_job_waterlooworks(employer, new_job)
        else:
            logger.info(COMPONENT,
                        'Job: {}: different summary detected but invalid openings: {}, ignoring..'
                        .format(job.title, openings))
    else:
        logger.info(COMPONENT, 'Job: {}: updating for current term'.format(kwargs['id']))

        location = Location(name=location)

        job.update(add_to_set__location=location,
                   set__remaining=remaining,
                   set__levels=list(set(levels + job.levels)),
                   set__programs=list(set(programs + job.programs)),
                   set__last_indexed=datetime.now())
        if index:
            elastic.update_job_waterlooworks(employer, job)
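# Hypothetical call matching the kwargs contract documented in update_job's
# docstring (all values invented for illustration):
#
#   update_job(id=job.id,
#              summary='Build and maintain internal web tooling.',
#              location='Toronto',
#              programs=['ENG-Computer'],
#              levels=['Junior', 'Intermediate'],
#              openings=3,
#              index=False)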
def jobs(self):
    return Job.query(Job.crawl_key == self.key).order(Job.created_at).fetch()
def display_job():
    employer_name = flask.request.args.get('employer') or ''
    job_title = flask.request.args.get('title') or ''

    employer = Employer.objects(name=employer_name).no_dereference().first()
    # Check the employer before dereferencing its jobs, otherwise a missing
    # employer raises AttributeError instead of returning 404.
    if not employer:
        return render_template('404.html')

    job = Job.objects(id__in=[job.id for job in employer.jobs],
                      title=job_title, deprecated=False).first()
    if not job:
        return render_template('404.html')

    # Replace Windows line endings first; replacing '\n' first would leave
    # stray '\r' characters and '\r\n' would never match.
    summary = job.summary.strip('-').strip('_').strip('-').strip('_').strip()\
        .replace('\r\n', '<br>').replace('\n', '<br>')

    keywords = []
    for keyword in job.keywords:
        color = '#949FB1'
        if keyword.keyword in colors.colors and colors.colors[keyword.keyword]['color']:
            color = colors.colors[keyword.keyword]['color']
        keywords.append({
            'keyword': keyword.keyword,
            'color': color
        })

    job_applicants = 0
    applicants = {}
    # Guard on the job's applicant history (the original checked the empty
    # local dict, so this block never ran); use the most recent count that is
    # not in the future.
    if len(job.applicants) > 0:
        for applicant in job.applicants:
            applicants[applicant.date] = applicant.applicants

        now = datetime.now()
        latest = max(date for date in applicants if date < now)
        job_applicants = applicants[latest]

    comments = []
    for comment in job.comments:
        comments.append({
            'comment': comment.comment,
            'date': comment.date.isoformat(),
            'salary': comment.salary,
            'rating': comment.rating.rating * 5,
            'crawled': comment.crawled
        })

    job_data = {
        'employer_name': string.capwords(employer.name),
        'job_id': job.id,
        'job_title': string.capwords(job.title),
        'job_term': job.term,
        'job_year': job.year,
        'job_summary': summary,
        'job_locations': [string.capwords(location.name) for location in job.location],
        'job_openings': job.openings,
        'job_remaining': job.remaining,
        'job_hire_rate': int(job.hire_rate.rating * 100),
        'job_programs': job.programs,
        'job_levels': job.levels,
        'job_keywords': keywords,
        'job_applicants': job_applicants
    }

    return render_template('job.html', job_data=job_data, comments=comments,
                           page_script='job')
def view_job(self, jid):
    job = Job.get_by_id(int(jid))
    crawl = job.crawl_key.get()
    robot = crawl.key.parent().get()
    self.render_response('robot/job.html', job=job, crawl=crawl, robot=robot)
def import_job(**kwargs):
    """Import job.

    Keyword arguments:
    employer_name -- Employer name
    job_title -- Title of job
    summary -- Job summary
    year -- Year the job was advertised
    term -- Term job was advertised [Fall, Winter, Spring]
    location -- Location job was advertised
    openings -- Number of job openings
    remaining -- Number of job openings remaining
    applicants -- Number of applicants job has (Optional)
    levels -- Levels job is intended for [Junior, Intermediate, Senior]
    programs -- Programs the job is specified for
    url -- URL of job
    date -- Date job was crawled (useful for knowing exactly # of applicants at what time)
    index -- Boolean to indicate whether to index or not (default True)
    """
    employer_name = kwargs['employer_name'].lower()
    job_title = kwargs['job_title'].lower()
    term = kwargs['term']

    levels = []
    for level in kwargs['levels']:
        uw_level = Term.get_level(level)
        if uw_level:
            levels.append(uw_level)
        else:
            logger.error(COMPONENT, 'Error processing level: {}'.format(level))

    programs = []
    for program in kwargs['programs']:
        uw_program = Program.get_program(program)
        if uw_program:
            programs.append(uw_program)
        else:
            logger.error(COMPONENT, 'Error processing program: {}'.format(program))

    location = kwargs['location'].lower()
    openings = int(kwargs['openings'])
    remaining = int(kwargs['remaining']) if 'remaining' in kwargs else openings

    summary = kwargs['summary']
    filtered_summary = engine.filter_summary(summary)
    summary_keywords = engine.get_keywords(filtered_summary, programs)
    date = kwargs['date']
    year = date.year
    url = kwargs['url']

    applicants = 0
    try:
        if kwargs['applicants']:
            applicants = int(kwargs['applicants'])
    except Exception:
        pass

    index = False
    # Look up the key by name; `if index in kwargs` tested whether the *value*
    # False was a key, which is never the intent.
    if 'index' in kwargs:
        index = kwargs['index']

    logger.info(COMPONENT, 'Importing job: {} from {}'.format(job_title, employer_name))

    # If employer does not exist, create it
    if not Employer.employer_exists(employer_name):
        logger.info(COMPONENT, 'Employer: {} does not exist, creating..'.format(employer_name))
        employer = Employer(name=employer_name)

        logger.info(COMPONENT, 'Creating job: {}'.format(job_title))
        location = Location(name=location)
        applicant = Applicant(applicants=applicants, date=date)
        keywords = [Keyword(keyword=k['keyword'], types=k['types'])
                    for k in summary_keywords]

        # New job so number of remaining positions is same as openings
        job = Job(title=job_title, summary=filtered_summary, year=year,
                  term=term, location=[location], openings=openings,
                  remaining=remaining, applicants=[applicant], levels=levels,
                  programs=programs, url=url, keywords=keywords)
        job.save()
        job.reload()

        employer.jobs.append(job)
        employer.save()
        employer.reload()

        if index:
            elastic.index_employer_waterlooworks(employer)
            elastic.index_job_waterlooworks(employer, job)

    # Employer already exists
    else:
        employer = Employer.objects(name=employer_name).no_dereference().first()
        logger.info(COMPONENT, 'Employer: {} already exists'.format(employer_name))

        # If job does not exist, create it
        if not employer.job_exists(job_title):
            logger.info(COMPONENT, 'Creating job: {}'.format(job_title))
            location = Location(name=location)
            applicant = Applicant(applicants=applicants, date=date)
            keywords = [Keyword(keyword=k['keyword'], types=k['types'])
                        for k in summary_keywords]

            # New job so number of remaining positions is same as openings
            job = Job(title=job_title, summary=engine.filter_summary(summary),
                      year=year, term=term, location=[location],
                      openings=openings, remaining=remaining,
                      applicants=[applicant], levels=levels,
                      programs=programs, url=url, keywords=keywords)
            job.save()
            job.reload()

            employer.update(push__jobs=job)
            if index:
                elastic.update_employer_waterlooworks(employer)
                elastic.index_job_waterlooworks(employer, job)

        # Job already exists
        else:
            logger.info(COMPONENT, 'Job: {} already exists'.format(job_title))
            job = Job.objects(id__in=[job.id for job in employer.jobs],
                              title=job_title).first()

            if not year >= job.year:
                raise DataIntegrityError('Job: {} by {} cannot be advertised before {}'
                                         .format(job_title, employer_name, job.year))

            filtered_summary_compare = re.sub(r'\W+', '',
                                              filtered_summary.lower().strip()).strip()
            job_summary_compare = re.sub(r'\W+', '',
                                         job.summary.lower().strip()).strip()

            # Job summary is not the same. In this case the employer most likely changed the job
            if not filtered_summary_compare == job_summary_compare:
                if openings >= 1:
                    logger.info(COMPONENT,
                                'Job: {}: different summary detected, deprecating and creating new job..'
                                .format(job_title))
                    job.update(set__deprecated=True)

                    location = Location(name=location)
                    applicant = Applicant(applicants=applicants, date=date)
                    keywords = [Keyword(keyword=k['keyword'], types=k['types'])
                                for k in summary_keywords]

                    # Assume new job so number of remaining positions is same as openings
                    new_job = Job(title=job_title, summary=filtered_summary,
                                  year=year, term=term, location=[location],
                                  openings=openings, remaining=remaining,
                                  applicants=[applicant], levels=levels,
                                  programs=programs, url=url, keywords=keywords)
                    new_job.save()
                    new_job.reload()

                    employer.update(push__jobs=new_job)
                    if index:
                        elastic.delete_employer_waterlooworks(employer)
                        elastic.delete_job_waterlooworks(employer, job)
                        elastic.index_employer_waterlooworks(employer)
                        elastic.index_job_waterlooworks(employer, new_job)
                else:
                    logger.info(COMPONENT,
                                'Job: {}: different summary detected but invalid openings: {}, ignoring..'
                                .format(job_title, openings))

            # Job is the same (same title and description)
            else:
                # If job is being advertised in new term
                if year != job.year or term != job.term:
                    logger.info(COMPONENT,
                                'Job: {}: being advertised in new term, updating..'.format(job_title))

                    # Add hire ratio for previous term
                    hire_ratio = float(job.openings - job.remaining) / job.openings
                    job.hire_rate.add_rating(hire_ratio)

                    location = Location(name=location)
                    applicant = Applicant(applicants=applicants, date=date)
                    hire_rate = AggregateRating(rating=job.hire_rate.rating,
                                                count=job.hire_rate.count)

                    job.update(set__year=year, set__term=term,
                               add_to_set__location=location,
                               set__openings=openings,
                               set__remaining=remaining,
                               push__applicants=applicant,
                               set__hire_rate=hire_rate,
                               set__levels=levels,
                               set__programs=programs,
                               set__url=url,
                               set__last_indexed=datetime.now())
                    if index:
                        elastic.update_job_waterlooworks(employer, job)

                # Job is being updated. We need to update location, openings,
                # levels, remaining, hire_rate, applicants
                else:
                    logger.info(COMPONENT, 'Job: {}: updating for current term'.format(job_title))

                    remaining = job.remaining

                    # Job posting has decreased, some positions filled up
                    if openings < remaining:
                        remaining = openings

                    location = Location(name=location)
                    applicant = Applicant(applicants=applicants, date=date)

                    job.update(add_to_set__location=location,
                               set__remaining=remaining,
                               set__levels=list(set(levels + job.levels)),
                               push__applicants=applicant,
                               set__programs=list(set(programs + job.programs)),
                               set__url=url,
                               set__last_indexed=datetime.now())
                    if index:
                        elastic.update_job_waterlooworks(employer, job)
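# Hypothetical call matching import_job's documented kwargs (values invented
# for illustration; year is derived from `date` inside the function):
#
#   import_job(employer_name='Acme Corp', job_title='Software Developer',
#              summary='Ship features on the core platform.',
#              term='Fall', location='Waterloo',
#              openings=4, applicants=120,
#              levels=['Junior'], programs=['MATH-Computer Science'],
#              url='https://example.com/job/123', date=datetime.now(),
#              index=False)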
def create():
    """Creates a new outgoing fax"""
    account_id = Account.authorize(request.values.get('api_key'))
    if account_id is None:
        return jsonify(api_error('API_UNAUTHORIZED')), 401

    ip = fix_ip(request.headers.get('x-forwarded-for', request.remote_addr))

    if request.method == 'POST':
        uploaded_file = request.files['file']
        v = request.values.get

        if uploaded_file or v('body'):
            data = {
                'account_id': account_id,
                'ip_address': ip,
                'destination': v('destination'),
                'send_authorized': v('send_authorized', 0),
                'cover': v('cover', 0),
                'cover_name': v('cover_name'),
                'cover_address': v('cover_address'),
                'cover_city': v('cover_city'),
                'cover_state': v('cover_state'),
                'cover_zip': v('cover_zip'),
                'cover_country': v('cover_country'),
                'cover_phone': v('cover_phone'),
                'cover_email': v('cover_email'),
                'cover_company': v('cover_company'),
                'cover_to_name': v('cover_to_name'),
                'cover_cc': v('cover_cc'),
                'cover_subject': v('cover_subject'),
                'cover_status': v('cover_status', 'review'),
                'cover_comments': v('cover_comments'),
                'callback_url': v('callback_url')
            }

            if uploaded_file:
                data['filename'] = uploaded_file.filename
            else:
                data['body'] = v('body')

            # o() is presumably a debug/log helper defined elsewhere in the module.
            o(data)

            try:
                job = Job(**data)
                job.validate()
            except ValidationError as err:
                return jsonify(api_error(err.ref)), 400

            db.session.add(job)
            db.session.commit()

            if uploaded_file:
                binary = uploaded_file.stream.read()
            else:
                binary = job.body.replace("\r\n", "\n").encode('utf-8')

            redis_conn = Redis.from_url(current_app.config['REDIS_URI'])
            q = Queue('high', connection=redis_conn)
            q.enqueue_call(func=initial_process, args=(job.id, binary), timeout=300)

            return jsonify(job.public_data())
        else:
            return jsonify(api_error("JOBS_NO_ATTACHMENT")), 400
def import_comment(**kwargs):
    """Import comment from RateMyCoopJob.

    Keyword arguments:
    employer_name -- Employer name
    job_title -- Title of job
    comments -- Array of comments:
        comment -- Comment
        comment_date -- Date comment was submitted. Note: in non-standard form
            such as: 5 years ago, 3 weeks ago etc
        salary -- Job salary (hourly)
        rating -- Job rating out of 5 (1 - 5 stars on ratemycoopjob)
    """
    employer_name = kwargs['employer_name'].lower()
    job_title = kwargs['job_title'].lower()

    # If employer alias exists (ex. Research in motion -> Blackberry), use instead
    if employer_name in employer_alias.aliases:
        employer_name = employer_alias.aliases[employer_name].lower()

    # If employer does not exist
    if not Employer.objects.search_text("\"{}\"".format(employer_name)).count() > 0:
        logger.info(COMPONENT, 'Employer: {} does not exist, ignoring..'.format(employer_name))
        return

    logger.info(COMPONENT, 'Importing comments for job: {} from employer: {}'
                .format(job_title, employer_name))

    employer = Employer.objects.search_text("\"{}\"".format(employer_name)).no_dereference().first()

    # Iterate through all comments
    for index, comment_obj in enumerate(kwargs['comments']):
        comment = comment_obj['comment']
        comment_date = _get_comment_date(comment_obj['comment_date'])
        salary = float(comment_obj['salary'])
        rating = float(comment_obj['rating']) / 5

        # If job does not exist, add the comment to the employer instead
        if not employer.job_exists(job_title):
            if employer.comment_exists(comment=comment, date=comment_date,
                                       salary=salary, rating=rating):
                logger.info(COMPONENT, 'Comment: {} already exists for employer: {}, ignoring'
                            .format(index, employer_name))
            else:
                logger.info(COMPONENT, 'Adding comment: {} to employer: {}'
                            .format(index, employer_name))
                new_comment = Comment(comment=comment, date=comment_date,
                                      salary=salary, crawled=True,
                                      rating=AggregateRating(rating=rating, count=1))
                employer.update(push__comments=new_comment)
        # Job already exists
        else:
            job = Job.objects(id__in=[job.id for job in employer.jobs],
                              title=job_title).first()
            if job.comment_exists(comment=comment, date=comment_date,
                                  salary=salary, rating=rating):
                logger.info(COMPONENT, 'Comment: {} already exists for job: {} for employer: {}, ignoring'
                            .format(index, job_title, employer_name))
            else:
                logger.info(COMPONENT, 'Adding comment: {} for job: {} from {}'
                            .format(index, job_title, employer_name))
                new_comment = Comment(comment=comment, date=comment_date,
                                      salary=salary, crawled=True,
                                      rating=AggregateRating(rating=rating, count=1))
                job.update(push__comments=new_comment)
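# Hypothetical call matching import_comment's documented kwargs (values
# invented for illustration; comment_date is deliberately in the
# "N years ago" form the docstring describes):
#
#   import_comment(employer_name='Blackberry',
#                  job_title='software developer',
#                  comments=[{'comment': 'Supportive team, real work.',
#                             'comment_date': '2 years ago',
#                             'salary': 22.50,
#                             'rating': 4}])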