def setUp(self):
    """Build the user/organisation/category fixtures and an unsaved Job."""
    self.created_by = create_user()
    self.organisation = Organisation.objects.create(
        user=self.created_by,
        name='Big Company',
        description='We are everywhere!',
    )
    self.category = JobCategory.objects.create(category='Engineering')

    # Plain attributes mirrored onto the Job below so individual tests can
    # compare against them directly.
    self.title = 'Software Engineering Lead'
    self.description = 'We are excited to ...'
    self.allow_comments = True
    self.start_accepting_applications_at = timezone.now()
    self.stop_accepting_applications_at = timezone.now() + timedelta(days=30)
    self.employment_term = 'Full Term'
    self.seniority_level = 'Mid Level'
    self.location = 'Nairobi'

    # NOTE(review): the Job is built but not saved here — presumably the
    # tests persist it themselves; confirm against the test methods.
    job_kwargs = dict(
        created_by=self.created_by,
        organisation=self.organisation,
        category=self.category,
        title=self.title,
        description=self.description,
        allow_comments=self.allow_comments,
        start_accepting_applications_at=self.start_accepting_applications_at,
        stop_accepting_applications_at=self.stop_accepting_applications_at,
        employment_term=self.employment_term,
        seniority_level=self.seniority_level,
        location=self.location,
    )
    self.job = Job(**job_kwargs)
class PyJobsJobApplication(TestCase):
    """Tests for the application call-to-action on the job detail page."""

    def setUp(self):
        self.job = Job(
            title="Vaga 3",
            workplace="Sao Paulo",
            company_name="XPTO",
            company_email="*****@*****.**",
            description="Job bem maneiro",
            premium=True,
            public=True,
        )
        self.job.save()
        self.user = User.objects.create_user(
            username='******',
            email='*****@*****.**',
            password='******',
        )
        self.client = Client()

    def _job_page_html(self):
        # Fetch the detail page for the fixture job and decode the body.
        response = self.client.get("/job/{}/".format(self.job.pk))
        return response.content.decode('utf-8')

    def test_check_applied_for_job_anon(self):
        # Anonymous visitors are told to log in before applying.
        self.assertIn(
            "Você precisa estar logado para aplicar para esta vaga!",
            self._job_page_html(),
        )

    def test_check_applied_for_job(self):
        # Logged-in users see the apply button instead.
        self.client.login(username="******", password="******")
        self.assertIn(
            "Aplicar para esta vaga pelo PyJobs",
            self._job_page_html(),
        )
def setUp(self):
    """Persist a public, non-premium job used by the tests."""
    job = Job(
        title="Vaga 2",
        workplace="Sao Paulo",
        company_name="XPTO",
        company_email="*****@*****.**",
        description="Job bem maneiro",
        public=True,
    )
    job.save()
    self.job = job
def setUp(self):
    """Persist a job that carries an external application link."""
    job = Job(
        title="Vaga 1",
        workplace="Sao Paulo",
        company_name="XPTO",
        application_link="http://www.xpto.com.br/apply",
        company_email="*****@*****.**",
        description="Job bem maneiro",
    )
    job.save()
    self.job = job
def test_job(self):
    """Exercise Job string form, status update, ordering, delete and get_last."""
    self.assertEqual('test', str(self.job1))
    self.assertEqual('test2', str(self.job2))
    self.assertEqual('In progress', self.job2.status)

    # update_job should flip the status to the value it is given.
    self.job2.update_job('test model', 'Completed')
    self.assertEqual('Completed', self.job2.status)

    # get_all is expected to return newest-first.
    all_jobs = Job.get_all()
    self.assertTrue(all_jobs[0].created > all_jobs[1].created)

    # After deleting everything, get_last has nothing to return.
    for stale in Job.get_all():
        stale.delete()
    self.assertIsNone(Job.get_last())
def setUp(self):
    """Save one job, then resolve and render the home page for assertions."""
    job = Job(
        title="Vaga 1",
        workplace="Sao Paulo",
        company_name="XPTO",
        application_link="http://www.xpto.com.br/apply",
        company_email="*****@*****.**",
        description="Job bem maneiro",
    )
    job.save()
    self.job = job
    self.home_page = resolve('/')
    self.request = HttpRequest()
    self.home_page_html = index(self.request).content.decode('utf-8')
def setUp(self):
    """Create a login-capable user and persist a premium public job."""
    self.user = User.objects.create_user(
        username='******',
        email='*****@*****.**',
        password='******',
    )
    self.job = Job(
        title="Vaga 3",
        workplace="Sao Paulo",
        company_name="XPTO",
        company_email="*****@*****.**",
        description="Job bem maneiro",
        premium=True,
        public=True,
    )
    self.job.save()
def schedule_package(package):
    """Schedule translation-update jobs for *package*, one per language.

    If any Job already references the package the whole package is assumed
    to be scheduled and nothing new is created.

    Args:
        package: the Package model instance to schedule updates for.
    """
    # .exists() issues a cheap EXISTS query instead of materialising the
    # whole queryset just to test for presence (the original truthiness
    # check fetched every matching row).
    if Job.objects.filter(package=package).exists():
        logging.info("Package %s is already scheduled for updates." % package)
        return
    for language in Language.objects.all():
        job = Job(package=package, language=language)
        job.save()
        logging.info("Package %s is scheduled to be updated on %s"
                     % (job.package.name,
                        job.update_on.strftime("%B %d, %Y %H:%M")))
def setUp(self):
    """Persist a fully-populated job and render its detail view."""
    job = Job(
        title="Vaga 1",
        workplace="Sao Paulo",
        company_name="XPTO",
        application_link="http://www.xpto.com.br/apply",
        company_email="*****@*****.**",
        description="Job bem maneiro",
        requirements="Job bem maneiro",
    )
    job.save()
    self.job = job
    self.job_request = HttpRequest()
    response = job_view(self.job_request, self.job.pk)
    self.job_view_html = response.content.decode('utf-8')
def index(request):
    """Render the home page with publicly available jobs, 5 per page.

    The ``page`` GET parameter selects the page; any invalid or missing
    value falls back to page 1.
    """
    paginator = Paginator(Job.get_publicly_available_jobs(), 5)
    page = request.GET.get('page')
    try:
        public_jobs_to_display = paginator.page(page)
    # Narrowed from a bare ``except:`` which also swallowed
    # KeyboardInterrupt/SystemExit; Paginator raises PageNotAnInteger
    # or EmptyPage here.
    except Exception:
        public_jobs_to_display = paginator.page(1)
    context_dict = {
        "publicly_available_jobs": public_jobs_to_display,
        "premium_available_jobs": Job.get_premium_jobs(),
        "new_job_form": JobForm,
        "pages": paginator.page_range
    }
    return render(request, template_name="index.html", context=context_dict)
def init(testing=False):
    """Initialize the database with the default job"""
    # Make sure the application directory exists before touching the DB file.
    if not os.path.exists(APPLICATION_DIRECTORY):
        os.mkdir(APPLICATION_DIRECTORY)

    # (Re)create the schema when the DB file is missing, or always in tests.
    if not os.path.exists(DATABASE_FILE) or testing:
        print(f"Creating TimeClok Database and default job.....")
        DB.create_tables(BaseModel)
        try:
            default_job = Job(name="default")
            default_job.save()
            state = State()
            state.save()
            state.set_job(default_job)
        except IntegrityError:
            # Rows already exist (e.g. re-run against a populated DB).
            DB.session.rollback()
def download_from_http(source_uri, rulesets_id=None):
    """Download Suricata rules over HTTP for the source at *source_uri*.

    Progress and failures are recorded on a Job row created up front.

    Args:
        source_uri: URI identifying an existing SourceSuricata row.
        rulesets_id: optional iterable of RuleSetSuricata ids to attach.

    Returns:
        A dict with a human-readable ``message`` and, on failure, an
        ``exception`` string; on success also an ``upload_message``.
    """
    # Track this run so its outcome is visible in the job history.
    job = Job.create_job('download_from_http', source_uri)
    rulesets = list()
    if rulesets_id:
        for ruleset_id in rulesets_id:
            rulesets.append(RuleSetSuricata.get_by_id(ruleset_id))
    try:
        source = SourceSuricata.get_by_uri(source_uri)
        if source is None:
            # Unknown URI: mark the job failed and bail out early.
            job.update_job("Error - source is None : " + str(source_uri), 'Error')
            return {"message": "Error - source is None : " + str(source_uri)}
    except Exception as e:
        logger.exception("Error for source to upload")
        job.update_job(repr_instance.repr(e), 'Error')
        return {"message": "Error for source to upload", "exception": str(e)}
    try:
        # The actual download; the source object knows its own transport.
        message = source.download_from_http(rulesets)
        job.update_job(message, 'Completed')
        logger.info("task - download_from_http : " + str(source_uri) + " - " + str(message))
    except Exception as e:
        # Download failures additionally notify operators.
        logger.exception("Error for source to upload")
        job.update_job(repr_instance.repr(e), 'Error')
        send_notification("Error for source " + str(source.uri), str(e))
        return {
            "message": "Error for source " + str(source.uri) + " to upload",
            "exception": str(e)
        }
    return {
        "message": "Source " + str(source.uri) + " uploaded successfully by HTTP",
        "upload_message": message
    }
def analysis(request):
    """Render the analysis home page with all AnalysisJob rows and lineage."""
    analysis_jobs = Job.objects.filter(job_type='AnalysisJob')

    # Lineage is computed within the dedicated analysis org/record-group.
    analysis_hierarchy = AnalysisJob.get_analysis_hierarchy()
    analysis_job_lineage = Job.get_all_jobs_lineage(
        organization=analysis_hierarchy['organization'],
        record_group=analysis_hierarchy['record_group'],
        exclude_analysis_jobs=False)

    # Refresh each job's status before display.
    for job in analysis_jobs:
        job.update_status()

    context = {
        'jobs': analysis_jobs,
        'job_lineage_json': json.dumps(analysis_job_lineage),
        'for_analysis': True,
        'breadcrumbs': breadcrumb_parser(request)
    }
    return render(request, 'core/analysis.html', context)
def deploy_critical_stack(api_key):
    """Deploy the Critical Stack identified by *api_key*, tracked via a Job.

    Args:
        api_key: key identifying a CriticalStack row.

    Returns:
        A dict with a human-readable ``message`` and, on failure, an
        ``exception`` string.
    """
    job = Job.create_job('deploy_critical_stack', api_key)
    try:
        critical_stack = CriticalStack.objects.get(api_key=api_key)
    except CriticalStack.DoesNotExist:  # pragma: no cover
        # Fix: logger.exception() requires a message argument; calling it
        # bare raised TypeError and masked the real error in this branch.
        logger.exception("Error - Critical Stack is None - param id not set : %s", api_key)
        job.update_job(
            "Error - Critical Stack is None - param id not set : " + str(api_key),
            'Error')
        return {
            "message":
                "Error - Critical Stack is None - param id not set : " +
                str(api_key)
        }
    else:
        try:
            response_deploy_critical_stack = critical_stack.deploy()
            if response_deploy_critical_stack['status']:
                job.update_job('Deployed Critical Stack successfully', 'Completed')
            elif not response_deploy_critical_stack[
                    'status']:  # pragma: no cover
                # Deploy reported failure; prefer its error details if present.
                if 'errors' in response_deploy_critical_stack:
                    job.update_job(
                        'Error during the critical stack deployed',
                        'Error: ' + str(api_key) + " - " + repr_instance.repr(
                            response_deploy_critical_stack['errors']))
                    logger.error("task - deploy_critical_stack : " +
                                 str(api_key) + " - " + repr_instance.repr(
                                     response_deploy_critical_stack['errors']))
                    return {
                        "message":
                            "Error for Critical Stack " + str(api_key) +
                            " to deploy",
                        "exception":
                            str(response_deploy_critical_stack['errors'])
                    }
                else:
                    job.update_job('Error during the critical stack deployed',
                                   'Error: ' + str(api_key))
                    logger.error("task - deploy_critical_stack : " + str(api_key))
                    return {
                        "message":
                            "Error for Critical Stack " + str(api_key) +
                            " to deploy",
                        "exception": " "
                    }
        except Exception as e:  # pragma: no cover
            # Unexpected failure: record on the job and notify operators.
            logger.exception('Error during the critical stack deployed')
            job.update_job(repr_instance.repr(e), 'Error')
            send_notification("Critical stack " + str(api_key), str(e))
            return {
                "message":
                    "Error for Critical Stack " + str(api_key) + " to deploy",
                "exception": str(e)
            }
    return {
        "message": "Critical Stack " + str(api_key) + ' deployed successfully'
    }
def getJobs(self):
    """Scrape paged job-search results and persist any jobs not yet stored.

    Python 2 code (print statements, urllib.urlopen). Walks the result
    pages 25 at a time, parses each entry with BeautifulSoup, and saves a
    Job row unless one with the same (date_posted, company, title) exists.
    """
    pagesize = 25
    # NOTE(review): Python 2 integer division — any partial final page
    # (self.count not a multiple of 25) is silently skipped; confirm
    # whether the remainder should be fetched.
    page_count = self.count / pagesize
    self.url = self.buildUrl()
    print "url: " + self.url
    for page in range(0, page_count):
        # Each page is addressed via a &start=<offset> query parameter.
        nextpageurl = self.url + '&start=' + str(page * pagesize)
        print nextpageurl
        rawdata = Soup(urllib.urlopen(nextpageurl), "lxml")
        # NOTE(review): assumes every page holds exactly `pagesize`
        # results; a short page would raise IndexError below — confirm.
        for i in range(0, pagesize):
            print "getting next job: " + str(datetime.now())
            result = rawdata.results.contents[i]
            jobtitle = result.find({'jobtitle'}).getText()
            company = result.find({'company'}).getText()
            location = result.find({'formattedlocation'}).getText()
            date_text = result.find({'date'}).getText()
            date_posted = datetime.strptime(date_text, '%a, %d %b %Y %H:%M:%S %Z')
            snippet = result.find({'snippet'}).getText()
            joburl = result.find({'url'}).getText()
            try:
                # Dedup key: posting date + company + title.
                j_exist = Job.objects.get(date_posted=date_posted, \
                    company=company, \
                    title=jobtitle)
            except ObjectDoesNotExist:
                # access url and grab full job description
                wordsoup = Soup(urllib.urlopen(joburl), "lxml")
                description = wordsoup.find('span', attrs={'id':'job_summary'})\
                    .getText().strip()
                sponsored = result.find({'sponsored'}).getText().title()
                expired = result.find({'expired'}).getText().title()
                j = Job(date_posted=date_posted, query=self.query,
                        company=company, location=location, title=jobtitle,
                        description=description, url=joburl,
                        sponsored=sponsored, expired=expired, snippet=snippet)
                j.save()
def post(self, job_id):
    """Manually trigger a single run of the job and log the event to GA."""
    job = Job.find(job_id)
    job.pipeline.start_single_job(job)
    insight.GAProvider().track_event(
        category='jobs',
        action='manual_run',
        label=job.worker_class,
    )
    return job
def get(self, pipeline_id):
    """Return one page of Cloud Logging entries for *pipeline_id*.

    Builds a Stackdriver filter from the optional request arguments
    (worker_class, job_id, log_level, query, fromdate, todate), fetches a
    single 20-entry page, keeps only entries whose payload maps back to a
    known Job, and returns them with the token for the next page.
    """
    args = log_parser.parse_args()
    entries = []
    # Allow up to 300s for the remote logging call (App Engine urlfetch).
    urlfetch.set_default_fetch_deadline(300)
    next_page_token = args.get('next_page_token')
    page_size = 20
    # Imported here, presumably to avoid a module-level import cycle —
    # confirm before hoisting to the top of the file.
    from core import cloud_logging
    project_id = app_identity.get_application_id()
    # Base filter: this project's log stream, restricted to the pipeline.
    filter_ = 'logName="projects/%s/logs/%s"' % (project_id,
                                                 cloud_logging.logger_name)
    filter_ += ' AND jsonPayload.labels.pipeline_id="%s"' % pipeline_id
    # Optional refinements, added only when the argument was supplied.
    if args.get('worker_class'):
        filter_ += ' AND jsonPayload.labels.worker_class="%s"' \
            % args.get('worker_class')
    if args.get('job_id'):
        filter_ += ' AND jsonPayload.labels.job_id="%s"' % args.get(
            'job_id')
    if args.get('log_level'):
        filter_ += ' AND jsonPayload.log_level="%s"' % args.get(
            'log_level')
    if args.get('query'):
        # ':' is the Cloud Logging "contains" operator.
        filter_ += ' AND jsonPayload.message:"%s"' % args.get('query')
    if args.get('fromdate'):
        filter_ += ' AND timestamp>="%s"' % args.get('fromdate')
    if args.get('todate'):
        filter_ += ' AND timestamp<="%s"' % args.get('todate')
    iterator = cloud_logging.client.list_entries(
        projects=[project_id], filter_=filter_, order_by=DESCENDING,
        page_size=page_size, page_token=next_page_token)
    # Only the first page is consumed; the caller paginates via the token.
    page = next(iterator.pages)
    for entry in page:
        # Keep only structured entries that reference an existing Job.
        if isinstance(entry.payload, dict) \
                and entry.payload.get('labels') \
                and entry.payload.get('labels').get('job_id'):
            job = Job.find(entry.payload.get('labels').get('job_id'))
            if job:
                log = {
                    'timestamp': entry.timestamp.__str__(),
                    'payload': entry.payload,
                    'job_name': job.name,
                    'log_level': entry.payload.get('log_level', 'INFO')
                }
                entries.append(log)
    next_page_token = iterator.next_page_token
    return {'entries': entries, 'next_page_token': next_page_token}
def setUpTestData(cls):
    """Create two jobs: one errored (created 30 min before completion), one fresh."""
    completed_at = datetime(year=2017, month=5, day=5, hour=12, tzinfo=pytz.UTC)
    created_at = completed_at - timedelta(minutes=30)
    cls.job1 = Job.objects.create(
        name="test",
        probe="test",
        status='Error',
        result="",
        created=created_at,
        completed=completed_at,
    )
    cls.job2 = Job.create_job('test2', 'probe1')
class JobTest_02(TestCase):
    """Visibility rules for a public, non-premium job without an apply link."""

    def setUp(self):
        self.job = Job(
            title="Vaga 2",
            workplace="Sao Paulo",
            company_name="XPTO",
            company_email="*****@*****.**",
            description="Job bem maneiro",
            public=True,
        )
        self.job.save()

    def test_job_application_link(self):
        # No application_link was set, so the accessor reports False.
        self.assertEqual(self.job.get_application_link(), False)

    def test_publicly_available(self):
        self.assertIn(self.job, Job.get_publicly_available_jobs())

    def test_premium_available(self):
        self.assertNotIn(self.job, Job.get_premium_jobs())
def delete(self, job_id):
    """Destroy a job; returns 422 while its pipeline is active, 204 otherwise."""
    job = Job.find(job_id)
    abort_if_job_doesnt_exist(job, job_id)
    if job.pipeline.is_blocked():
        payload = {
            'message': 'Removing of job for active pipeline is unavailable'
        }
        return payload, 422
    job.destroy()
    return {}, 204
def update(self, request):
    """Queue asynchronous playback of the posted sound and track it as a Job.

    Validates the payload with SoundSerializer (raising on invalid input),
    dispatches the Celery ``play_audio`` task, records a Job row with the
    task id, and returns the standard API response envelope.
    """
    serializer = SoundSerializer(data=request.data)
    if serializer.is_valid(raise_exception=True):
        task = play_audio.delay(serializer.data['sound_path'])
        job = Job(name="play_audio", celery_id=task.id)
        job.save()
        # FIXME: the Job row is never updated when the task finishes —
        # chain sync_job_db (e.g. via link=/apply_async) once supported.
        msg_out = "Asyncronous task. play_audio: " + serializer.data[
            'sound_path']
        response = apirest_response_format(
            request=request,
            status=task.status,
            msg=msg_out,
            result="",
            job_id=job.id,
        )
        return Response(response)
def index(request):
    """Render the home page, optionally filtered by a ``search`` GET term.

    Search terms of 3 characters or fewer are ignored. The ``page`` GET
    parameter selects the page; invalid or missing values fall back to
    page 1.
    """
    search = request.GET.get('search', '')
    # Just to avoid search for less then 3 letters
    search = search if len(search) > 3 else None
    # Passing the value to Paginator
    paginator = Paginator(Job.get_publicly_available_jobs(search), 5)
    page = request.GET.get('page')
    try:
        public_jobs_to_display = paginator.page(page)
    # Narrowed from a bare ``except:`` which also swallowed
    # KeyboardInterrupt/SystemExit; Paginator raises PageNotAnInteger
    # or EmptyPage here.
    except Exception:
        public_jobs_to_display = paginator.page(1)
    context_dict = {
        "publicly_available_jobs": public_jobs_to_display,
        "premium_available_jobs": Job.get_premium_jobs(),
        "new_job_form": JobForm,
        "pages": paginator.page_range,
        "search": search if search is not None else ''
    }
    return render(request, template_name="index.html", context=context_dict)
def delete(self, job_id):
    """Destroy a job (422 while its pipeline is active) and log the event to GA."""
    job = Job.find(job_id)
    abort_if_job_doesnt_exist(job, job_id)
    if job.pipeline.is_blocked():
        payload = {
            'message': 'Removing of job for active pipeline is unavailable'
        }
        return payload, 422
    job.destroy()
    insight.GAProvider().track_event(category='jobs', action='delete')
    return {}, 204
def handle(self, *args, **options):
    """Run the oldest pending translation-update job.

    Python 2 management command. Ensures a Job row exists for every
    (language, package) pair, bails out if a job is already active,
    otherwise marks the oldest job active, downloads its .po file and
    loads it into the database.
    """
    # Check if exists any Job and create them if necessary
    for language in Language.objects.all():
        for package in Package.objects.all():
            try:
                Job.objects.get(language=language, package=package)
            except Job.DoesNotExist:
                Job(language=language, package=package).save()
    # Only one job may run at a time; defer to the active one.
    active_job = Job.objects.filter(active=True)
    if active_job:
        logging.info(
            "There's a job being run right now for %s - %s later" %
            (active_job[0].package.name, active_job[0].language.short_name))
        logging.info("Will try again later!")
        return
    # Oldest update_on first — the most stale job runs next.
    job = Job.objects.order_by('update_on')[0]
    logging.info("Running task for %s - %s" %
                 (job.package.name, job.language.short_name))
    logging.info("Setting job to 'active'.")
    job.active = True
    job.save()
    try:
        # .po file URL is derived from the package source URL + language.
        url = "%s.%s.po" % (job.package.src_url, job.language.short_name)
        logging.info("Fetching file from %s" % url)
        remote_file = urlopen(url)
        # Spool the download to a temp file for the po parser.
        (fd, filename) = tempfile.mkstemp(job.package.name)
        f = os.fdopen(fd, "w")
        for line in remote_file.readlines():
            f.write(line)
        f.close()
        logging.info("File has been downloaded.")
        try:
            po = pofile(filename, autodetect_encoding=True, encoding='utf-8')
            populate_db(po, job.language, job.package)
        except Exception, e:
            logging.error("Failed to open po file %s for %s" %
                          (job.package.name, job.language.short_name))
            logging.error("Error: %s" % str(e))
    except Exception, e:
        logging.error("Failed to download the file located on %s" % url)
        logging.error("Error: %s" % str(e))
def post(self):
    """Create a job on a pipeline; returns 422 while the pipeline is active."""
    args = parser.parse_args()
    pipeline = Pipeline.find(args['pipeline_id'])
    if pipeline.is_blocked():
        payload = {
            'message': 'Creating new jobs for active pipeline is unavailable'
        }
        return payload, 422
    job = Job(args['name'], args['worker_class'], args['pipeline_id'])
    job.assign_attributes(args)
    job.save()
    job.save_relations(args)
    return job, 201
def all_jobs(request):
    """
    View to show all jobs, across all Organizations, RecordGroups, and Job types

    GET Args:
        include_analysis: if true, include Analysis type jobs
    """
    record_groups = RecordGroup.objects.exclude(for_analysis=True)

    # A truthy include_analysis GET param widens the listing to analysis jobs.
    include_analysis = request.GET.get('include_analysis', False)
    if include_analysis:
        jobs = Job.objects.all()
        job_lineage = Job.get_all_jobs_lineage(exclude_analysis_jobs=False)
    else:
        jobs = Job.objects.exclude(job_type='AnalysisJob').all()
        job_lineage = Job.get_all_jobs_lineage(exclude_analysis_jobs=True)

    # Refresh each job's status before display.
    for job in jobs:
        job.update_status()

    context = {
        'jobs': jobs,
        'record_groups': record_groups,
        'job_lineage_json': json.dumps(job_lineage),
        'breadcrumbs': breadcrumb_parser(request)
    }
    return render(request, 'core/all_jobs.html', context)
class JobDetailsViewTest(TestCase):
    """Renders the job detail view and checks every field appears in the HTML."""

    def setUp(self):
        self.job = Job(
            title="Vaga 1",
            workplace="Sao Paulo",
            company_name="XPTO",
            application_link="http://www.xpto.com.br/apply",
            company_email="*****@*****.**",
            description="Job bem maneiro",
            requirements="Job bem maneiro",
        )
        self.job.save()
        self.job_request = HttpRequest()
        response = job_view(self.job_request, self.job.pk)
        self.job_view_html = response.content.decode('utf-8')

    def test_job_details_view(self):
        # Every displayed field of the job must appear in the rendered page.
        for fragment in (
            self.job.title,
            self.job.workplace,
            self.job.company_name,
            self.job.application_link,
            self.job.description,
            self.job.requirements,
        ):
            self.assertIn(fragment, self.job_view_html)
class JobTest_01(TestCase):
    """Persistence and accessor checks for a job that has an application link."""

    def setUp(self):
        self.job = Job(
            title="Vaga 1",
            workplace="Sao Paulo",
            company_name="XPTO",
            application_link="http://www.xpto.com.br/apply",
            company_email="*****@*****.**",
            description="Job bem maneiro",
        )
        self.job.save()

    def test_job_created(self):
        self.assertTrue(Job.objects.exists())

    def test_job_created_at(self):
        self.assertIsInstance(self.job.created_at, datetime)

    def test_job_str(self):
        self.assertEqual("Vaga 1", str(self.job))

    def test_job_application_link(self):
        self.assertEqual(
            "http://www.xpto.com.br/apply",
            str(self.job.get_application_link()),
        )
class HomeJobsViewsTest(TestCase):
    """Checks that a saved job shows up on the rendered home page."""

    def setUp(self):
        self.job = Job(
            title="Vaga 1",
            workplace="Sao Paulo",
            company_name="XPTO",
            application_link="http://www.xpto.com.br/apply",
            company_email="*****@*****.**",
            description="Job bem maneiro",
        )
        self.job.save()
        self.home_page = resolve('/')
        self.request = HttpRequest()
        self.home_page_html = index(self.request).content.decode('utf-8')

    def test_job_is_in_websites_home(self):
        # '/' must route to the index view.
        self.assertEqual(index, self.home_page.func)

    def test_job_in_home(self):
        self.assertIn(str(self.job), self.home_page_html)

    def test_job_url_is_in_home(self):
        self.assertIn("/job/{}/".format(str(self.job.pk)), self.home_page_html)
def put(self, job_id):
    """Update a job's attributes and relations; 422 while its pipeline runs."""
    job = Job.find(job_id)
    abort_if_job_doesnt_exist(job, job_id)
    if job.pipeline.is_blocked():
        payload = {
            'message': 'Editing of job for active pipeline is unavailable'
        }
        return payload, 422
    args = parser.parse_args()
    job.assign_attributes(args)
    job.save()
    job.save_relations(args)
    return job, 200
def dump(file_path: str = Argument(None)):
    """Export the database to a json file"""
    # Default to a timestamped file inside the application directory.
    if file_path is None:
        date_str = datetime.now().strftime("%Y%m%d_%H%M%S")
        file_path = f"{APPLICATION_DIRECTORY}/time-clock-dump-{date_str}.json"
    print(f"Dumping the database to > {file_path}")
    payload = {
        "time_clok_jobs": Job.dump(),
        "time_clok_state": State.dump(),
        "time_clok": Clok.dump(),
        "time_clok_journal": Journal.dump(),
    }
    # Serialize first so a serialization failure never leaves an empty file.
    serialized = json.dumps(payload, default=to_json)
    with open(file_path, "w") as f:
        f.write(serialized)
logging.info("File has been downloaded.") try: po = pofile(filename, autodetect_encoding=True, encoding='utf-8') populate_db(po, job.language, job.package) except Exception, e: logging.error("Failed to open po file %s for %s" % (job.package.name, job.language.short_name)) logging.error("Error: %s" % str(e)) except Exception, e: logging.error("Failed to download the file located on %s" % url) logging.error("Error: %s" % str(e)) finally: # Extract what we need from the "old" job package = job.package language = job.language # Delete this job... logging.info(job.update_on) job.delete() logging.info("Job has been deleted.") # ... and create a new one, put it at the end of queue. try: job = Job.objects.get(language=language, package=package) except Job.DoesNotExist: job = Job(language=language, package=package) job.save() logging.info("New job has been created.") logging.info(job.update_on)
# Python 2 seeding script: fills the Job table with 20000 synthetic
# completed jobs spread over two years, for load/reporting tests.
import random

# Django settings must be bound before importing models.
setup_environ(settings)

from core.models import Job


def get_month_range(date):
    """Return (first, last) datetimes spanning the calendar month of *date*.

    ``last`` is the final day of the month at 23:59:59.
    """
    year = date.year
    month = date.month
    first = datetime(year, month, 1)
    last = first + relativedelta(months = 1) - relativedelta(days=1)
    last = datetime(last.year, last.month, last.day, 23, 59, 59)
    return first, last

# Base date for the synthetic data (Python 2 tolerates the leading-zero
# literals; this file will not parse under Python 3).
date = datetime(2011, 01, 01).replace(tzinfo=utc)
for i in range(0,20000):
    job = Job()
    job.name = 'foo'
    job.user_id = 1
    job.application_id = 1
    # 'C' presumably means Completed — confirm against the Job model choices.
    job.status = 'C'
    # Creation uniformly over two years; start shortly after creation.
    create = date + relativedelta(hours=random.uniform(0,17520)) # two years
    start = create + relativedelta(hours=random.uniform(0,6))
    # end = start + relativedelta(hours=random.gauss(12,8)) # 12 hours, 8 hours
    # Duration drawn from a normal distribution centred on 15 days.
    end = start + relativedelta(hours=random.gauss(360,15)) # 15 days, 10 days
    job.create_time = create
    job.start_time = start
    job.end_time = end
    job.save()
print "done."