Code Example #1
async def jobs(q: str, l: str, page_id: int = 0):
    try:
        job_html = get_jobs(q, l, page_id)
        jobs_info = get_job_info(job_html)
        return jobs_info
    except Exception:
        # Surface any scraping failure to the client as a 400.
        raise HTTPException(400, 'Error: unable to fetch jobs')
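Example #1 shows only the view function. Below is a minimal sketch of the FastAPI wiring it appears to assume; the "/jobs" route path and the two helper stubs are assumptions, not part of the source.

from fastapi import FastAPI, HTTPException

app = FastAPI()

def get_jobs(q: str, l: str, page_id: int) -> str:
    # Hypothetical stub: fetch the raw job-listing HTML for a query and location.
    raise NotImplementedError

def get_job_info(job_html: str) -> list:
    # Hypothetical stub: parse individual postings out of the listing HTML.
    raise NotImplementedError

@app.get("/jobs")
async def jobs(q: str, l: str, page_id: int = 0):
    try:
        return get_job_info(get_jobs(q, l, page_id))
    except Exception:
        raise HTTPException(400, 'Error: unable to fetch jobs')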
Code Example #2
File: main.py Project: developer119korea/web-scraper
def report():
    word = request.args.get('word')
    if word:
        word = word.lower()
        fromDB = db.get(word)
        if fromDB:
            jobs = fromDB
        else:
            jobs = get_jobs(word)
            db[word] = jobs
    else:
        return redirect("/")
    return render_template("report.html",
                           searchingBy=word,
                           resultsNumber=len(jobs),
                           jobs=jobs)
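Examples #2 through #9 are variants of the same Flask view: look up the search word, serve cached results from a fake in-memory DB when present, otherwise scrape and cache. A minimal sketch of the surrounding context these views assume; the "/report" route path and the stub get_jobs are guesses, not part of the source.

from flask import Flask, render_template, request, redirect

app = Flask(__name__)
db = {}  # fake in-memory cache mapping a search word to its scraped jobs

def get_jobs(word):
    # Hypothetical stub: a real implementation would scrape listings for `word`.
    return []

# Each report() view in these examples would then be registered with:
# @app.route("/report")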
Code Example #3
def report():
    word = request.args.get("word")
    if word:
        word = word.lower()

        existingJobs = db.get(word)
        if existingJobs:
            jobs = existingJobs
        else:
            jobs = get_jobs(word)
            db[word] = jobs
    else:
        return redirect("/")
    return render_template("report.html",
                           resultsNumber=len(jobs),
                           searchingBy=word,
                           jobs=jobs)
Code Example #4
def report():
  word = request.args.get('word')
  if word:
    word = word.lower()  # convert uppercase input to lowercase
    existingJobs = db.get(word)
    if existingJobs:
      jobs = existingJobs
    else:
      jobs = get_jobs(word)
      db[word] = jobs
  else:
    return redirect("/")# 미입력시 홈으로 redirect
  return render_template(
    "report.html",
    searchingBy=word,
    resultsNumber=len(jobs),
    jobs=jobs) 
Code Example #5
File: main.py Project: Yongbbbba/JobScraper
def report():
    word = request.args.get("word")
    if word:
        word = word.lower()
        JobDB = db.get(word)
        if JobDB:
            jobs = JobDB
        else:
            jobs = get_jobs(word)
            db[word] = jobs
    else:
        return redirect("/")
    return render_template("report.html",
                           searchingBy=word,
                           resultsNumber=len(jobs),
                           jobs=jobs
                           # render each job as HTML
                           )
Code Example #6
File: main.py Project: inhalin/python-scraper
def report():
  query = request.args.get('query')
  if query:
    query = query.lower()
    existing_jobs = db.get(query)
    if existing_jobs:
      jobs = existing_jobs
    else:
      jobs = get_jobs(query)
      db[query] = jobs
  else:
    return redirect('/')
  return render_template(
    "report.html", 
    query=query,
    results_number=len(jobs),
    jobs=jobs
  )
Code Example #7
File: app.py Project: hchang18/web-scraper-python
def report():
    # query arguments
    word = request.args.get('word')
    if word:
        word = word.lower()
        existingJobs = db.get(word)
        if existingJobs:
            jobs = existingJobs
        else:
            jobs = get_jobs(word)
            db[word] = jobs
    else:
        # we don't want anyone to access /report without any word
        # better redirect it to home
        return redirect("/")
    return render_template("report.html",
                           searchingBy=word,
                           resultsNumber=len(jobs),
                           jobs=jobs)
Code Example #8
def report():
    word = request.args.get("word")

    if word:
        word = word.lower()

        existingJobs = db.get(word)
        if existingJobs:
            jobs = existingJobs
        else:
            jobs = get_jobs(word)
            db[word] = jobs

    else:
        return redirect('/')

    return render_template('report.html',
                           searchingBy=word,
                           resultNumber=len(jobs),
                           jobs=jobs)
Code Example #9
def search():
    # request.args
    word = request.args.get("word")
    # lowercase the input if present; if there is no input, redirect to home (/)
    if word:
        word = word.lower()
        # if this word was searched before, reuse the result from the fake DB (to save time)
        checkData = db.get(word)

        if checkData:
            jobs = checkData
        else:
            jobs = get_jobs(word)
            db[word] = jobs
    else:
        return redirect("/")

    return render_template("search.html",
                           word=word,
                           length=len(jobs),
                           jobs=jobs)
Code Example #10
File: data_extract.py Project: RyanXJu/Portfolio
import scraper
import pandas as pd
path = "/usr/bin/google-chrome-stable"

# Data Science
df = scraper.get_jobs("Data Scientist", 1000, False, path, 10)
df.to_csv('data/data_scientist.csv')

# Data Engineering
df = scraper.get_jobs("Data Engineer", 1000, False, path, 10)
df.to_csv('data/data_engineer.csv')

# Data Analyst
df = scraper.get_jobs("Data Analyst", 1000, False, path, 10)
df.to_csv('data/data_analyst.csv')

# Machine learning engineer
df = scraper.get_jobs("Machine Learning Engineer", 1000, False, path, 10)
df.to_csv('data/ml_engineer.csv')
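Examples #10 and #12 through #16 call a different get_jobs: a Selenium-based scraper that returns a pandas DataFrame. The call sites vary (some pass a chromedriver path, some a sleep time, some both), so the projects likely use slightly different versions of the scraper. A sketch of the fullest signature implied by examples #10 and #14; the parameter names and the DataFrame columns are assumptions inferred from the arguments.

import pandas as pd

def get_jobs(keyword, num_jobs, verbose, path, slp_time):
    # Assumed shape: drive Chrome via Selenium using the driver at `path`,
    # collect up to `num_jobs` postings for `keyword`, print progress when
    # `verbose` is True, and pause `slp_time` seconds between page loads.
    # Returns one row per posting (columns here are illustrative).
    return pd.DataFrame(columns=["Job Title", "Company Name", "Location"])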
Code Example #11
        "Orange County, California", "Dallas, Texas", "Fort Worth, Texas",
        "Kansas City, Missouri", "Indianapolis, Indiana",
        "Salt Lake City, Utah", "Nashville, Tennessee"
    ]

    jobs_list = [
        "Scrum master", "Network administrator", "User interface developer",
        "Php developer", "Data architect", "Information technology manager",
        "Business intelligence developer", "User interface designer",
        "Product manager", "Technical account manager",
        "Systems administrator", "Mobile developer",
        "Quality assurance manager", "Software engineer",
        "Solutions architect", "User experience designer",
        "Database administrator", "Data engineer", "DevOps engineer",
        "Data scientist", "Hardware Engineer", "Salesforce administrator",
        "Cloud engineer"
    ]

    counter = 0
    THE_DATA = []
    for job in jobs_list:
        for city in city_state_list[CITY_NUMBER:CITY_NUMBER + 1]:
            jobs = scraper.get_jobs(job, city, 25, 10)
            for x in jobs:
                print(f"{job} - {city}: {counter}")
                counter += 1
                THE_DATA.append(x)

    with open("./data/" + city + ".txt", "w") as f:
        f.write(json.dumps(THE_DATA))
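Example #11 is truncated: it uses scraper, json, and a CITY_NUMBER constant that the excerpt never defines. A minimal sketch of the assumed setup; reading the index from argv is a guess.

import json
import sys

import scraper  # assumed to expose get_jobs(job, city, n_results, pages)

# Index into city_state_list selecting the one city to scrape (assumption).
CITY_NUMBER = int(sys.argv[1]) if len(sys.argv) > 1 else 0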
Code Example #12
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 21:17:19 2020

@author: Johnson
"""

import scraper as gs
import pandas as pd

df = gs.get_jobs("data scientist", 500, False, 15)
df.to_csv('glassdoor_jobs.csv', index=False)
Code Example #13
import scraper as sp
import pandas as pd
df = sp.get_jobs("data scientist", 10, False, 2)
df.to_csv('Details.csv', index=False)
Code Example #14
'''
Created on 10/3/2020

tutorial: https://www.youtube.com/watch?v=GmW4F6MHqqs
'''

import scraper as sc

path = 'C:/Users/User/PycharmProjects/GlassdoorDataScienceJobs/chromedriver'

df = sc.get_jobs('data scientist', 1000, False, path, 15)

df.to_csv('glassdoor_jobs.csv', index=False)
Code Example #15
# -*- coding: utf-8 -*-
"""
Created on Fri Jun  5 00:05:42 2020

@author: carso
"""
import scraper as scrape
import pandas as pd

df = scrape.get_jobs("data scientist", 1000, False,
                     r"D:\Github\Data Science Project\chromedriver.exe")

df.to_csv("data_science_jobs.csv", index=False)
Code Example #16
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov  1 22:42:51 2020

@author: wraith
"""

import scraper
import pandas as pd

df = scraper.get_jobs('data scientist', 2000, False)