def upload():
    """Handle a resume upload: save the file, parse it with pyresparser and
    insert the extracted fields as one row into the ``USERS`` table of
    ``users.db``.

    Returns:
        str: a small HTML snippet describing the outcome.
    """
    if request.method == 'POST':
        uploaded = request.files['fileUploaded']
        print(uploaded)
        if uploaded.filename == '':
            return '<h1>No File</h1>'
        if uploaded:
            filename = secure_filename(uploaded.filename)
            save_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            uploaded.save(save_path)
            data = ResumeParser(save_path).get_extracted_data()

            # Skills are stored as a single ';'-prefixed string
            # (";a;b;c"), matching the previous concatenation format.
            # Guard against the parser returning None for 'skills'.
            skillset = data.get('skills') or []
            skills = ''.join(';' + s for s in skillset)

            row = [
                listToString(data.get('name')),
                listToString(data.get('email')),
                listToString(data.get('mobile_number')),
                skills,
                listToString(data.get('experience')),
                listToString(data.get('college_name')),
                listToString(data.get('degree')),
                listToString(data.get('designation')),
                listToString(data.get('company_names')),
                filename,
            ]
            print(row)

            # Parameterized insert; the leading NULL feeds the
            # autoincrement primary key.  Close the connection even when
            # the insert fails (the old code leaked it on error).
            conn = sqlite3.connect('users.db')
            try:
                conn.execute(
                    "INSERT INTO USERS VALUES (NULL,?,?,?,?,?,?,?,?,?,?)",
                    row)
                conn.commit()
                print("Records created successfully")
            finally:
                conn.close()
            return '<h1>File saved!</h1>'
def get_prediction(filename, details):
    """Parse a resume, vectorize its skills and predict the best-suited role.

    Parameters:
        filename (str): path to the resume file.
        details (dict): candidate details with keys 'name', 'email', 'edu',
            'mobno', 'expe'; these overwrite the parsed values in the result.

    Returns:
        dict: the single-row DataFrame (including 'Best_Suited_Role')
        converted via ``DataFrame.to_dict()``.
    """
    data = ResumeParser(filename, skills_file='skills.csv').get_extracted_data()
    df = pd.DataFrame({'Skills': data.get("skills")})
    df['Email'] = data.get("email")
    df['Contact_Number'] = data.get("mobile_number")
    df['Name'] = 'Sample DE'  # placeholder; overwritten from `details` below

    # The parser may omit or mangle these fields; fall back to defaults.
    # (Narrowed from the previous bare `except:` clauses.)
    try:
        df['Experience'] = data.get("total_experience")
    except (KeyError, TypeError, ValueError):
        df['Experience'] = 0
    try:
        df['Education'] = data.get("education")[0]
    except (TypeError, IndexError):
        df['Education'] = 'Did not catch that!'

    # Collapse the per-row skill strings into one combined string.
    df['Skills'] = concatenate_list_data(df["Skills"].tolist())
    df = df[[
        "Name", "Email", "Contact_Number", "Education", "Experience", "Skills"
    ]]
    df = df.drop_duplicates()

    # Load the pickled classifier without leaking the file handle.
    with open('MB.pkl', 'rb') as model_file:
        model = load(model_file)

    wn = WordNetLemmatizer()
    stop_words = set(stopwords.words('english'))  # hoisted out of the loop
    corpus1 = []
    for i in range(len(df)):
        review = re.sub('[^a-zA-Z]', ' ', str(df['Skills'][i]))
        review = review.lower().split()
        review = [wn.lemmatize(word) for word in review
                  if word not in stop_words]
        corpus1.append(' '.join(review))

    # Pre-fitted TF-IDF transformer, then predict the role.
    with open('TF_FIT.pkl', 'rb') as tf_file:
        tf_fit = load(tf_file)
    X = tf_fit.transform(corpus1)
    df['Best_Suited_Role'] = model.predict(X)

    # Caller-supplied details take precedence over parsed values.
    df['Name'] = details['name']
    df['Email'] = details['email']
    df['Education'] = details['edu']
    df['Contact_Number'] = details['mobno']
    df['Experience'] = details['expe']
    return df.to_dict()
def handleResume(request):
    """Accept a POSTed resume, store it via the Resume model, parse it with
    pyresparser and create a Candidate record from the extracted fields.

    Renders ``app/home.html`` on success, ``app/cvform.html`` otherwise.
    """
    if request.method == 'POST':
        base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        print('post')
        resume = request.FILES.get('resume', None)
        print(resume)
        if resume:
            saving = Resume(resume=resume)
            saving.save()
            # Re-derive the on-disk path from the stored relative name.
            media_path = os.path.join(base_dir, 'resumes')
            lpart = str(saving.resume).split('/')
            full_path = os.path.join(media_path, lpart[1])
            data = ResumeParser(str(full_path)).get_extracted_data()

            # The parser can return None for these fields; the old code
            # crashed on float(None) / len(None).  Default safely instead.
            experience = data.get('total_experience')
            skills = data.get('skills') or []

            candidate = Candidate(
                name=data.get('name'),
                email=data.get('email'),
                phone=data.get('mobile_number'),
                experience=float(experience) if experience is not None else 0.0,
                total_skills=len(skills),
                designation=data.get('designation'),
                company="N/A" if data.get('company_names') is None
                else data.get('company_names'))
            candidate.save()
            return render(request, "app/home.html", {})
    return render(request, "app/cvform.html", {})
def home(request):
    """Render the home page; on POST, parse the uploaded PDF resume and
    expose the extracted skills (or an error message) to the template."""
    msg = None
    skills = None
    allowed_file_type = ['application/pdf']
    if request.method == 'POST':
        resume_file = request.FILES.get('resume')
        file_path = None
        # Guard clauses replace the original nested if/else pyramid.
        if not resume_file:
            msg = "Please upload your resume"
        elif resume_file.content_type not in allowed_file_type:
            msg = "Please provide pdf and docx document"
        else:
            try:
                storage = FileSystemStorage()
                saved_name = storage.save(resume_file.name, resume_file)
                file_path = os.path.join(storage.location, saved_name)
                parsed_data = ResumeParser(file_path).get_extracted_data()
            except Exception as e:
                msg = f"Error occurred while parsing the CV. Detail error msg: {str(e)}"
            else:
                skills = parsed_data.get('skills')
            finally:
                # The stored copy is only needed for parsing; always clean up.
                if file_path and os.path.isfile(file_path):
                    os.remove(file_path)
    return render(request, 'home.html', context={'msg': msg, 'skills': skills})
def handle_uploaded_file(cv):
    """Extracts skills mentioned in a CV.

    Writes the uploaded CV to a temporary location under
    ``settings.BASE_DIR/uploads``, runs pyresparser over it, and removes
    the file again once extraction is finished.

    Parameters:
        cv (file): CV uploaded form UI

    Returns:
        list of string: List of skills extracted from CV. Might be empty
        list if it is unable to extract skills
    """
    temp_path = os.path.join(settings.BASE_DIR, "uploads", cv.name)
    with open(temp_path, "wb+") as out:
        for piece in cv.chunks():
            out.write(piece)
    try:
        parsed = ResumeParser(temp_path).get_extracted_data()
    except Exception:
        # Best-effort: an unparsable CV simply yields no skills.
        parsed = {"skills": []}
    finally:
        os.remove(temp_path)
    return parsed.get("skills", [])
def index(request):
    """Upload a resume, extract its skills with pyresparser and render them
    back on the index page."""
    if request.method == 'POST':
        # Save the uploaded file so the parser can read it from storage.
        file = request.FILES['file']
        file_name = default_storage.save(file.name, file)
        file = default_storage.open(file_name)
        file_url = default_storage.url(file_name)

        # Time the parsing step.
        t1_start = process_time()
        data = ResumeParser(file_url).get_extracted_data()
        skills = data.get("skills")
        result = json.dumps(str(skills))
        t1_stop = process_time()
        print("Elapsed time during parsing in seconds:", t1_stop - t1_start)

        # BUG FIX: Django's render() requires a mapping for `context`, but
        # the old code passed the JSON *string* directly, which breaks
        # template rendering.  Wrap it in a dict (key chosen to match the
        # previously sketched response layout — confirm against template).
        return render(request, 'skillreader/index.html',
                      context={'Skills': result})
    else:
        return render(request, 'skillreader/index.html')
def handleResume(request):
    """Accept a POSTed resume, parse it, score its skills against the
    predefined Skill table and persist a ranked Candidate record.

    Renders ``Authority/sorted_list.html`` on success,
    ``Candidate/cv_form.html`` otherwise.
    """
    if request.method == 'POST':
        base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        print('post')
        resume = request.FILES.get('resume', None)
        print(resume)
        if resume:
            saving = Resume(resume=resume)
            saving.save()
            media_path = os.path.join(base_dir, 'Resumes')
            l_part = str(saving.resume).split('/')
            full_path = os.path.join(media_path, l_part[1])
            data = ResumeParser(str(full_path)).get_extracted_data()
            print(data.get('total_experience'))
            print(data.get('skills'))

            skills = data.get('skills') or []
            # Build a case-insensitive score lookup once instead of
            # re-scanning every predefined skill for every extracted skill
            # (was O(n*m), now O(n+m)).
            score_by_skill = {
                s.skill_name.casefold(): s.score
                for s in Skill.objects.all()
            }
            skills_score = sum(
                score_by_skill.get(skill.casefold(), 0) for skill in skills)
            print(skills_score)

            # total_experience may be None; the old float(None) crashed.
            experience = data.get('total_experience')
            experience = float(experience) if experience is not None else 0.0
            experiences_skills_combined_score = experience + skills_score
            print(experiences_skills_combined_score)

            candidate = Candidate(
                name=data.get('name'),
                email=data.get('email'),
                phone=data.get('mobile_number'),
                total_experiences=experience,
                total_skills=len(skills),
                designation="N/A" if data.get('designation') is None
                else data.get('designation'),
                company="N/A" if data.get('company_names') is None
                else data.get('company_names'),
                skills_score=skills_score,
                experiences_skills_combined_score=(
                    experiences_skills_combined_score))
            candidate.save()
            return render(request, "Authority/sorted_list.html", {})
    return render(request, "Candidate/cv_form.html", {})
def predict():
    """Handle the candidate-submission form: score the free-text message
    with the four MBTI axis classifiers, parse the uploaded resume, compute
    skill/experience scores and persist everything as one database row.

    Returns:
        str: a confirmation message.
    """
    if request.method == 'POST':
        if not request.form['fname'] or not request.form['message']:
            # BUG FIX: the old code printed an undefined name ``error``
            # here, raising NameError instead of reporting the problem.
            print('Please enter all the fields')
        else:
            message = request.form['message']
            if len(message) > 10:

                def eval_string(my_post):
                    """Return the 9 personality scores for one text post."""
                    c = cv.transform([tokeniser(my_post)])
                    x = idf_transformer.transform(c)
                    ie = lr_ie.predict_proba(x).flatten()
                    ns = lr_ns.predict_proba(x).flatten()
                    tf = lr_tf.predict_proba(x).flatten()
                    jp = lr_jp.predict_proba(x).flatten()
                    score = [0] * 9
                    # Overall score averages one pole from each axis.
                    score[0] = ((ie[1] + ns[1] + tf[0] + jp[0]) / 4) * 100
                    score[1] = ie[0] * 100  # introvert
                    score[2] = ie[1] * 100  # extrovert
                    score[3] = ns[0] * 100  # intuitive
                    score[4] = ns[1] * 100  # sensor
                    score[5] = tf[0] * 100  # thinker
                    score[6] = tf[1] * 100  # feeler
                    score[7] = jp[0] * 100  # judger
                    score[8] = jp[1] * 100  # perciever
                    return score

                scores_list = eval_string(message)
                personality_score = int(round(scores_list[0]))
                my_prediction = personality_score

                # Save the resume, then parse the *saved* copy.
                f = request.files['upload']
                saved_path = os.path.join(app.config['UPLOAD_FOLDER'],
                                          secure_filename(f.filename))
                f.save(saved_path)
                # BUG FIX: the parser was previously handed the raw client
                # filename (relative to the CWD), not the path the file was
                # actually saved to.
                resume_data = ResumeParser(saved_path).get_extracted_data()
                print(resume_data)

                # Extract the fields used for scoring and storage.
                college = resume_data.get("college")
                email = resume_data.get("email")
                mobilenumber = resume_data.get("mobile_number")
                skills = resume_data.get("skills")
                experience = resume_data.get("total_experience")

                # Skills score out of 40: fraction of the required skill
                # set (`text`) matched by the candidate.
                skills_score = (len(text & set(skills)) / len(text)) * 40
                # Experience score out of 40, clamped to the required range.
                if experience > req_experience[1]:
                    experience_score = 40
                elif experience < req_experience[0]:
                    experience_score = 0
                else:
                    experience_score = (experience / req_experience[1]) * 40

                # Total uses the raw 0-40 components; the stored component
                # scores are then rescaled to 0-100 (order matters).
                candidate_total = (experience_score + skills_score +
                                   personality_score * 0.2)
                experience_score = experience_score * 2.5
                skills_score = skills_score * 2.5

                # Per-axis personality scores for storage.
                introvert = int(round(scores_list[1]))
                extrovert = int(round(scores_list[2]))
                intuitive = int(round(scores_list[3]))
                sensor = int(round(scores_list[4]))
                thinker = int(round(scores_list[5]))
                feeler = int(round(scores_list[6]))
                judger = int(round(scores_list[7]))
                perciever = int(round(scores_list[8]))

                data = database(request.form['fname'],
                                request.form['message'], personality_score,
                                skills_score, experience_score,
                                candidate_total, college, email,
                                mobilenumber, introvert, extrovert, sensor,
                                intuitive, thinker, feeler, judger,
                                perciever)
                db.session.add(data)
                db.session.commit()
                print('Record was successfully submitted')
            else:
                my_prediction = 3
    return "YOUR RECORD WAS SUCESSFULLY SUBMITTED"
def getTotalExperience(self, f):
    """Return the total experience parsed from resume *f*, or the string
    'No experience found' when the parser did not extract it."""
    extracted = ResumeParser(f).get_extracted_data()
    return extracted.get('total_experience', 'No experience found')
def getExperience(self, f):
    """Return the experience entries parsed from resume *f*, or the string
    'No experience found' when the parser did not extract any."""
    extracted = ResumeParser(f).get_extracted_data()
    return extracted.get('experience', 'No experience found')
def getSkills(self, f):
    """Return the skills parsed from resume *f*, or the string
    'No skills found' when the parser did not extract any."""
    extracted = ResumeParser(f).get_extracted_data()
    return extracted.get('skills', 'No skills found')
def getName(self, f):
    """Return the candidate name parsed from resume *f*, or the string
    'No Name Found' when the parser did not extract it."""
    extracted = ResumeParser(f).get_extracted_data()
    return extracted.get('name', 'No Name Found')