Exemplo n.º 1
0
def benchmark():
    """Extract work experience from every resume file under ``path``.

    Walks the (module-level) ``path`` directory tree, runs text extraction
    plus work-experience extraction on each file, and returns a mapping of
    file name -> extracted experience.
    """
    features = FeatureExtraction()
    experience_by_file = {}
    for root, _dir_names, file_names in os.walk(path):
        for file_name in file_names:
            full_path = os.path.join(root, file_name)
            resume_text = get_text(full_path)
            experience_by_file[file_name] = features.get_work_experience(
                resume_text)
    return experience_by_file
Exemplo n.º 2
0
    def post(self, request, *args, **kwargs):
        """Parse an uploaded resume file and return serialized resume data.

        Expects a ``file`` entry in ``request.FILES`` and optionally a
        ``resume_id`` in ``request.data`` (one is generated when absent).
        Returns HTTP 400 when no file is provided.
        """
        f = request.FILES.get('file', None)
        if not f:
            logger.debug(
                'Resume file not provided. Used against ResumeParseInternal endpoint'
            )
            return Response(
                {
                    'status': 'failure',
                    'message': 'Cannot proceed further. File is missing.'
                },
                status=status.HTTP_400_BAD_REQUEST)

        # Get resume_id or create on our own.
        resume_id = request.data.get('resume_id', None)
        if not resume_id:
            resume_id = str(uuid4())

        # Get file name and extension from provided file.
        f_name, f_ext = os.path.splitext(f.name.lower())
        # If file name is large, truncate. Bug fix: the original evaluated
        # the slice expression but never assigned it back, so names were
        # never actually truncated.
        if len(f_name) > 40:
            f_name = f_name[:40]

        # Save resume to default storage.
        uploaded_fname = default_storage.save(resume_id + f_ext,
                                              ContentFile(f.read()))
        # Get path of stored resume.
        path = default_storage.url(default_storage.open(uploaded_fname).name)

        text = get_text(path)
        if not text or len(text) < 200:
            # Too little text extracted: the guess is that the file is an
            # image-only PDF, so fall back to OCR.
            text = get_text_via_ocr(path)

        # Simhash of the content; used downstream for duplicate detection.
        hash_value = get_sim_hash_for_resume_content(text)
        if hash_value:
            hash_value = hash_value.value

        serializer_data = create_resume_instance(path, text, f_name,
                                                 hash_value, request.user,
                                                 request.data)

        return Response(serializer_data)
Exemplo n.º 3
0
    def post(self, request):
        """Accept a trial-user resume upload and queue it for parsing.

        Validates the submitted form, stores the file (S3 or default
        storage), kicks off the async ``parse_resume`` task, and responds
        with the new resume's id and parse status.
        """
        form = TrialUseCaseForm(request.POST, request.FILES)
        if not form.is_valid():
            logger.debug(form.errors)
            return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)

        cleaned = form.cleaned_data
        user_email = cleaned.get('email_address', None)
        resume_file = request.FILES.get('file', None)
        file_name = resume_file.name.lower()
        base_name, extension = os.path.splitext(file_name)
        if settings.UPLOAD_TO_S3:
            path = upload_to_s3(resume_file, base_name, extension)
            print(("uploaded path", path))
        else:
            stored_name = default_storage.save(
                "%s" % uuid4() + extension, ContentFile(resume_file.read()))
            path = default_storage.open(stored_name).name
        skills = cleaned['skills']
        text = get_text(path)

        # Content hash enables duplicate detection downstream.
        hash_value = get_sim_hash_for_resume_content(text).value
        trial_user = resume.models.TrialUser.objects.create(
            email_address=user_email, )
        resume_record = ResumeModel.objects.create(
            trial_user=trial_user,
            parse_status=ResumeModel.STATUS.processing)
        # Parsing is slow, so it runs as a celery task.
        parse_resume.delay(path, text, resume_record.id, skills, file_name,
                           hash_value)
        return Response({
            'status': resume_record.get_parse_status_display(),
            'resume_id': resume_record.id
        })
Exemplo n.º 4
0
 def setUp(self):
     """Cache the extracted text of the fixture resume for the tests.

     ``path_to_test_data`` is presumably a module-level fixture path —
     TODO confirm against the enclosing test module.
     """
     self.text = get_text(path_to_test_data)
Exemplo n.º 5
0
def benchmark(quick_mode=False):
    """Categorize the URLs found in each resume and append rows to output.csv.

    For every file in the (module-level) ``resume_list``, extracts text,
    categorizes the URLs it contains, and appends one row per resume to
    ``output.csv``. With ``quick_mode=True`` only the first five resumes
    are processed.

    Returns a JSON string of ``user`` (currently always ``"{}"`` since
    ``user`` is never populated; kept for interface compatibility).
    """
    global resume_list
    user = {}
    if quick_mode:
        resume_list = resume_list[:5]

    field_names = [
        'file_name', 'Personal Website', 'Blog', 'GitHub', 'LinkedIn',
        'StackOverflow', 'BitBucket', 'GitHub_Gist', 'Other urls'
    ]
    # Open the CSV once for the whole run. Bug fixes vs. the original:
    # (1) the header was re-written for every resume; (2) ``categories``
    # from a previous iteration leaked into — or was undefined for — a
    # resume whose text could not be extracted (NameError on the first
    # such resume).
    with open('output.csv', 'a') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=field_names)
        if csvfile.tell() == 0:
            # Only emit the header when the file is still empty.
            writer.writeheader()
        for resume in resume_list:
            # Result unused here; call kept in case it has side effects —
            # NOTE(review): confirm whether extract_resume can be dropped.
            response = extract_resume(resume)

            # Default every column to the original placeholder value.
            blog = ' '
            personal_website = ' '
            github_url = ' '
            stackoverflow_url = ' '
            linkedin_url = ' '
            bit_bucket = ' '
            gist_url = ' '
            other_urls = ' '

            text = get_text(resume)
            if text is not None:
                urls = get_urls(text)
                categories = url_categorizer(urls, text)
                websites = categories['Websites']
                personal = websites.get('Personal Urls', {})
                social = categories['Social Websites']
                blog = websites.get('Blog', blog)
                personal_website = personal.get('Personal Website',
                                                personal_website)
                other_urls = personal.get('Other Urls', other_urls)
                github_url = social.get('GitHub Url', github_url)
                stackoverflow_url = social.get('StackOverflow Url',
                                               stackoverflow_url)
                linkedin_url = social.get('LinkedIn Url', linkedin_url)
                bit_bucket = social.get('BitBucket Url', bit_bucket)
                gist_url = social.get('GitHub Gist Url', gist_url)

            writer.writerow({
                'file_name': resume,
                'Personal Website': personal_website,
                'Blog': blog,
                'GitHub': github_url,
                'LinkedIn': linkedin_url,
                'StackOverflow': stackoverflow_url,
                'BitBucket': bit_bucket,
                'GitHub_Gist': gist_url,
                'Other urls': other_urls
            })

    return json.dumps(user)
Exemplo n.º 6
0
    def post(self, request, *args, **kwargs):
        # TODO(kaviraj): Need serious cleanup
        """

        Upload a Resume File and skills.
        ---
        parameters:
            - name: file
              type: file
            - name: skills
              type: string

        """
        # Enforce the per-user upload quota before touching the file.
        if request.user.resumes.count() >= UserProfile.objects.get(
                user=request.user).limit:
            logger.debug('Resume upload limit exceeded for user: {}'.format(
                request.user))
            return Response(
                {
                    'status': 'Failed',
                    'message': 'Resume upload limit exceeded.'
                },
                status=status.HTTP_403_FORBIDDEN)

        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            get_file = request.FILES['file']
            file_name = get_file.name.lower()
            name, ext = os.path.splitext(file_name)
            if settings.UPLOAD_TO_S3:
                path = upload_to_s3(get_file, name, ext)
            else:
                uploaded_file_name = default_storage.save(
                    "%s" % uuid4() + ext, ContentFile(get_file.read()))
                path = default_storage.open(uploaded_file_name).name

            skills = ''
            text = get_text(path)
            if not text or len(text) < 200:
                # Probably an image-only PDF; retry extraction via OCR.
                text = get_text_via_ocr(path)
            # celery task
            content_hash = get_sim_hash_for_resume_content(text)
            hash_value = content_hash.value

            # TODO(kaviraj): Need to rethink about hashing. Seems like not scalable
            # Bug fix: materialize the filter. In Python 3 ``filter`` returns
            # a lazy object that is always truthy, so the
            # ``if not list_hash_values`` branch below could never be taken.
            list_hash_values = list(
                filter(
                    None,
                    ResumeModel.objects.values().values_list(
                        'content_hash',
                        flat=True).filter(trial_user__isnull=True)))

            if not list_hash_values:
                # No existing hashes: this is certainly a new resume.
                resume_instance = ResumeModel.objects.create(
                    user=request.user,
                    parse_status=ResumeModel.STATUS.processing)
                resume_id = resume_instance.id
                parse_resume.delay(path, text, resume_id, skills, file_name,
                                   hash_value)
                resume_status = resume_instance.get_parse_status_display()
                new_response = {
                    'status': resume_status,
                    'resume_id': resume_id
                }
                return JsonResponse(new_response)

            check_present_values = check_hamming_distance(
                list_hash_values, hash_value)
            value = check_present_values[1]
            if check_present_values[0]:
                # A near-duplicate resume already exists.
                # TODO(kaviraj): Need to handle filter[0] index error?
                resume_instance = ResumeModel.objects.filter(
                    content_hash=str(value))[0]
                resume_id = resume_instance.id

                resume_skills = [
                    skill.name for skill in list(resume_instance.skills.all())
                ]
                new_skills = skills.lower().split(',')
                new_skills = [
                    skill.strip() for skill in new_skills if skill.strip()
                ]
                if set(resume_skills) == set(new_skills):
                    # Same content and same skill set: reuse the parsed copy.
                    resume_status = resume_instance.get_parse_status_display()
                    new_response = {
                        'status': resume_status,
                        'resume_id': resume_id
                    }
                    return JsonResponse(new_response)

            # Duplicate content but different skills (or no near match):
            # create a fresh record and queue parsing.
            resume_instance = ResumeModel.objects.create(
                user=request.user, parse_status=ResumeModel.STATUS.processing)
            resume_id = resume_instance.id
            parse_resume.delay(path, text, resume_id, skills, file_name,
                               hash_value)
            resume_status = resume_instance.get_parse_status_display()
            new_response = {'status': resume_status, 'resume_id': resume_id}
            return JsonResponse(new_response)

        logger.debug(form.errors)
        return JsonResponse(form.errors)