def _get_prediction(img_path, threshold):
    """
    Gets the prediction of an image given a threshold.

    :param img_path: Image path (the file is overwritten in place with the
        resized JPEG before prediction).
    :param threshold: Minimum confidence score for a detection to be kept.
    :return: Predicted masks, boxes and classes. All three are empty when no
        detection passes the threshold (the previous implementation raised
        IndexError in that case).
    """
    with Timer('Transform image'):
        with Image.open(img_path).convert('RGB') as img:
            # Resize and re-save so later readers of img_path see the same image.
            img = image_utils.resize_aspect(img)
            img.save(img_path, format='JPEG', quality=95)
            transform = torch_transform.Compose([torch_transform.ToTensor()])
            img_transformed = transform(img)

    with Timer('Predict'):
        prediction = model([img_transformed])

    with Timer('Get results'):
        prediction_score = list(prediction[0]['scores'].detach().numpy())
        # torchvision detection models return scores sorted in descending
        # order, so the number of detections to keep is simply the count of
        # scores above the threshold. The old
        # `[prediction_score.index(x) ...][-1]` pattern raised IndexError
        # when nothing passed the threshold and mis-indexed duplicate scores.
        keep = sum(1 for score in prediction_score if score > threshold)
        masks = (prediction[0]['masks'] > 0.5).squeeze().detach().cpu().numpy()
        # NOTE(review): .squeeze() collapses the mask stack to 2-D when there
        # is exactly one detection — confirm callers handle that shape.
        prediction_class = [
            COCO_INSTANCE_CATEGORY_NAMES[i]
            for i in list(prediction[0]['labels'].numpy())
        ]
        prediction_boxes = [
            [(i[0], i[1]), (i[2], i[3])]
            for i in list(prediction[0]['boxes'].detach().numpy())
        ]
        # Truncate every result list to the detections above the threshold.
        masks = masks[:keep]
        prediction_boxes = prediction_boxes[:keep]
        prediction_class = prediction_class[:keep]
    return masks, prediction_boxes, prediction_class
def instance_segmentation_api(image_path, object_list):
    """
    Cut every object of a list from an image.

    :param image_path: Image path.
    :param object_list: List of objects (COCO class names) to cut.
    :return: Output path of the generated image.
    """
    masks, boxes, prediction_cls = _get_prediction(image_path, MODEL_THRESHOLD)
    with Timer('Apply mask'):
        img = cv2.imread(image_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        if masks.any():
            # Accumulate the masks of every requested class into one layer,
            # then overlay a coloured version of it on the image.
            rgb_mask_list = np.zeros(masks[0].shape)
            for i in range(len(masks)):
                if prediction_cls[i] in object_list:
                    rgb_mask_list += masks[i]
            rgb_mask = _random_colour_masks(rgb_mask_list)
            img = cv2.addWeighted(img, 1, rgb_mask, 1, 0)
    with Timer('Generate image result'):
        image_path_output = f'{".".join(image_path.split(".")[:-1])}_output.png'
        # img.shape is (height, width, channels) while figsize expects
        # (width, height) in inches — the original call had the axes swapped.
        plt.figure(figsize=(img.shape[1] * PIX_TO_INCH,
                            img.shape[0] * PIX_TO_INCH))
        plt.axis('off')
        plt.imshow(img)
        plt.savefig(image_path_output, bbox_inches='tight', dpi=175)
    del img
    return image_path_output
def select_option_from_drop_down(self, option, drop_down):
    """Open *drop_down* and click the entry labelled *option*.

    A short pause follows each click so the widget can settle.
    """
    # Expand the drop-down first.
    self.try_and_click(drop_down)
    Timer.sleep()
    # The clickable row sits three levels above the labelled div.
    option_xpath = '//div[@data-automation-label="{0}"]/../../..'.format(option)
    option_row = self.driver.find_element_by_xpath(option_xpath)
    self.try_and_click(option_row)
    Timer.sleep()
def fill(self):
    """Fill the 'My Information' page fields and advance to the next step.

    Types the user's first name, last name and phone number, opens the
    "sources" drop-down and clicks its hard-coded entry #3, then clicks the
    Next command button and the shared click_next_button helper.
    Timer.sleep() pauses let the page react between interactions.
    """
    base = super(MyInformationPage, self)
    # First name input.
    self.driver.find_element_by_xpath(
        '//*[@id="textInput.nameComponent--uid12-input"]').send_keys(
        self.user.first_name)
    Timer.sleep()
    # Last name input.
    self.driver.find_element_by_xpath(
        '//*[@id="textInput.nameComponent--uid13-input"]').send_keys(
        self.user.last_name)
    Timer.sleep()
    # Phone number input.
    self.driver.find_element_by_xpath(
        '//*[@id="textInput.phone--uid19-input"]').send_keys(
        self.user.phone_number)
    # Open the "sources" drop-down.
    self.driver.find_element_by_xpath(
        '//*[@id="dropDownSelectList.sources-input--uid20-input"]/div[1]/div'
    ).click()
    Timer.sleep()
    # NOTE(review): entry-3 is hard-coded; which label it maps to depends on
    # the live page — confirm it is still the intended source.
    drop_down_element = self.driver.find_element_by_xpath(
        '//div[@id="dropDownSelectList.sources-input-entry-3"]')
    base.try_and_click(drop_down_element)
    Timer.sleep()
    self.driver.find_element_by_xpath(
        '//button[@data-automation-id="wd-CommandButton_next"]').click()
    Timer.sleep()
    base.click_next_button()
def fill(self):
    """Fill the self-identification page and advance.

    Asks the user on the console whether they want to answer the disability
    question, clicks the matching check box, types the full name, types
    today's date (MMDDYYYY) into the date widget and clicks Next.
    """
    base = super(SelfIdentityPage, self)
    check_boxes = self.driver.find_elements_by_xpath(
        '//div[@data-automation-id="checkboxPanel"]')
    answer = InputOutput.input_yes_no(
        '\n\nDo you wish to answer a question on your disability(/ies), if any?')
    if not answer:
        # NOTE(review): check_boxes[2] presumed to be the "do not wish to
        # answer" option — confirm against the live page.
        base.try_and_click(check_boxes[2])
    else:
        # Informational text shown before the yes/no prompt.
        InputOutput.output("How do I know if I have a disability?\nYou are considered to have a disability if "
                           "you have a physical or mental impairment or medical condition that substantially "
                           "limits a major life activity, or if you have a history or record of such an "
                           "impairment or medical condition.\nDisabilities include, but are not limited to: \n- "
                           "Blindness\n- Deafness\n- Cancer\n- Diabetes\n- Epilepsy\n- Autism\n- Cerebral "
                           "palsy\n- HIV/AIDS\n- Schizophrenia\n- Muscular dystrophy\n- Bipolar disorder\n- "
                           "Major depression\n- Multiple sclerosis (MS)\n- Missing limbs or partially missing "
                           "limbs\n- Post-traumatic stress disorder (PTSD)\n- Obsessive compulsive disorder\n- "
                           "Impairments requiring the use of a wheelchair\n- Intellectual disability ("
                           "previously called mental retardation)")
        disabilities_answer = InputOutput.input_yes_no(
            'Do you have or have had any of the above disabilities?')
        # NOTE(review): boxes [0]/[1] presumed to be yes/no respectively.
        if disabilities_answer:
            base.try_and_click(check_boxes[0])
        else:
            base.try_and_click(check_boxes[1])
    # Full name text input (6th textInput on the page).
    self.driver.find_element_by_xpath(
        '(//div[@data-automation-id="textInput"])[6]/input').send_keys(self.full_name)
    Timer.sleep()
    # Focus the date widget and type today's date as MMDDYYYY.
    base.try_and_click(self.driver.find_element_by_xpath(
        '//span[@data-automation-id="dateSectionMonth"]'))
    ActionChains(self.driver).send_keys(time.strftime("%m%d%Y")).perform()
    base.click_next_button()
def login():
    """Return a success response carrying the Spotify authorization URL.

    On any failure the exception is logged and a generic error response is
    returned instead of propagating.
    """
    try:
        with Timer('Get Auth URL'):
            auth_url = spotify_api.get_auth_url()
        return response.make(error=False, response=dict(redirect=auth_url))
    except Exception as e:
        log.error(
            f'Exception while processing {login.__name__} function: [{e}]')
        log.exception(e)
        return response.make(error=True, message=MESSAGE_ERROR)
def playlist(code, github_user):
    """
    Build a Spotify playlist seeded by a GitHub user's commit messages.

    Pipeline: OAuth token -> Spotify user id -> create playlist -> fetch
    commit messages -> extract most common words -> search one track per
    word (in parallel) -> add the tracks to the playlist.

    :param code: Spotify OAuth authorization code.
    :param github_user: GitHub username whose commits seed the search terms.
    :return: Success response with the playlist URL, or an error response.
    """
    try:
        with Timer('Request token retrieving'):
            access_token = spotify_api.get_access_token(code)
            if not access_token:
                return response.make(error=True,
                                     message=MESSAGE_TOKEN_NOT_FOUND)
        with Timer('Get profile data'):
            user_id = spotify_api.get_current_user_id(access_token)
            if not user_id:
                return response.make(error=True,
                                     message=MESSAGE_SPOTIFY_NOT_FOUND)
        with Timer('Playlist generation'):
            playlist_id, playlist_url = spotify_api.post_playlist(
                access_token, user_id, github_user)
            # Both the id (to add tracks) and the url (for the response) are
            # needed downstream, so either one missing is a failure. The
            # original `and` only caught the case where both were missing.
            if not playlist_id or not playlist_url:
                return response.make(error=True,
                                     message=MESSAGE_SPOTIFY_PLAYLIST_ERROR)
        with Timer('Retrieve commits from user'):
            commit_messages = github_api.get_commit_messages(github_user)
            if not commit_messages:
                return response.make(error=True,
                                     message=MESSAGE_COMMIT_NOT_FOUND)
        with Timer('Retrieve most common words'):
            most_common_words = nltk.extract_most_common(commit_messages)
        with Timer('Search for tracks'):
            with ThreadPool(CONCURRENT_POOL) as pool:
                thread_args = [(access_token, word)
                               for word in most_common_words]
                track_uri_list = list(
                    pool.imap(spotify_api.search_for_tracks, thread_args))
                # Drop words for which no track was found.
                track_uri_list = [t for t in track_uri_list if t]
        with Timer('Add tracks to the playlist'):
            success = spotify_api.add_tracks_to_playlist(
                access_token, playlist_id, track_uri_list)
            if not success:
                return response.make(error=True,
                                     message=MESSAGE_SPOTIFY_TRACK_ERROR)
        return response.make(error=False, response=dict(url=playlist_url))
    except Exception as e:
        log.error(
            f'Exception while processing {playlist.__name__} function: [{e}]')
        log.exception(e)
        return response.make(error=True, message=MESSAGE_ERROR)
def __wait_till_other_filter_drop_downs_refresh():
    """Give dependent filter drop-downs time to refresh after a change.

    NOTE(review): a fixed 2-second sleep is a heuristic wait — there is no
    explicit readiness check on the page.
    """
    Timer.sleep(2)
def run():
    """Entry point: initialise the browser on the log-in page.

    NOTE(review): `browser` is assigned but unused within this view — the
    function likely continues beyond this chunk; confirm before refactoring.
    """
    with Timer('Accessing to Log in page'):
        browser = helper.init_browser()
def post():
    """
    Main function for /cut endpoint.

    Validates the JSON body, downloads or decodes the image, cuts the
    requested objects out of it and returns the result base64-encoded.
    Temporary input/output files are always removed, even on failure.

    :return: JSON response (body, HTTP status).
    """
    image_path = None
    output_image_path = None
    try:
        body = request.json
        with Timer('Validate input data'):
            required_parameters = ['objects']
            if not all(x in body for x in required_parameters):
                return jsonify(
                    make_response(
                        True,
                        message=
                        f'{required_parameters} body parameters are required.')
                ), 400
            if not all(o in COCO_INSTANCE_CATEGORY_NAMES
                       for o in body['objects']):
                return jsonify(
                    make_response(
                        True,
                        message=
                        'Some objects from the list will not be detected.')
                ), 400
            # Exactly one of image_url / image_base64 must be supplied.
            if bool('image_url' in body) == bool('image_base64' in body):
                return jsonify(
                    make_response(
                        True,
                        message=
                        'image_url (x)or image_base64 has to be specified')
                ), 400
            if 'return_white_bg' not in body:
                body['return_white_bg'] = False
        with Timer('Download image'):
            if 'image_url' in body:
                image_path = image_utils.download(body['image_url'])
            elif 'image_base64' in body:
                image_path = image_utils.decode(body['image_base64'])
            if not image_path:
                return jsonify(
                    make_response(True, message='Wrong image specified.')), 400
        with Timer('Generate image'):
            output_image_path = instance_segmentation_api(
                image_path, body['objects'])
        with Timer('Removing white color'):
            if not body['return_white_bg']:
                image_utils.remove_white(output_image_path)
        with Timer('Encoding image'):
            encoded_string = image_utils.encode(output_image_path)
        return jsonify(make_response(False, image_base64=encoded_string)), 200
    except Exception as e:
        # Unexpected failures are server-side errors; the original returned
        # 400, mislabelling them as client mistakes.
        return jsonify(make_response(True,
                                     message=f'Unexpected error: {e}')), 500
    finally:
        # Clean up temporary files even when the pipeline failed mid-way
        # (the original only removed them on the success path, leaking files
        # on error).
        with Timer('Remove input and output images'):
            for path in (image_path, output_image_path):
                if path and os.path.exists(path):
                    os.remove(path)
        gc.collect()
job_index = int( InputOutput.input('Enter Sl.No of Job to apply for : ')) if job_index in range(0, len(job_results)): email_address = InputOutput.input('Email address : ') first_name = InputOutput.input('First Name : ') last_name = InputOutput.input('Last Name : ') phone_number = InputOutput.get_valid_phone_number() gender = InputOutput.input('Gender', ['Male', 'Female', 'Undeclared']) user = User(first_name, last_name, phone_number, email_address, InputOutput.get_valid_password(), gender) answers = Answers(False, False, False, False, False, ["India"], True, True) job_application = JobApplication(driver, job_results[job_index], user, build_pages()) job_application.apply() InputOutput.output('\n\n' + Color.UNDERLINE + Color.BOLD + 'Job Application complete! Thank you for using ' 'our service.' + Color.END + Color.END) Timer.sleep(5) else: raise Exception('Invalid job chosen') except Exception as e: InputOutput.output(e) InputOutput.output('No Jobs found') finally: driver.close()
def get(github_user):
    """Aggregate a GitHub user's profile, repository, language, topic and
    contributor statistics into a single response dictionary.

    :param github_user: GitHub username to analyse.
    :return: Success response with the collected stats, or an error response
        when the user or their repositories cannot be fetched.
    """
    try:
        with Timer('Initialize response dictionary'):
            resp = {}
        with Timer('Basic information'):
            basic_information = github_api.get_basic_user_information(
                github_user)
            if not basic_information:
                return response.make(error=True,
                                     message=MESSAGE_USER_NOT_FOUND)
            # NOTE(review): response.get(key, container, default=...) is a
            # project helper (key first, mapping second) — not dict.get.
            resp['username'] = github_user
            resp['photo'] = response.get('avatar_url', basic_information)
            resp['public_repos'] = response.get('public_repos',
                                                basic_information)
            resp['public_gists'] = response.get('public_gists',
                                                basic_information)
            resp['followers'] = response.get('followers', basic_information)
            resp['following'] = response.get('following', basic_information)
        with Timer('Repositories'):
            repos_list = github_api.get_repos_from_user(github_user)
            if not repos_list:
                return response.make(error=True,
                                     message=MESSAGE_USER_NOT_FOUND)
            resp['repo_amount'] = len(repos_list)
            resp['repo_fork_amount'] = sum(
                [response.get('fork', d, default=False) for d in repos_list])
            # Divided by 1000 — presumably KB to MB; TODO confirm units.
            resp['repo_total_size'] = sum(
                [response.get('size', d, default=0) for d in repos_list]) / 1000
            resp['repo_total_stars'] = sum([
                response.get('stargazers_count', d, default=0)
                for d in repos_list
            ])
            resp['repo_total_forks'] = sum([
                response.get('forks_count', d, default=0) for d in repos_list
            ])
            resp['repo_total_open_issues'] = sum([
                response.get('open_issues', d, default=0) for d in repos_list
            ])
            # repo_amount is non-zero here (guarded above), so the averages
            # are safe.
            resp['repo_avg_size'] = formatter.to_float(
                resp['repo_total_size'] / resp['repo_amount'])
            resp['repo_avg_stars'] = formatter.to_float(
                resp['repo_total_stars'] / resp['repo_amount'])
            resp['repo_avg_forks'] = formatter.to_float(
                resp['repo_total_forks'] / resp['repo_amount'])
            resp['repo_avg_open_issues'] = formatter.to_float(
                resp['repo_total_open_issues'] / resp['repo_amount'])
        with Timer('Languages & topics - threads'):
            # Fetch languages and topics per repo in parallel.
            with ThreadPool(CONCURRENT_POOL) as pool:
                thread_args = [(github_user, response.get('name', r))
                               for r in repos_list if response.get('name', r)]
                language_response_list = list(
                    pool.imap(github_api.get_languages, thread_args))
                topic_response_list = list(
                    pool.imap(github_api.get_topics, thread_args))
        with Timer('Languages & topics - amount'):
            # Accumulate per-repo language byte counts into one dict.
            languages_dict = {}
            for language_response in language_response_list:
                if language_response:
                    for key, value in language_response.items():
                        if key not in languages_dict:
                            languages_dict[key] = 0
                        languages_dict[key] += value
            # Count topic occurrences across repos.
            topics_dict = {}
            for topic_response in topic_response_list:
                if topic_response:
                    for topic in topic_response:
                        if topic not in topics_dict:
                            topics_dict[topic] = 0
                        topics_dict[topic] += 1
        with Timer('Languages - percentage'):
            # Keep the top GITHUB_LANGUAGES_MAX languages; collapse the rest
            # into a single 'Others' bucket.
            resp['languages'] = []
            total_languages = sum(languages_dict.values())
            sorted_languages = sorted(languages_dict.items(),
                                      key=operator.itemgetter(1),
                                      reverse=True)
            for idx in range(0, len(sorted_languages)):
                if idx < GITHUB_LANGUAGES_MAX:
                    language_name, language_amount = sorted_languages[idx]
                    percentage = formatter.to_float(
                        (language_amount / total_languages) * 100)
                    resp['languages'].append(
                        dict(label=language_name,
                             amount=language_amount,
                             percentage=percentage))
                else:
                    language_amount = sum([
                        v[1] for v in sorted_languages[GITHUB_LANGUAGES_MAX:]
                    ])
                    percentage = formatter.to_float(
                        (language_amount / total_languages) * 100)
                    resp['languages'].append(
                        dict(label='Others',
                             amount=language_amount,
                             percentage=percentage))
                    break
        with Timer('Topics - percentage'):
            # Same top-N + 'others' scheme for topics.
            resp['topics'] = []
            total_topics = sum(topics_dict.values())
            sorted_topics = sorted(topics_dict.items(),
                                   key=operator.itemgetter(1),
                                   reverse=True)
            for idx in range(0, len(sorted_topics)):
                if idx < GITHUB_TOPICS_MAX:
                    topic_name, topic_amount = sorted_topics[idx]
                    percentage = formatter.to_float(
                        (topic_amount / total_topics) * 100)
                    resp['topics'].append(
                        dict(label=topic_name,
                             amount=topic_amount,
                             percentage=percentage))
                else:
                    topic_amount = sum(
                        [v[1] for v in sorted_topics[GITHUB_TOPICS_MAX:]])
                    percentage = formatter.to_float(
                        (topic_amount / total_topics) * 100)
                    resp['topics'].append(
                        dict(label='others',
                             amount=topic_amount,
                             percentage=percentage))
                    break
        with Timer('Contributors - thread'):
            # Fetch contributor lists per repo in parallel.
            with ThreadPool(CONCURRENT_POOL) as pool:
                thread_args = [(github_user, response.get('name', r))
                               for r in repos_list if response.get('name', r)]
                contributor_response_list = list(
                    pool.imap(github_api.get_contributors, thread_args))
        with Timer('Contributors - save'):
            resp['commits'] = 0
            resp['commits_user'] = 0
            resp['commits_contributor'] = 0
            contributors_dict = {}
            for contributor_response in contributor_response_list:
                if contributor_response:
                    for contributor in contributor_response:
                        contributor_name = response.get('login', contributor)
                        if contributor_name:
                            contributions = response.get('contributions',
                                                         contributor,
                                                         default=0)
                            resp['commits'] += contributions
                            # Split commits between the user themselves and
                            # everyone else (case-insensitive name match).
                            if contributor_name.lower() == github_user.lower():
                                resp['commits_user'] += contributions
                            else:
                                if contributor_name not in contributors_dict:
                                    contributors_dict[contributor_name] = dict(
                                        commits=0,
                                        photo=response.get('avatar_url',
                                                           contributor,
                                                           default=''),
                                        url=response.get('html_url',
                                                         contributor,
                                                         default=''))
                                resp['commits_contributor'] += contributions
                                contributors_dict[contributor_name][
                                    'commits'] += contributions
            # NOTE(review): if every repo returned an empty contributor list,
            # resp['commits'] stays 0 and these divisions raise
            # ZeroDivisionError — confirm whether that case can occur.
            resp['commits_user_percentage'] = formatter.to_float(
                (resp['commits_user'] / resp['commits']) * 100)
            resp['commits_contributor_percentage'] = formatter.to_float(
                (resp['commits_contributor'] / resp['commits']) * 100)
            # Flatten the contributor dict into a list sorted by commits.
            resp['contributors'] = []
            for contributor_name, contributor_dict in contributors_dict.items(
            ):
                contributor_dict['label'] = contributor_name
                resp['contributors'].append(contributor_dict)
            resp['contributors'] = sorted(resp['contributors'],
                                          key=lambda k: k['commits'],
                                          reverse=True)
        return response.make(error=False, response=resp)
    except Exception as e:
        log.error(f'Exception while processing {get.__name__} function: [{e}]')
        log.exception(e)
        return response.make(error=True, message=MESSAGE_ERROR)
def auto_factorial_cron_job():
    """Scheduled entry point: run the automation runner, timing the run
    under this function's name."""
    with Timer(auto_factorial_cron_job.__name__):
        runner.run()