def apply_for_the_job(driver, iter):
    """Walk the job listing recursively, opening each job in a new tab and applying.

    For every row on the current listing page that `check_if_job` accepts,
    the job link is opened in a new browser tab, scraped
    (`get_details_of_opening`), applied to (`make_application`), and then
    this function recurses one level deeper before closing the tab and
    returning to the previous window.

    NOTE: the parameter name `iter` shadows the `iter` builtin; it is kept
    for backward compatibility with existing callers. It serves as both the
    recursion depth and the index of this level's window handle.

    Args:
        driver: Selenium WebDriver positioned on a job-listing page.
        iter: Current recursion depth / window-handle index.

    Returns:
        0 after a listing level is processed; None when `iter_max`
        (module-level recursion cap) is reached.
    """
    if iter >= iter_max:  # recursion guard
        return
    item = 1  # number of job rows handled at this level
    elements = functions.find_classes(driver, job_row_class)
    for element in elements:
        if check_if_job(element):
            item += 1
            try:
                print('Opening new job tab')
                functions.open_in_new_tab(
                    functions.find_class(element, link_class), driver)
                functions.timeout(2)
                print('Going to new window')
                # switch_to.window replaces switch_to_window, which was
                # deprecated and removed in Selenium 4.
                driver.switch_to.window(driver.window_handles[iter + 1])
                functions.timeout(2)
                print('Getting details of the job')
                get_details_of_opening(driver)
                # Apply for the job
                make_application(driver)
                # Recurse into the newly opened listing
                apply_for_the_job(driver, iter + 1)
                print('closing the job')
                functions.close_browser(driver)
                # Switch back to the original window
                functions.timeout(2)
                print('going to prev window')
                driver.switch_to.window(driver.window_handles[iter])
            except Exception:
                # Was a bare `except:`, which also swallowed
                # KeyboardInterrupt; narrowed so the run stays interruptible.
                item -= 1
        # Safety valve: stop after roughly ten handled rows per level.
        if item > 10:
            break
    return 0
# Look up the credentials for this portal in the login table
# (if several rows match, the last one wins).
for record in login_details.itertuples():
    if record.website == portal:
        loginid = record.loginid
        password = record.password

# Open the portal in the browser.
driver.get(website)

# Authenticate with the credentials found above.
print("Logging into the website")
driver = functions.login_function(driver, login_placeholder_class, loginid,
                                  password_placeholder_class, password)
functions.timeout(15)

print('Finding elements to connect')
click_count = 0
for card in functions.find_classes(driver, element_class):
    try:
        button = functions.find_class(card, connect_button_class)
        print(button.text)
        if button.text == 'Connect':
            button.click()
            click_count += 1
            functions.timeout(1.5)
        # Hard cap so a long result list cannot spam connections.
        if click_count >= 100:
            break
    except:  # noqa: E722 — best-effort per card; any failure skips the card
        continue
def _str2bool(value):
    """Parse a command-line boolean.

    argparse's `type=bool` is broken: `bool('False')` is True, so any
    non-empty value used to enable the flag. This parser accepts the usual
    spellings and rejects everything else.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('1', 'true', 't', 'yes', 'y'):
        return True
    if value.lower() in ('0', 'false', 'f', 'no', 'n'):
        return False
    raise ValueError('expected a boolean flag value, got {!r}'.format(value))


# ---- Command-line configuration -------------------------------------------
argparser.add_argument('--path', type=str, default='../data/person_detection/')
argparser.add_argument('--labels', default=np.array(['car_side', 'cellphone', 'person']))
argparser.add_argument('--n_epochs', type=int, default=2)
argparser.add_argument('--batch_size', type=int, default=16)
argparser.add_argument('--criterion', default=nn.CrossEntropyLoss())
argparser.add_argument('--momentum', type=float, default=0.9)
argparser.add_argument('--transform', default=transform)
argparser.add_argument('--print_every', type=int, default=2)
argparser.add_argument('--learning_rate', type=float, default=0.001)
# BUG FIX: these two used `type=bool`, so passing e.g. `--save_model False`
# on the command line silently evaluated to True.
argparser.add_argument('--save_model', type=_str2bool, default=False)
argparser.add_argument('--feature_extract', type=_str2bool, default=True)
args = argparser.parse_args()
print(args)

# ---- Dataset: folder-per-class images, 50/50 train/test split --------------
data_set = torchvision.datasets.ImageFolder(root=args.path, transform=args.transform)
classes = func.find_classes(args.path)
print(data_set.class_to_idx)

train_size = len(data_set) // 2
test_size = len(data_set) - train_size
data_train, data_test = random_split(data_set, [train_size, test_size])
# NOTE(review): shuffle=False on the *training* loader is unusual for SGD
# training — confirm this is intentional before flipping it on.
train_loader = DataLoader(data_train, batch_size=args.batch_size, shuffle=False,
                          sampler=None, num_workers=0)
test_loader = DataLoader(data_test, batch_size=args.batch_size, shuffle=False,
                         sampler=None, num_workers=0)
print('Train sample: ', train_size)
print('Test sample: ', test_size)

# ---- Model: pretrained AlexNet, features optionally frozen, new head -------
model = alexnet(pretrained=True)
model = func.set_parameter_requires_grad(model, args.feature_extract)
model.classifier[6] = nn.Linear(in_features=4096, out_features=len(args.labels))
def get_details_of_opening(driver):
    """Scrape the currently open job posting and append one row to the CSV.

    Reads the description, title and hashtag blocks, then walks the job
    "details" sections — each rendered as 'Label\\nValue' text — and picks
    out the fields we track. The assembled row is appended to the
    module-level `dict_data` list, which is rewritten to
    `<output_file>.csv` on every call.

    Args:
        driver: Selenium WebDriver positioned on a job-detail page.

    Returns:
        1 on completion.
    """
    # Field defaults — sections missing from the page leave empty strings.
    jd = ''
    job_title = ''
    hashtag = ''
    last_login = ''
    postedBy = ''
    postedIn = ''
    jobCode = ''
    Location = ''
    postedOn = ''
    views = ''
    applications = ''
    position = ''
    recruiterAction = ''

    jd = functions.find_class(driver, job_description_class).text
    job_title = functions.find_class(driver, title_class).text
    hashtag = functions.find_class(driver, hastag_class).text

    elements = functions.find_classes(driver, job_details_class)
    for element in elements:
        try:
            sections = element.text.split('\n')
            # NOTE(review): the original source was corrupted at the
            # 'Last Login:' branch ('Last Login:'******'Posted by');
            # reconstructed as the obvious label/value dispatch below —
            # confirm against the live page markup.
            if sections[0] == 'Last Login:':
                last_login = sections[1]
            elif sections[0] == 'Posted by':
                postedBy = sections[1]
                position = sections[2]
            elif sections[0] == 'Posted in':
                postedIn = sections[1]
            elif sections[0] == 'Job Code':
                jobCode = sections[1]
            elif sections[0] == 'Location':
                Location = sections[1]
            elif sections[0] == 'Posted On':
                postedOn = sections[1]
            elif sections[0] == 'Views':
                views = sections[1]
            elif sections[0] == 'Applications':
                applications = sections[1]
            elif sections[0] == 'Recruiter Actions (what does this mean?)':
                recruiterAction = sections[1]
            else:
                continue
        except Exception:
            # Was a bare `except:`; narrowed so Ctrl-C still works.
            # Short/odd sections (IndexError) are simply skipped.
            continue
        print(element)  # debug: show which detail element was parsed

    outfile = output_file + '.csv'
    # NOTE(review): `postedOn` is scraped but has never been written to the
    # CSV; adding a 'Posted On' key would change the column set expected by
    # `csv_columns`, so it is left out for compatibility.
    dict_data.append({
        'Job Code': jobCode,
        'Title': job_title,
        'Hashtag': hashtag,
        'Description': jd,
        'Last Login': last_login,
        'Posted By': postedBy,
        'Position': position,
        'Posted In': postedIn,
        'Location': Location,
        'Views': views,
        'Applications': applications,
        'Recruiter Actions': recruiterAction,
    })
    functions.write_to_csv(outfile, dict_data, csv_columns)
    return 1
def apply_filter(driver):
    """Apply the work-experience filter on the job-listing page.

    Opens the second work-experience toggle, picks the option whose
    rel attribute is '2' from the dropdown, submits the filter and
    waits briefly for the page to refresh.
    """
    toggles = functions.find_classes(driver, workex_filter_class)
    toggles[1].click()
    dropdown = functions.find_class(driver, workex_chooser_class)
    option = functions.find_x_path(dropdown, "//a[@rel='2']")
    option.click()
    submit = functions.find_class(driver, filter_button_class)
    submit.click()
    functions.timeout(2)