def process_display(verbose, type, message):
    if args.v is False:
        verbose = 0
    if type == 0 and verbose == 0:
        colors.success(message)
    elif type == 1 and verbose == 0:
        colors.error(message)
    elif type == 2 and verbose == 0:
        colors.process(message)
    elif type == 3 and verbose == 0:
        colors.info(message)
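# A minimal usage sketch for process_display (an assumption: `colors` exposes
# success/error/process/info helpers and a module-level `args` from argparse
# exists, since the function reads args.v). The type codes map to message
# styles: 0 success, 1 error, 2 process, 3 info.
#
#     process_display(0, 0, "Match found in target image.")     # colors.success
#     process_display(0, 1, "Could not read the source image.")  # colors.error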
def main():
    global template_image_dir_name
    global target_images_dir_name
    source_path = None
    logo.banner()
    print("\n")
    try:
        import argparse
        import sys
    except ImportError:
        print("[-] Error importing argparse or sys module")
        exit(1)
    parser = argparse.ArgumentParser(
        description='A program which, given a source image and a set of '
                    'target images, matches the source image against the '
                    'target images to find its matches')
    parser.add_argument('-p', '--path', help='Path of source image')
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s 1.0.0(beta)',
                        help='Prints the version of Photoroid')
    parser.add_argument('-t', '--target',
                        help='Path of target images directory',
                        default=target_images_dir_name_default)
    parser.add_argument('-o', '--output',
                        help='Path of template images directory',
                        default=template_image_dir_name_default)
    if len(sys.argv) > 1:
        args = parser.parse_args()
        source_path = args.path
        template_image_dir_name = args.output
        target_images_dir_name = args.target
    if source_path is None:
        source_path = str(input("[ {}!{} ] Enter path of source image: {}".format(
            colors.white, colors.end, colors.lightgreen)))
    print("\n")  # Blank line for UI spacing
    # Getting the image to be searched
    source = cv2.imread(source_path, cv2.IMREAD_COLOR)
    colors.process("Creating template sections of source image.")
    start_dir = os.getcwd()  # Saving the start directory
    # Creating template sections of the source image.
    template_images(source)
    colors.success("12 template sections of source image created.")
    os.chdir(start_dir)
    colors.process("Setting 'Core' as current directory.")
    check_match()
    print("{}\nThank you for using my tool\n".format(colors.blue))
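# A small sketch of the usual entry-point guard, assuming this file is run as
# Photoroid's main script (not confirmed by the snippet itself):
if __name__ == "__main__":
    main()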
logo.banner()
print("\n")
try:
    import argparse
    import sys
except ImportError:
    print("[-] Error importing argparse or sys module")
    exit(1)
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', help='Path of template image')
if len(sys.argv) > 1:
    args = parser.parse_args()
    template_path = args.path
else:
    template_path = str(input("[ {}!{} ] Enter template path: {}".format(
        colors.white, colors.end, colors.lightgreen)))
print("\n")  # Blank line for UI spacing
# Getting the image to be searched
template = cv2.imread(template_path, 1)
colors.process("Creating sections of template image.")
# Creating sections of the template image.
template_images(template)
colors.success("12 sections of template image created.")
os.chdir(os.path.join("..", ""))
colors.process("Setting 'Core' as current directory.")
check_match()
print("{}\nThank you for using my tool\n".format(colors.blue))
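# Both variants above delegate to template_images() to cut the input image
# into 12 sections. A hypothetical sketch of such a split (a 3x4 grid via
# array slicing on the image returned by cv2.imread); Photoroid's actual
# template_images() may differ:
def split_into_sections(image, rows=3, cols=4):
    """Split an image (numpy array) into rows*cols equal sections."""
    height, width = image.shape[:2]
    sec_h, sec_w = height // rows, width // cols
    sections = []
    for r in range(rows):
        for c in range(cols):
            sections.append(image[r * sec_h:(r + 1) * sec_h,
                                  c * sec_w:(c + 1) * sec_w])
    return sections  # 12 sections for the default 3x4 grid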
            watch_value = int(watch_value)
        if string.endswith("/stargazers"):
            # Finding total stargazers
            star_value = (a_tag.get_text()).strip()
            star_value = formated(star_value)
            colors.success("Total stargazers : " + star_value, verbose)
            time.sleep(1)
            star_value = int(star_value)
        if string.endswith("/members"):
            # Finding total members
            fork_value = (a_tag.get_text()).strip()
            fork_value = formated(fork_value)
            colors.success("Total Forks : " + fork_value, verbose)
            time.sleep(1)
            fork_value = int(fork_value)
            break
    stargazer_link = repository_link + "/stargazers"
    colors.process("Fetching stargazers list", verbose)
    # Getting list of all the stargazers
    while stargazer_link is not None:
        stargazer_html = requests.get(stargazer_link).text
        soup2 = BeautifulSoup(stargazer_html, "lxml")
        a_next = soup2.findAll("a")
        for a in a_next:
            if a.get_text() == "Next":
                stargazer_link = a.get('href')
                break
        else:
            stargazer_link = None
        follow_names = soup2.findAll("h3", {"class": "follow-list-name"})
        for name in follow_names:
            a_tag = name.findAll("a")
            data.name_list.append(a_tag[0].get_text())
            username = a_tag[0].get("href")
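# The fragment above runs the scraped counter text through formated() before
# calling int(), since GitHub renders counts like "1,024" which int() rejects.
# A hypothetical normaliser in that spirit (the project's real formated() may
# differ):
def strip_count_formatting(value):
    """Normalise a scraped counter such as '1,024' to plain digits."""
    return value.replace(",", "").strip()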
            break
    stargazer_link = repository_link + "/stargazers"
    stargazer_html = requests.get(stargazer_link).text
    soup2 = BeautifulSoup(stargazer_html, "lxml")
    follow_names = soup2.findAll("h3", {"class": "follow-list-name"})
    for name in follow_names:
        a_tag = name.findAll("a")
        data.name_list.append(a_tag[0].get_text())
        username = a_tag[0].get("href")
        data.username_list.append(username[1:])
    count = 1
    pos = 0
    colors.process("Doxing started ...\n")
    time.sleep(1)
    print(colors.red + "--------------------------------------------------------------------------",
          colors.green, end="\n\n")
    while count <= star_value:
        starer_url = "https://github.com/" + data.username_list[pos]
        user_html = requests.get(starer_url).text
        soup3 = BeautifulSoup(user_html, "lxml")
        repo_data = requests.get(
            "https://github.com/{}?tab=repositories&type=source".format(
                data.username_list[pos])).text
        repo_soup = BeautifulSoup(repo_data, "lxml")
        a_tags = repo_soup.findAll("a")
        repositories_list = []
        for a_tag in a_tags:
def stardox(repo_link, ver, max_threads):
    try:
        print_data = True
        save_data = False
        for arg in sys.argv[1:]:
            if arg == '-s' or arg == '--save':
                save_data = True
                print_data = False
        repository_link = repo_link
        verbose = ver
        try:
            # Getting HTML page of repository
            html = requests.get(repository_link, timeout=8).text
        except (requests.exceptions.RequestException,
                requests.exceptions.HTTPError):
            colors.error(
                "Enter the repository url in the given format "
                "[ https://github.com/username/repository_name ]")
            sys.exit(1)
        # Checking if the given url is of a repository or not.
        result = verify_url(html)
        if result:
            colors.success("Got the repository data ", verbose)
        else:
            colors.error("Please enter the correct URL ")
            sys.exit(0)
        # Parsing the html data using BeautifulSoup
        soup1 = BeautifulSoup(html, "lxml")
        try:
            import data
        except ImportError:
            colors.error('Error importing data module')
            sys.exit(1)
        title = getting_header(soup1)  # Getting the title of the page
        data.header = title  # Storing title of the page as Project Title
        colors.success("Repository Title : " + title, verbose)
        star_value = watch_value = fork_value = 0
        # Finding all the 'a' tags in the response html data.
        a_tags = soup1.findAll("a")
        for a_tag in a_tags:
            # Finding total stargazers of the repository
            string = a_tag.get("href")
            if string is None:
                continue  # Skip anchors that carry no href attribute
            if string.endswith("/watchers"):
                # Finding total watchers
                watch_value = (a_tag.get_text()).strip()
                colors.success("Total watchers : " + watch_value, verbose)
            if string.endswith("/stargazers"):
                # Finding total stargazers
                star_value = (a_tag.get_text()).strip()
                colors.success("Total stargazers : " + star_value, verbose)
            if string.endswith("/members"):
                # Finding total members
                fork_value = (a_tag.get_text()).strip()
                colors.success("Total Forks : " + fork_value, verbose)
                break
        stargazer_link = repository_link + "/stargazers"
        colors.process("Fetching stargazers list", verbose)
        # Getting list of all the stargazers
        while stargazer_link is not None:
            stargazer_html = requests.get(stargazer_link).text
            soup2 = BeautifulSoup(stargazer_html, "lxml")
            a_next = soup2.findAll("a")
            for a in a_next:
                if a.get_text() == "Next":
                    stargazer_link = a.get('href')
                    break
            else:
                stargazer_link = None
            follow_names = soup2.findAll("h3", {"class": "follow-list-name"})
            for name in follow_names:
                a_tag = name.findAll("a")
                data.name_list.append(a_tag[0].get_text())
                username = a_tag[0].get("href")
                data.username_list.append(username[1:])
        colors.process("Doxing started ...\n", verbose)
        print(colors.red + "-" * 75, colors.green, end="\n\n")

        def wrapper_fetch(f, print_data, q1, q2):
            # Worker: pull a (username, name) pair and fetch its details
            while True:
                try:
                    username = q1.get(timeout=3)
                    name = q2.get(timeout=3)
                except queue.Empty:
                    return
                f(print_data, username, name)
                q1.task_done()
                q2.task_done()

        q1 = queue.Queue()
        q2 = queue.Queue()
        for (username, name) in zip(data.username_list, data.name_list):
            q1.put_nowait(username)
            q2.put_nowait(name)
        for _ in range(max_threads):
            threading.Thread(target=wrapper_fetch,
                             args=(fetch_details, print_data, q1, q2)).start()
        q1.join()
        q2.join()
        if save_data is True:
            save()
        print("\n", colors.green + "-" * 75, colors.green, end="\n\n")
    except KeyboardInterrupt:
        print("\n\nYou're Great..!\nThanks for using :)")
        sys.exit(0)
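# A minimal invocation sketch for the threaded variant above (assuming the
# module-level imports it relies on: sys, queue, threading, requests and
# BeautifulSoup):
#
#     stardox("https://github.com/username/repository_name", ver=1, max_threads=8)
#
# Each worker drains the two queues in lockstep, so max_threads bounds the
# number of concurrent profile fetches.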
if(string.endswith("/stargazers")): star_value=(a_tag.get_text()).strip() star_value=formated(star_value) colors.success("Total stargazers : "+star_value) time.sleep(1) star_value=int(star_value) if(string.endswith("/members")): fork_value=(a_tag.get_text()).strip() fork_value=formated(fork_value) colors.success("Total Forks : "+fork_value) time.sleep(1) fork_value=int(fork_value) break stargazer_link=repository_link+"/stargazers" colors.process("Fetching stargazers list") while (stargazer_link!=None): stargazer_html=requests.get(stargazer_link).text soup2=BeautifulSoup(stargazer_html,"lxml") a_next = soup2.findAll("a") for a in a_next: if a.get_text() == "Next": stargazer_link = a.get('href') break else: stargazer_link = None follow_names=soup2.findAll("h3",{"class":"follow-list-name"}) for name in follow_names: a_tag=name.findAll("a") data.name_list.append(a_tag[0].get_text())
def stardox(repo_link, ver):
    try:
        print_data = True
        save_data = False
        for arg in sys.argv[1:]:
            if arg == '-s' or arg == '--save':
                save_data = True
                print_data = False
        repository_link = repo_link
        verbose = ver
        try:
            # Getting HTML page of repository
            html = requests.get(repository_link, timeout=8).text
        except (requests.exceptions.RequestException,
                requests.exceptions.HTTPError):
            colors.error("Enter the repository url in the given format "
                         "[ https://github.com/username/repository_name ]")
            sys.exit(1)
        # Checking if the given url is of a repository or not.
        result = verify_url(html)
        if result:
            colors.success("Got the repository data ", verbose)
        else:
            colors.error("Please enter the correct URL ")
            sys.exit(0)
        # Parsing the html data using BeautifulSoup
        soup1 = BeautifulSoup(html, "lxml")
        try:
            import data
        except ImportError:
            colors.error('Error importing data module')
            sys.exit(1)
        title = getting_header(soup1)  # Getting the title of the page
        data.header = title  # Storing title of the page as Project Title
        colors.success("Repository Title : " + title, verbose)
        star_value = watch_value = fork_value = 0
        # Finding all the 'a' tags in the response html data.
        a_tags = soup1.findAll("a")
        for a_tag in a_tags:
            # Finding total stargazers of the repository
            string = a_tag.get("href")
            if string is None:
                continue  # Skip anchors that carry no href attribute
            if string.endswith("/watchers"):
                # Finding total watchers
                watch_value = (a_tag.get_text()).strip()
                colors.success("Total watchers : " + watch_value, verbose)
            if string.endswith("/stargazers"):
                # Finding total stargazers
                star_value = (a_tag.get_text()).strip()
                colors.success("Total stargazers : " + star_value, verbose)
            if string.endswith("/members"):
                # Finding total members
                fork_value = (a_tag.get_text()).strip()
                colors.success("Total Forks : " + fork_value, verbose)
                break
        stargazer_link = repository_link + "/stargazers"
        colors.process("Fetching stargazers list", verbose)
        # Getting list of all the stargazers
        while stargazer_link is not None:
            stargazer_html = requests.get(stargazer_link).text
            soup2 = BeautifulSoup(stargazer_html, "lxml")
            a_next = soup2.findAll("a")
            for a in a_next:
                if a.get_text() == "Next":
                    stargazer_link = a.get('href')
                    break
            else:
                stargazer_link = None
            follow_names = soup2.findAll("h3", {"class": "follow-list-name"})
            for name in follow_names:
                a_tag = name.findAll("a")
                data.name_list.append(a_tag[0].get_text())
                username = a_tag[0].get("href")
                data.username_list.append(username[1:])
        count = 1
        pos = 0
        colors.process("Doxing started ...\n", verbose)
        print(colors.red + "-" * 75, colors.green, end="\n\n")
        # Fetching details of stargazers one by one.
        while count <= len(data.username_list):
            starer_url = "https://github.com/" + data.username_list[pos]
            user_html = requests.get(starer_url).text
            soup3 = BeautifulSoup(user_html, "lxml")
            repo_data = requests.get(
                "https://github.com/{}?tab=repositories&type=source".format(
                    data.username_list[pos])).text
            repo_soup = BeautifulSoup(repo_data, "lxml")
            a_tags = repo_soup.findAll("a")
            repositories_list = []
            for a_tag in a_tags:
                if a_tag.get("itemprop") == "name codeRepository":
                    repositories_list.append(a_tag.get_text().strip())
            if len(repositories_list) > 0:
                # Getting stargazer's email
                email = get_latest_commit(
                    repositories_list[0], data.username_list[pos])
                data.email_list.append(str(email))
            else:
                data.email_list.append("Not enough information.")
            if user_html is not None:
                items = soup3.findAll("a", {"class": "UnderlineNav-item"})
                for item in items[1:]:
                    # Getting total repositories of the stargazer
                    if item.get("href").endswith("repositories"):
                        a_tag = item.findAll("span")
                        repo_count = a_tag[0].get_text()
                        data.repo_list.append(repo_count)
                    # Getting total stars by the stargazer
                    elif item.get("href").endswith("stars"):
                        a_tag = item.findAll("span")
                        star_count = a_tag[0].get_text()
                        data.star_list.append(star_count)
                    # Getting total followers of the stargazer
                    elif item.get("href").endswith("followers"):
                        a_tag = item.findAll("span")
                        followers_count = a_tag[0].get_text()
                        data.followers_list.append(followers_count)
                    # Getting following count of the stargazer
                    elif item.get("href").endswith("following"):
                        a_tag = item.findAll("span")
                        following_count = a_tag[0].get_text()
                        data.following_list.append(following_count)
            if print_data is True:
                try:
                    import structer
                    # Plotting the tree structure of the fetched details
                    structer.plotdata(len(data.username_list), pos, count)
                except ImportError:
                    colors.error("Error importing structer module.")
                    sys.exit(1)
            count += 1
            pos += 1
        if save_data is True:
            save()
        print("\n", colors.green + "-" * 75, colors.green, end="\n\n")
    except KeyboardInterrupt:
        print("\n\nYou're Great..!\nThanks for using :)")
        sys.exit(0)
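# The loop above delegates the email lookup to get_latest_commit(). One
# well-known approach is GitHub's public ".patch" view, whose "From:" header
# carries the commit author's address. A hypothetical sketch under that
# assumption (the project's real helper and the branch name may differ):
import re

import requests


def email_from_patch(username, repository):
    """Best-effort email lookup via the repository's latest commit patch."""
    url = "https://github.com/{}/{}/commits/master.patch".format(
        username, repository)
    patch = requests.get(url, timeout=8).text
    match = re.search(r"From: .+? <(.+?)>", patch)
    return match.group(1) if match else None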