def main():
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%H:%M:%S')
    logging.info('begin validate arguments')
    nLen = len(sys.argv)
    if nLen < 4:
        usage()
    else:
        copyFile = sys.argv[1]
        if not os.path.exists(copyFile):
            logging.error('CopyRight file ' + copyFile + ' does not exist')
            sys.exit(-1)
        srcDir = sys.argv[2]
        # str.replace returns a new string; the result must be assigned back
        srcDir = srcDir.replace('/', os.sep).replace('\\', os.sep)
        if not srcDir.endswith(os.sep):
            srcDir = srcDir + os.sep
        if not os.path.exists(srcDir):
            logging.error('SRC_DIR ' + srcDir + ' does not exist')
            sys.exit(-2)
        filePattern = sys.argv[3:]
        if len(filePattern) == 0:
            logging.error('File_Pattern not specified')
            sys.exit(-3)
        logging.info('read CopyRight file ' + copyFile + ' into memory')
        exactCopy(copyFile)
        process(srcDir, filePattern)
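# A minimal invocation sketch for main() above (hedged: the script name
# add_copyright.py is hypothetical, and usage(), exactCopy() and process()
# are helpers from the surrounding module, not shown here):
#
#   python add_copyright.py COPYRIGHT.txt src/ "*.py" "*.java"
#
# argv[1] is the copyright header file, argv[2] the source tree root, and
# argv[3:] one or more file patterns to process.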
def automate(domain):
    '''Automate the recon process.'''
    workplace = WorkPlace(domain)
    workplace.my_work()
    doing("Whois query")
    whois_result = whois(domain)
    if whois_result != 0:
        success(whois_result)
        workplace.reporter('whois', whois_result)
    else:
        error("Something went wrong")
    doing("GEOIP")
    geoip_result = geoip(domain)
    if geoip_result != 0:
        workplace.reporter('GeoIP', geoip_result)
        success(geoip_result)
    else:
        error("Something went wrong")
    sleep(1)
    doing("Subdomain enumeration")
    hackertarget_result = hackertarget(domain)
    if hackertarget_result != 0:
        workplace.reporter('subdomains', hackertarget_result)
        success(hackertarget_result)
    if hackertarget_result == 0:
        error("Something went wrong")
    sleep(1)
    doing("Getting targets from Shodan")
    shodan_result = shodan_parser(Shodan(domain))
    if shodan_result != 0:
        workplace.reporter('shodan', ' '.join(shodan_result))
        success("Result")
        for item in shodan_result:
            print(item)
    else:
        if key() == 0:
            error("Please add your API key")
        else:
            error("Something went wrong")
        exit()
    sleep(1)
    doing("Checking if the target is pwned")
    pwned_result = pwned_parser(pwned(domain))
    if isinstance(pwned_result, str):
        workplace.reporter('pwned', pwned_result)
        success(pwned_result)
        if pwned_result != 9:
            error("No breach")
    else:
        error("Something went wrong")
        exit()
    success(f"Workplace saved in: {workplace.name}")
def domain(module):
    '''Prompt for a target domain and run the given module against it.'''
    domain = str(input(GDOMAIN))
    data = load(module, domain)
    if data != 0:
        return data
    else:
        error("Something went wrong")
        exit()
def scrape_table_row(div_one_index):
    file_name_path = "div[@class='ef-name-col']"
    file_link_path = "/a[@class='ef-name-col__link']"
    log.info(" Find file name")
    file_name = find_table_text_by_rel(file_name_path, div_one_index)
    log.info(" Find file link")
    file_link = find_table_text_by_rel(file_name_path + file_link_path,
                                       div_one_index)
    log.info(" Find file creation date")
    file_date_created = find_table_text_by_rel(
        "div[@class='ef-date-created-col']", div_one_index)
    log.info(" Find file modification date")
    file_date_modified = find_table_text_by_rel(
        "div[@class='ef-date-modified-col']", div_one_index)
    log.info(" Find file size")
    file_size_str = find_table_text_by_rel("div[@class='ef-size-col']",
                                           div_one_index)
    file_date = raw_string_to_date(file_date_created)
    try:
        # Prefer the modification date when it is later than the creation date
        file_date_modified = raw_string_to_date(file_date_modified)
        if file_date_modified > file_date:
            file_date = file_date_modified
    except ValueError:
        log.info("No file modification date")
    if '/files/folder/' in file_link:
        log.info("It's a dir, entering: {}".format(file_name))
        url = driver.current_url
        driver.goto(file_link)
        children = index_right_pane_file_tree(driver, settings)
        log.info("Returning from dir")
        driver.goto(url)
        folder_size = 0
        for child in children:
            folder_size += child.size_bytes
        return CourseDir(name_in_fs=file_name, size_bytes=folder_size,
                         iso_date_class=file_date, download_url=file_link,
                         children=children)
    else:
        log.info("It's a file: {}".format(file_name))
        file_size = raw_file_size_to_bytes(file_size_str)
        if file_size == -1:
            log.error("file without size?")
            exit(1)
        return CourseFile(name_in_fs=file_name, size_bytes=file_size,
                          iso_date_class=file_date, download_url=file_link)
def read_whitelist(protocol):
    """
    Read whitelist barcodes from the config file entry Drops/whitelistDir.
    Return: a list of sets of whitelist barcodes.
    """
    whitelistdir = get_config("Drops", "whitelistDir")
    if protocol == "10X":
        bc_white = [set()]
        white_path = os.path.join(whitelistdir, "whitelist.10X.txt")
        print("[info] The white list is {}".format(white_path))
        with open(white_path, 'r') as infile:
            for line in infile:
                bc_white[0].add(line.strip())
    elif protocol in ["10X_14_10", "10X_14_5"]:
        bc_white = [set()]
        white_path = os.path.join(whitelistdir, "whitelist.10X_14.txt")
        print("[info] The white list is {}".format(white_path))
        with open(white_path, 'r') as infile:
            for line in infile:
                bc_white[0].add(line.strip())
    elif protocol == "indrop":
        # inDrop barcodes are stored reverse-complemented in two halves
        bc_white = [set(), set()]
        white_path = os.path.join(whitelistdir, "whitelist.indrop_1.txt")
        if not os.path.exists(white_path):
            sys.exit("Whitelist path does not exist: {}".format(white_path))
        with open(white_path, 'r') as infile:
            for line in infile:
                bc_rev = rev_comp(line.strip())
                bc_white[0].add(bc_rev)
        white_path = os.path.join(whitelistdir, "whitelist.indrop_2.txt")
        with open(white_path, 'r') as infile:
            for line in infile:
                bc_rev = rev_comp(line.strip())
                bc_white[1].add(bc_rev)
    elif protocol == "dropseq":
        bc_white = []
    else:
        raise Exception("Not a valid protocol type: {}".format(protocol))
    return bc_white
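# A minimal usage sketch for read_whitelist() (hedged: assumes the config file
# points Drops/whitelistDir at a directory containing whitelist.10X.txt, and
# that get_config and rev_comp come from the surrounding module):
#
#   bc_white = read_whitelist("10X")
#   print(len(bc_white[0]), "10X barcodes loaded")
#
# For "indrop" the two returned sets hold the reverse-complemented halves of
# the barcode; for "dropseq" there is no whitelist, so the list is empty.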
def yes_no(root: Tk, text: str, title: str = argv[0]) -> bool:
    """Display box asking yes or no.

    Arguments:
    root -- Tk root window to bind to
    text -- text displayed in box
    title -- text displayed in titlebar

    Result:
    Return True or False.
    Terminates program if user does not select 'yes' or 'no'.
    """
    yesno = messagebox.askyesno(title, text)
    if yesno not in (True, False):
        log.critical('%s: answer: %s\n', 'yes_no()', yesno)
        root.destroy()
        exit(0)
    return yesno
def folder_select(root: Tk, name: str) -> str:
    """Prompt to select folder.

    Arguments:
    root -- Tk root window to bind to
    name -- name of folder, to be used in titlebar of window

    Result:
    Return folder user selects.
    Exit if user presses 'Cancel' or closes window.
    """
    folder = filedialog.askdirectory(parent=root, initialdir='C:\\',
                                     mustexist=True,
                                     title='Select {} directory.'.format(name))
    if not folder:
        root.destroy()
        exit(0)
    log.debug('%s: folder: %s\n', 'folder_select()', folder)
    return folder
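# A minimal usage sketch for the two dialog helpers above (hedged: assumes
# `from tkinter import Tk, filedialog, messagebox`, `from sys import argv`,
# and a configured `log` logger, as the snippets imply):
#
#   root = Tk()
#   root.withdraw()                       # hide the empty root window
#   src = folder_select(root, 'source')   # exits if the user cancels
#   if yes_no(root, 'Process {}?'.format(src)):
#       ...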
def scan():
    '''Scanner interface.'''
    menu(["Clickjacking"])
    try:
        choose = int(input(CHOOSE))
        if choose == 1:
            domain = str(input(GDOMAIN))
            if click_jacking(domain):
                success(f"{domain} : could be \"Vulnerable\"")
            else:
                error("Not vulnerable")
            exit()
        else:
            error("Wrong number")
            exit()
    except KeyboardInterrupt:
        error("Bye bye")
        exit()
def main(argv):
    # Accept either TRAININGFILE MODELFILE, or TRAININGFILE MODELFILE -h DEVFILE
    if len(argv) not in (2, 4) or (len(argv) == 4 and argv[2] != '-h'):
        print("Usage: python3 perceplearn.py TRAININGFILE MODELFILE -h DEVFILE")
        sys.exit()
    if not os.path.exists(argv[0]):
        print('File does not exist')
        sys.exit()
    if not os.path.isfile(argv[0]):
        print('Not a file')
        sys.exit()
    if len(argv) == 4:
        if not os.path.exists(argv[3]):
            print(argv[3] + ' does not exist')
            sys.exit()
        if not os.path.isfile(argv[3]):
            print(argv[3] + ' not a file')
            sys.exit()
    infile = open(argv[0], 'r', errors='ignore')
    output = open(argv[1], 'wb')
    class_list = {}
    feature_list = {}
    model = {}
    class_n = 0
    feature_n = 0
    # First pass: index classes and features
    for line in infile:
        tokens = line.split()
        if tokens[0] not in class_list:
            class_list[tokens[0]] = class_n
            class_n += 1
        for feature in tokens:
            if feature not in feature_list:
                feature_list[feature] = feature_n
                feature_n += 1
    prod = [0] * len(class_list)
    weight = [[0] * len(feature_list) for _ in range(len(class_list))]
    avg_weight = [[0] * len(feature_list) for _ in range(len(class_list))]
    update = [[0] * len(feature_list) for _ in range(len(class_list))]
    icount = 0
    model['features'] = feature_list
    model['class'] = class_list
    for iteration in range(1, 20):
        infile = open(argv[0], 'r', errors='ignore')
        line_count = 0
        incorrect = 0
        # Shuffle the training examples each epoch
        data = [(random.random(), line) for line in infile]
        data.sort()
        for _, line in data:
            line_count += 1
            icount += 1
            tokens = line.split()
            true_class = class_list[tokens[0]]
            count = Counter(tokens[1:])
            for key, value in count.items():
                for cls in range(len(weight)):
                    prod[cls] += value * weight[cls][feature_list[key]]
            z = prod.index(max(prod))
            prod = [0] * len(class_list)
            if z != true_class:
                incorrect += 1
                for key, value in count.items():
                    weight[true_class][feature_list[key]] += value
                    update[true_class][feature_list[key]] += icount * value
                for key, value in count.items():
                    weight[z][feature_list[key]] -= value
                    update[z][feature_list[key]] -= icount * value
        accuracy = (line_count - incorrect) / line_count
        if len(argv) == 4:
            temp = [[update[i][j] / icount for j in range(len(update[i]))]
                    for i in range(len(update))]
            for i in range(len(weight)):
                avg_weight[i][:] = map(operator.sub, weight[i], temp[i])
            accuracy = getDevAccuracy(avg_weight, feature_list, class_list, argv[3])
            print('Iteration: ' + str(iteration) +
                  ' Accuracy on development set: ' + str(accuracy))
        if len(argv) == 2:
            print('Iteration: ' + str(iteration) +
                  ' Accuracy on training set: ' + str(accuracy))
    if len(argv) == 2:
        # Average the weights over all updates before saving
        temp = [[update[i][j] / icount for j in range(len(update[i]))]
                for i in range(len(update))]
        for i in range(len(weight)):
            weight[i][:] = map(operator.sub, weight[i], temp[i])
    model['weight'] = weight
    pickle.dump(model, output)
    output.close()
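# A note on the averaging step above (a hedged sketch of the reasoning):
# this is the lazy-averaging form of the averaged perceptron. `icount` counts
# every example seen across all epochs, and update[c][f] accumulates
# icount-scaled copies of each weight change, so the averaged weights fall
# out in one pass at the end:
#
#   avg_weight[c][f] = weight[c][f] - update[c][f] / icount
#
# which equals the mean of weight[c][f] over all icount steps, without paying
# an O(classes * features) addition after every single example.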
        self.init_analysis()
        return self.par


#################################################################################
# main program
if __name__ == "__main__":
    start_time = time.time()
    # check python version; the pipeline requires at least Python 3.4
    if sys.version_info < (3, 4):
        print(sys.version)
        print('\nError: this pipeline requires Python 3.4 or later.\n')
        sys.exit(2)

    # read variables.txt
    # var_file = '/home/yuan/rawdata_phip/phipseq17_virus_variables.txt'
    var_file = os.path.abspath(sys.argv[1])
    par = myIO.file_os(var_file, '=').to_dict()
    par['file_var'] = var_file
    # initiate parameters, directories and files
    par = launch_phip(par).init_par()

    # main loop
    bioPHIPfunc.phip(par).main_loop()
def manual():
    '''Manual interface.'''
    menu(["Whois", "GEO to IP", "Shodan", "Subdomains", "PWNED ?"])
    try:
        choose = int(input(CHOOSE))
        if choose == 1:
            domain(whois)
            exit()
        if choose == 2:
            domain(geoip)
            exit()
        if choose == 3:
            for item in shodan_parser(domain(Shodan)):
                print(item)
            exit()
        if choose == 4:
            domain(hackertarget)
            exit()
        if choose == 5:
            pwn = pwned_parser(domain(pwned))
            if pwn != 0:
                success(pwn)
                if pwn == 9:
                    error("Not Found")
            else:
                error("Something went wrong")
            exit()
        else:
            error("Wrong number")
            exit()
    except ValueError:
        error("Please enter numbers only")
        exit()
def sashimi_plot_without_bams(tsv_file, meta_file, gtf, group_id, out_dir,
                              prefix, shrink, min_coverage, pdf):
    with open(tsv_file, 'r') as f:
        lines = f.readlines()
    coord, strand = None, None
    for line in lines:
        if line.startswith('#') or line.startswith('GeneName'):
            continue
        # GeneName, GroupID, FeatureElement, FeatureType, FeatureLabel,
        # strand, p-value, q-value, dPSI, ReadCount1, ReadCount2, PSI
        items = line.strip().split('\t')
        _gene_name, _group_id, label, _strand = items[0], items[1], items[4], items[5]
        if _group_id == group_id:
            strand = _strand
            chr, start, end = parse_coordinates(label)
            if coord:
                coord[1], coord[2] = min(start, coord[1]), max(end, coord[2])
            else:
                coord = [chr, start, end]
    if not coord:
        raise Exception(f"Can't find the coordinate with the provided group ID {group_id}!")
    strand = strand if strand and strand != '.' else None
    coord[1], coord[2] = coord[1] - 100, coord[2] + 100
    x, y = get_depths_from_gtf(gtf, coord, strand)
    bam_dict, overlay_dict, color_dict = {"+": OrderedDict()}, OrderedDict(), OrderedDict()
    label_dict, id_list = OrderedDict(), []
    if strand == '-':
        bam_dict['-'] = OrderedDict()
    if not strand:
        strand = '+'
    with open(meta_file, 'r') as f:
        for line in f:
            id, overlay_level = line.strip().split('\t')
            id_list.append(id)
            overlay_dict.setdefault(overlay_level, []).append(id)
            label_dict[overlay_level] = overlay_level
            color_dict.setdefault(overlay_level, overlay_level)
            bam_dict[strand][id] = (x, y, [], [], [], [], [])
    with open(tsv_file, 'r') as f:
        lines = f.readlines()
    if 'rmats' in lines[0]:
        for line in lines:
            if line.startswith('#') or line.startswith('GeneName'):
                continue
            items = line.strip().split('\t')
            _gene_name, _group_id, label, _strand = items[0], items[1], items[4], items[5]
            chr, start, end = coord
            if _group_id == group_id:
                coordinates_list = parse_rmats_coordinates(label)
                for coordinates, k in zip(coordinates_list, [9, 10]):
                    read_counts = [int(v) for v in items[k].split(',')]
                    for chr, _start, _end in coordinates:
                        for i, count in enumerate(read_counts):
                            if count < min_coverage:
                                continue
                            bam_dict[strand][id_list[i]][2].append(_start)
                            bam_dict[strand][id_list[i]][3].append(_end)
                            bam_dict[strand][id_list[i]][4].append(y[_start - start - 1])
                            bam_dict[strand][id_list[i]][5].append(y[_end - start + 1])
                            bam_dict[strand][id_list[i]][6].append(count)
    else:
        for line in lines:
            if line.startswith('#') or line.startswith('GeneName'):
                continue
            # GeneName, GroupID, FeatureElement, FeatureType, FeatureLabel,
            # strand, p-value, q-value, dPSI, ReadCount1, ReadCount2, PSI
            items = line.strip().split('\t')
            _gene_name, _group_id, label, _strand = items[0], items[1], items[4], items[5]
            chr, _start, _end = parse_coordinates(label)
            if _group_id == group_id:
                read_counts = [int(v) for v in items[9].split(',')]
                # psis = [float(v) for v in items[11].split(',')]
                chr, start, end = coord
                for i, count in enumerate(read_counts):
                    if count < min_coverage:
                        continue
                    bam_dict[strand][id_list[i]][2].append(_start)
                    bam_dict[strand][id_list[i]][3].append(_end)
                    bam_dict[strand][id_list[i]][4].append(y[_start - start - 1])
                    bam_dict[strand][id_list[i]][5].append(y[_end - start + 1])
                    bam_dict[strand][id_list[i]][6].append(count)
    palette = get_preset_palette()
    # Find set of junctions to perform shrink
    intersected_introns = None
    if shrink:
        introns = (v for vs in bam_dict[strand].values() for v in zip(vs[2], vs[3]))
        intersected_introns = list(intersect_introns(introns))
    # *** PLOT *** Define plot height
    height, width, base_size = 3 * len(overlay_dict), 10, 14
    # *** PLOT *** Start R script by loading libraries, initializing variables, etc.
    R_script = setup_R_script(height, width, base_size, label_dict)
    R_script += colorize(color_dict, palette)
    R_script += make_R_lists(id_list, bam_dict[strand], overlay_dict, '', intersected_introns)
    out_format = 'pdf' if pdf else 'png'
    out_file = out_dir / '_'.join(filter(None, [prefix, f'sashimi.{out_format}']))
    resolution, alpha, height = 300, 0.5, 3
    R_script += R_script_plot(out_file, out_format, resolution, '', '', height, 3, alpha)
    if os.getenv('GGSASHIMI_DEBUG') is not None:
        with open("R_script", 'w') as r:
            r.write(R_script)
    else:
        plot(R_script)
def sashimi_plot_with_bams(bams, coordinate, gtf, out_dir, prefix, shrink,
                           strand="NONE", min_coverage=1, group_id=None,
                           tsv_file=None, pdf=False):
    if not group_id and not coordinate:
        raise Exception('Please specify either a coordinate or a group-id!')
    if group_id:
        if not tsv_file:
            raise Exception('Please specify a tsv file (--tsv-file) with the group-id option!')
        if coordinate:
            print('Warning: Jutils generates the Sashimi plot based on the provided group-id; the coordinate will be ignored!')
        with open(tsv_file, 'r') as f:
            lines = f.readlines()
        coord, strand = None, None
        for line in lines:
            if line.startswith('#') or line.startswith('GeneName'):
                continue
            # GeneName, GroupID, FeatureElement, FeatureType, FeatureLabel,
            # strand, p-value, q-value, dPSI, ReadCount1, ReadCount2, PSI
            items = line.strip().split('\t')
            _gene_name, _group_id, label, _strand = items[0], items[1], items[4], items[5]
            if _group_id == group_id:
                strand = _strand
                chr, start, end = parse_coordinates(label)
                if coord:
                    coord[1], coord[2] = min(start, coord[1]), max(end, coord[2])
                else:
                    coord = [chr, start, end]
        if not coord:
            raise Exception(f"Can't find the coordinate with the provided group ID {group_id} in {tsv_file}!")
        strand_codes = {'+': "SENSE", '-': "ANTISENSE"}
        strand = strand_codes[strand] if strand in '+-' else 'NONE'
        coordinate = f'{chr}:{coord[1]-100}-{coord[2]+100}'
    palette = get_preset_palette()
    bam_dict, overlay_dict, color_dict = {"+": OrderedDict()}, OrderedDict(), OrderedDict()
    label_dict, id_list = OrderedDict(), []
    if strand != "NONE":
        bam_dict["-"] = OrderedDict()
    for id, bam, overlay_level in read_bam_input(bams):
        if not os.path.isfile(bam):
            continue
        a, junctions = read_bam(bam, coordinate, strand)
        # dict views need wrapping in list() under Python 3
        if list(a.keys()) == ["+"] and all(v == 0 for v in list(a.values())[0]):
            print("WARN: Sample {} has no reads in the specified area.".format(id))
            continue
        id_list.append(id)
        for _strand in a:
            bam_dict[_strand][id] = prepare_for_R(a[_strand], junctions[_strand], coordinate, min_coverage)
        overlay_dict.setdefault(overlay_level, []).append(id)
        label_dict[overlay_level] = overlay_level
        color_dict.setdefault(overlay_level, overlay_level)
    # No bam files
    if not bam_dict["+"]:
        print("ERROR: No available bam files.")
        exit(1)
    if gtf:
        transcripts, exons = read_gtf(gtf, coordinate)
    # Iterate for plus and minus strand
    for strand in bam_dict:
        # Find set of junctions to perform shrink
        intersected_introns = None
        if shrink:
            introns = (v for vs in bam_dict[strand].values() for v in zip(vs[2], vs[3]))
            intersected_introns = list(intersect_introns(introns))
        # *** PLOT *** Define plot height
        height, width, ann_height, base_size = 3 * len(overlay_dict), 10, 3, 14
        if gtf:
            height += ann_height
        # *** PLOT *** Start R script by loading libraries, initializing variables, etc.
        R_script = setup_R_script(height, width, base_size, label_dict)
        R_script += colorize(color_dict, palette)
        # *** PLOT *** Prepare annotation plot only for the first bam file
        arrow_bins = 50
        if gtf:
            # Make introns from annotation (they are shrunk if required)
            annotation = make_introns(transcripts, exons, intersected_introns)
            x = list(bam_dict[strand].values())[0][0]
            if shrink:
                x, _ = shrink_density(x, x, intersected_introns)
            R_script += gtf_for_ggplot(annotation, x[0], x[-1], arrow_bins)
        R_script += make_R_lists(id_list, bam_dict[strand], overlay_dict, '', intersected_introns)
        out_format = 'pdf' if pdf else 'png'
        out_file = out_dir / '_'.join(filter(None, [prefix, f'sashimi.{out_format}']))
        resolution, alpha, height = 300, 0.5, 3
        R_script += R_script_plot(out_file, out_format, resolution, gtf, '', height, ann_height, alpha)
        if os.getenv('GGSASHIMI_DEBUG') is not None:
            with open("R_script", 'w') as r:
                r.write(R_script)
        else:
            plot(R_script)
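# A minimal invocation sketch for the two plotting entry points above (hedged:
# file names and the group ID are placeholders; out_dir is a pathlib.Path
# because the code joins it with the / operator):
#
#   from pathlib import Path
#   sashimi_plot_with_bams('bams.tsv', 'chr1:1000-2000', 'anno.gtf',
#                          Path('plots'), 'demo', shrink=True)
#   sashimi_plot_without_bams('events.tsv', 'meta.tsv', 'anno.gtf',
#                             group_id='E001', out_dir=Path('plots'),
#                             prefix='demo', shrink=False, min_coverage=1,
#                             pdf=True)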
print("Usage: python multi_clock_v2.py [revolution] [total loops] [\"What task?\"] "); exit(0); reason = str(sys.argv[3]); total = int(sys.argv[2]); revolution = int(sys.argv[1]) ; if(not os.path.exists(store_dir)): os.makedirs(store_dir) record_file_name = str(time.time()); record_file = open(store_dir+record_file_name,"w"); record_file.write(str(datetime.now())); record_file.write("Description:"); record_file.write(sys.argv[3]); record_file.write("Revolotion:"); record_file.write(sys.argv[1]); record_file.write("Total loops:"); record_file.write(sys.argv[2]); thread1=Thread(target = reset_timer, name ="thread-1"); try: Thread.start( thread1 ); except: print("Error: unable to start thread"); sys.exits(0); try: thread1.join(); except: sys.exit(0);
def manual():
    '''Manual interface.'''
    menu(["Whois", "IP to GeoLocation", "Shodan", "Subdomains", "PWNED ?",
          "Google Dork"])
    try:
        choose = int(input(CHOOSE))
        if choose == 1:
            from modules.recon.passive.whois import whois
            domain(whois)
            exit()
        if choose == 2:
            from modules.recon.passive.geoip import geoip
            domain(geoip)
            exit()
        if choose == 3:
            from modules.recon.passive.shodan import shodan
            for item in shodan_parser(domain(Shodan)):
                print(item)
            exit()
        if choose == 4:
            from modules.recon.passive.hackertarget import hackertarget
            domain(hackertarget)
            exit()
        if choose == 5:
            from modules.recon.passive.pwned import pwned
            pwn = pwned_parser(domain(pwned))
            if pwn != 0:
                success(pwn)
                if pwn == 9:
                    error("Not Found")
            else:
                error("Something went wrong")
            exit()
        if choose == 6:
            from modules.recon.passive.gdork import gdork
            dork = gdork(domain(gdork))
            if dork != 0:
                success(dork)
                if dork == 9:
                    error("Not Found")
            else:
                error("Something went wrong")
            exit()
        else:
            error("Wrong number")
            exit()
    except ValueError:
        error("Please enter numbers only")
        exit()
def create_worker_stack(self, cluster_name, vpc_id, vpc_sg, vpc_subnet_ids,
                        instance_type, node_num):
    # A stack of worker instances is created using CloudFormation.
    click.echo("*** Workers stack.")
    workers_name = cluster_name + "-workers"
    if self.stack_exists(workers_name):
        click.echo("Workers stack already exists.")
    else:
        click.echo("Creating workers stack...")
        keypair_name = cluster_name + "-keypair"
        # Create key pair
        self._ec2.create_key_pair(KeyName=keypair_name)
        # Create stack
        response = self._cf.create_stack(
            StackName=workers_name,
            TemplateURL=self.workers_template,
            Capabilities=["CAPABILITY_IAM"],
            Parameters=[
                {"ParameterKey": "ClusterName",
                 "ParameterValue": cluster_name},
                {"ParameterKey": "ClusterControlPlaneSecurityGroup",
                 "ParameterValue": vpc_sg},
                {"ParameterKey": "NodeGroupName",
                 "ParameterValue": cluster_name + "-worker-group"},
                {"ParameterKey": "NodeAutoScalingGroupMinSize",
                 "ParameterValue": str(1)},
                {"ParameterKey": "NodeAutoScalingGroupMaxSize",
                 "ParameterValue": str(node_num)},
                {"ParameterKey": "NodeInstanceType",
                 "ParameterValue": instance_type},
                {"ParameterKey": "KeyName",
                 "ParameterValue": keypair_name},
                {"ParameterKey": "VpcId",
                 "ParameterValue": vpc_id},
                {"ParameterKey": "Subnets",
                 "ParameterValue": ",".join(vpc_subnet_ids)},
            ],
            TimeoutInMinutes=15,
            OnFailure="DELETE",
        )
        if response is None:
            click.echo("Response is None, create worker group stack failed.")
            sys.exit(1)
        if "StackId" not in response:
            click.echo("StackId not in response, create worker group stack failed.")
            sys.exit(1)
        click.echo("Initiated workers (ETA 5-20 mins)...")
        click.echo("Waiting for workers stack creation to complete...")
        try:
            # Use a CloudFormation waiter to block until the stack deployment completes.
            waiter = self._cf.get_waiter("stack_create_complete")
            waiter.wait(StackName=workers_name)
        except Exception as e:
            click.echo("Error: %s, gave up waiting for stack to create." % str(e))
            sys.exit(1)
        click.echo("Worker stack created.")
    stack = self._cf.describe_stacks(StackName=workers_name)
    node_instance_role = None
    # We need the NodeInstanceRole output.
    for v in stack["Stacks"][0]["Outputs"]:
        if v["OutputKey"] == "NodeInstanceRole":
            node_instance_role = v["OutputValue"]
    click.echo("Node instance role: %s" % node_instance_role)
    click.echo("*** Update worker auth.")
    config = "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: aws-auth\n" + \
             "  namespace: kube-system\ndata:\n  mapRoles: |\n" + \
             "    - rolearn: " + node_instance_role + "\n" + \
             "      username: system:node:{{EC2PrivateDNSName}}\n" + \
             "      groups:\n        - system:bootstrappers\n        - system:nodes\n"
    click.echo("Write config map...")
    worker_auth_file = os.path.dirname(self._output_path) + "/aws-auth-cm.yaml"
    os.makedirs(os.path.dirname(worker_auth_file), exist_ok=True)
    with open(worker_auth_file, "w") as f:
        f.write(config)
    # Add the worker resource to the cluster
    try:
        subprocess.run(
            ["kubectl", "--kubeconfig=%s" % self._output_path,
             "apply", "-f", worker_auth_file],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
    except subprocess.CalledProcessError as e:
        click.echo("Error: %s" % e.stderr)
        sys.exit(1)
    click.echo("kube config generated. Try:")
    click.echo("  kubectl --kubeconfig=%s get nodes" % self._output_path)
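# A minimal usage sketch for create_worker_stack() (hedged: the attributes
# self._cf, self._ec2, self.workers_template and self._output_path come from
# the surrounding class, whose constructor is not shown; EksManager is a
# hypothetical name for that class):
#
#   mgr = EksManager(...)  # wires up the boto3 cloudformation/ec2 clients
#   mgr.create_worker_stack(
#       cluster_name="demo",
#       vpc_id="vpc-0123456789abcdef0",
#       vpc_sg="sg-0123456789abcdef0",
#       vpc_subnet_ids=["subnet-aaa", "subnet-bbb"],
#       instance_type="t3.medium",
#       node_num=3,
#   )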
import json
import sys
import os

if __name__ == '__main__':
    fname = '/etc/httpd/irods/irods_environment.json'
    if os.path.exists(fname):
        # read in the JSON file
        with open(fname, 'r') as f:
            cfg = json.load(f)
        # modify attributes from the environment
        cfg['irods_host'] = os.environ['IRODS_ICAT_HOST']
        cfg['irods_port'] = int(os.environ['IRODS_ZONE_PORT'])
        cfg['irods_zone_name'] = os.environ['IRODS_ZONE_NAME']
        cfg['irods_user_name'] = 'irods'
        cfg['irods_default_resource'] = ''
        # write the JSON file back
        with open(fname, 'w') as f:
            json.dump(cfg, f, sort_keys=True, indent=4, separators=(',', ': '))
    else:
        print("file not found: %s" % fname, file=sys.stderr)
        sys.exit(1)
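# A minimal invocation sketch (hedged: the script filename and the values are
# placeholders; the three environment variables must be set, or the script
# raises KeyError):
#
#   IRODS_ICAT_HOST=icat.example.org IRODS_ZONE_PORT=1247 \
#   IRODS_ZONE_NAME=tempZone python update_irods_env.py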