def __init__(self, shell):
    self.options = Options()
    self.shell = shell
    self.job = self.job()
    self.load()

def __init__(self, shell):
    self.options = Options()
    self.shell = shell
    self.loader = core.loader
    self.load()

def test_as_json(self):
    expected_args = {"arg1": "val1", "arg2": "val2", "arg3": "val3"}
    scheduler = {'type': 'test_type', 'autorun': 'true', 'args': expected_args}
    options = Options(enabled=True, scheduler=scheduler)
    expected_json = {'enabled': 'True', 'children': {}, 'scheduler': scheduler}
    self.assertDictEqual(options.as_json(), expected_json)

def test_to_xml(self):
    expected_args = {"arg1": "val1", "arg2": "val2", "arg3": "val3"}
    scheduler = {'type': 'test_type', 'autorun': 'true', 'args': expected_args}
    options = Options(enabled=True, scheduler=scheduler)
    options_xml = options.to_xml()
    options_derived = Options(xml=options_xml)
    self.assertEqual(options.enabled, options_derived.enabled)
    self.assertEqual(options.scheduler['type'], options_derived.scheduler['type'])
    self.assertEqual(options.scheduler['autorun'], options_derived.scheduler['autorun'])
    self.assertDictEqual(options.scheduler['args'], options_derived.scheduler['args'])

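# For orientation, a minimal sketch of the Options surface these tests exercise.
# It is inferred from the assertions above (string-coerced 'enabled', an empty
# 'children' dict by default) and is not the project's real implementation.
class Options(object):
    def __init__(self, enabled=False, scheduler=None, xml=None):
        self.enabled = enabled
        self.scheduler = scheduler if scheduler is not None else {}
        self.children = {}

    def as_json(self):
        # 'enabled' is serialized as its string form, matching expected_json above
        return {'enabled': str(self.enabled),
                'children': self.children,
                'scheduler': self.scheduler}
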
def update_workflow(playbook_name, workflow_name):
    if running_context.controller.is_workflow_registered(playbook_name, workflow_name):
        data = request.get_json()
        if data:
            if 'scheduler' in data:
                enabled = data['scheduler']['enabled'] if 'enabled' in data['scheduler'] else False
                scheduler = {
                    'type': data['scheduler']['type'] if 'type' in data['scheduler'] else 'cron',
                    'autorun': (str(data['scheduler']['autorun']).lower()
                                if 'autorun' in data['scheduler'] else 'false'),
                    'args': json.loads(data['scheduler']['args']) if 'args' in data['scheduler'] else {}
                }
                running_context.controller.get_workflow(playbook_name, workflow_name).options = \
                    Options(scheduler=scheduler, enabled=enabled)
            if 'new_name' in data and data['new_name']:
                running_context.controller.update_workflow_name(
                    playbook_name, workflow_name, playbook_name, data['new_name'])
                workflow_name = data['new_name']
            workflow = running_context.controller.get_workflow(playbook_name, workflow_name)
            if workflow:
                return json.dumps({
                    'workflow': {
                        'name': workflow_name,
                        'options': workflow.options.as_json()
                    },
                    'status': 'success'
                })
            else:
                return json.dumps({'status': 'error: altered workflow can no longer be located'})
        else:
            return json.dumps({'status': 'error: invalid json'})
    else:
        return json.dumps({'status': 'error: workflow name is not valid'})

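# A hypothetical JSON body for the handler above, with field names taken from
# its parsing logic. Note that 'args' travels as a JSON-encoded string, since
# the handler decodes it with json.loads(); the values shown are placeholders.
example_update = {
    'scheduler': {
        'enabled': True,
        'type': 'cron',
        'autorun': 'true',
        'args': '{"hours": "*/2"}',  # string on the wire; decoded server-side
    },
    'new_name': 'renamed_workflow',  # optional rename
}
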
class Plugin(object):
    NAME = ""
    DESCRIPTION = ""
    AUTHORS = []

    def __init__(self, shell):
        self.options = Options()
        self.shell = shell
        self.loader = core.loader
        self.job = self.job()
        self.load()

    def load(self):
        '''called when the framework starts'''
        pass

    def run(self):
        '''called when the plugin is invoked'''
        pass

    def job(self):
        '''job type of the associated plugin'''
        pass

    def dispatch(self, workloads, job, checkrepeat=True, repeatzombie=''):
        if not repeatzombie:
            target = self.options.get("ZOMBIE")
        else:
            target = repeatzombie
        # expand a target spec such as "1,3-5" into individual session ids
        commas = [x.strip() for x in target.split(",")]
        splitted = []
        for x in commas:
            s = x.split("-")
            if len(s) == 1:
                splitted.append(str(x))
            else:
                for i in range(int(s[0]), int(s[1]) + 1):
                    splitted.append(str(i))
        self.ret_jobs = []
        for server in self.shell.stagers:
            for session in server.sessions:
                if (target.lower().strip() == "all" or str(session.id) in splitted) and not session.killed:
                    if server.stager.WORKLOAD in workloads:
                        self.shell.print_verbose("Server: %s Session %s" % (server, session))
                        workload = workloads[server.stager.WORKLOAD]
                        options = copy.deepcopy(self.options)
                        j = job(self.shell, session.id, self.STATE, workload, options)
                        self.shell.jobs.append(j)
                        self.ret_jobs.append(j.id)
        # read REPEAT from the plugin's own options: the per-job copy is
        # unbound when no session matched the target
        if checkrepeat and self.options.get("REPEAT") == "true":
            self.repeat(self.shell, workloads, self.options)

    def load_payload(self, id):
        try:
            for server in self.shell.stagers:
                if int(server.payload_id) == int(id):
                    return server.get_payload().decode()
        except Exception:
            pass
        return None

    def parse_ips(self, ips):
        import core.cidr
        return core.cidr.get_ips(ips)

    def parse_ports(self, ports):
        import core.cidr
        return core.cidr.get_ports(ports)

    def make_vb_array(self, name, array):
        ret = "dim %s(%d)\n" % (name, len(array) - 1)
        count = 0
        for el in array:
            ret += '%s(%d) = "%s"\n' % (name, count, str(el))
            count += 1
        return ret

    def make_js_array(self, name, array):
        array = ['"%s"' % item for item in array]
        return "var %s = [%s];" % (name, ", ".join(array))

    def random_string(self, length):
        return ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits)
                       for n in range(length))

    def validate_shellcode(self, shellcode):
        if len(shellcode) % 2 != 0:
            return False
        return all(c in string.hexdigits for c in shellcode)

    def convert_shellcode(self, shellcode):
        decis = []
        count = 0
        for i in range(0, len(shellcode), 2):
            count += 1
            deci = int(shellcode[i:i + 2], 16)
            if count % 25 == 0:
                # " _\n" is the VBScript line continuation
                decis.append(" _\n" + str(deci))
            else:
                decis.append(str(deci))
        return ",".join(decis)

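# A hedged sketch of the subclass contract implied above: job() must return the
# job class (since __init__ replaces the bound method with its return value),
# and dispatch() expands the ZOMBIE option into session ids. ExampleJob and the
# "js" workload key are illustrative, not part of the framework's API.
class ExampleJob(object):
    def __init__(self, shell, session_id, state, workload, options):
        self.id = id(self)  # placeholder job id

class ExamplePlugin(Plugin):
    NAME = "example"
    DESCRIPTION = "illustrative plugin"

    def job(self):
        return ExampleJob

    def run(self):
        workloads = {"js": "console.log('payload');"}
        self.dispatch(workloads, self.job)
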
Options.PrepareFeatures = False
Options.PlottingEnabled = True
Options.TrainDataSize = 1
Options.TestDataSize = 0
Options.InputFeaturesPath = ''
Options.OutputFeaturesPath = ''
Options.PredictionSaveDirectory = ''
Options.RegressionAssetName = 'NZDUSD_H4'
Options.RegressionMethodName = 'HARM_TRIANGULAR'

if len(sys.argv) > 1:
    Options.InputFeaturesPath = sys.argv[1]
if len(sys.argv) > 2:
    Options.OutputFeaturesPath = sys.argv[2]
    print(sys.argv[1], sys.argv[2])
if len(sys.argv) > 3:
    Options.RegressionAssetName = sys.argv[3]
if len(sys.argv) > 4:
    Options.RegressionMethodName = sys.argv[4]

Options.OutputValuesKey = Options.RegressionAssetName + '_RPO'
Options.OutputDateValuesKey = Options.RegressionAssetName + '_RealClose_date'
Options.RealClosePricesKey = Options.RegressionAssetName + '_RealClose'
Options.Init()
core.run.main()

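# Example invocation matching the positional argv handling above; the script
# name and file paths are placeholders, not taken from the project:
#
#   python run.py input_features.json output_features.json NZDUSD_H4 HARM_TRIANGULAR
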
def __func(wf_name):
    if running_context.controller.is_workflow_registered(playbook_name, wf_name):
        data = request.get_json()
        if 'scheduler' in data:
            enabled = data['scheduler']['enabled'] if 'enabled' in data['scheduler'] else False
            scheduler = {
                'type': data['scheduler']['type'] if 'type' in data['scheduler'] else 'cron',
                'autorun': (str(data['scheduler']['autorun']).lower()
                            if 'autorun' in data['scheduler'] else 'false'),
                'args': json.loads(data['scheduler']['args']) if 'args' in data['scheduler'] else {}
            }
            running_context.controller.get_workflow(playbook_name, wf_name).options = \
                Options(scheduler=scheduler, enabled=enabled)
        if 'new_name' in data and data['new_name']:
            if running_context.controller.is_workflow_registered(playbook_name, data['new_name']):
                current_app.logger.warning(
                    'Could not update workflow {0}. Workflow already exists.'.format(workflow_name))
                return {"error": "Workflow already exists."}, OBJECT_EXISTS_ERROR
            else:
                running_context.controller.update_workflow_name(
                    playbook_name, wf_name, playbook_name, data['new_name'])
                running_context.Triggers.update_workflow(
                    old_workflow=wf_name, new_workflow=data['new_name'])
                wf_name = data['new_name']
        workflow = running_context.controller.get_workflow(playbook_name, wf_name)
        if workflow:
            returned_json = {
                'workflow': {
                    'name': wf_name,
                    'options': workflow.options.as_json(),
                    'start': workflow.start_step
                }
            }
            current_app.logger.info(
                'Updated workflow {0}-{1} to {2}'.format(playbook_name, wf_name, returned_json))
            return returned_json, SUCCESS
        else:
            current_app.logger.error(
                'Altered workflow {0}-{1} no longer in controller'.format(playbook_name, wf_name))
            return {'error': 'Altered workflow can no longer be located.'}, INVALID_INPUT_ERROR
    else:
        current_app.logger.error(
            'Workflow {0}-{1} not found in controller. Cannot be updated.'.format(playbook_name, wf_name))
        return {'error': 'Playbook or workflow does not exist.'}, OBJECT_DNE_ERROR

def main():
    # FixInterruptHandling().fix()
    if Options.PrepareFeatures:
        dataPrepared = DataPrepare.run(Options.OutputFeaturesPath,
                                       Options.InputFeaturesPath,
                                       Options.TrainDataStartDate,
                                       Options.FuturePredictionStartDate,
                                       Options.FuturePredictionEndDate())

    output_data_dict_train, output_data_dict_future = DataReader.get_output_features()
    input_data_dict_train, input_data_dict_future = DataReader.get_input_features()
    Options.InputFeatureSize = len(input_data_dict_train.values())

    plotter = Plotter()
    # plotter.plot_date_price(date_values, price_values, Options.RegressionAssetName)
    wave_no = 0
    input1 = list(input_data_dict_train.values())[wave_no]
    # plotter.plot_xy(date_values, input1, list(input_data_dict.keys())[wave_no], 'Dates', 'Power')

    # Construct pandas DataFrames
    input_pds = pd.DataFrame.from_dict(input_data_dict_train)
    real_close_pds = pd.DataFrame(data=output_data_dict_train[Options.RealClosePricesKey])
    output_pds = pd.DataFrame(data=output_data_dict_train[Options.OutputValuesKey])
    date_values = output_data_dict_train[Options.OutputDateValuesKey]
    future_input_pds = pd.DataFrame.from_dict(input_data_dict_future)
    future_output_pds, future_real_close_pds, future_date_values = \
        DataReader.get_future_output_features(output_data_dict_future)
    # End DataFrame construction

    # Data pre-processing
    pre_processor = DataPreProcessor()
    # output_pds = pre_processor.to_wavelet(output_pds)
    # output_pds = pre_processor.filter_by_savgol(output_pds, 51, 3)
    processor = DataProcessor(input_pds, output_pds, date_values,
                              Options.TrainDataSize, Options.TestDataSize)

    # Lazy-load the neural network library
    from core.nnmodel import NNModel

    ### ONE BY ONE START
    if Options.DataWindow == Options.DataWindowType.OneByOne:
        x_train_o, y_train_o, date_train = processor.get_one_by_one_data(
            'train', seq_len=1,
            multiply_y_vector=Options.MultiplyDataByCustomFactor,
            normalise=Options.NormaliseData)
        x_val_o, y_val_o, date_val = processor.get_one_by_one_data(
            'val', seq_len=1,
            multiply_y_vector=Options.MultiplyDataByCustomFactor,
            normalise=Options.NormaliseData)
        model = NNModel()
        model.build_one_by_one_model()
        callbacks = model.get_callbacks(Options.KerasOneByOneEpochs)
        model.fit_one_by_one(x_train_o, y_train_o, x_val_o, y_val_o,
                             Options.KerasOneByOneBatchSize,
                             Options.KerasOneByOneEpochs, callbacks)

        y_train_o_pred = model.predict(x_train_o, 'x_train_o')
        model.save_pred_to_csv(date_train, y_train_o_pred,
                               Options.PredictionSaveDirs(),
                               Options.GetPredictionSaveFileName('train'))
        plotter.plot_different_scale(y_train_o_pred, y_train_o, date_train,
                                     y_label1="Train Prediction", y_label2="Train")

        y_val_pred = model.predict(x_val_o, 'x_val_o')
        model.save_pred_to_csv(date_val, y_val_pred,
                               Options.PredictionSaveDirs(),
                               Options.GetPredictionSaveFileName('val'))
        plotter.plot_different_scale(y_val_pred, y_val_o, date_val,
                                     y_label1="Validation Prediction", y_label2="Validation")

        processor2 = DataProcessor(input_pds, real_close_pds, date_values,
                                   Options.TrainDataSize, Options.TestDataSize)
        # DELETE THIS
        temp, y_test_real, date_real = processor2.get_one_by_one_data(
            'test', seq_len=1,
            multiply_y_vector=Options.MultiplyDataByCustomFactor,
            normalise=Options.NormaliseData)
        # DELETE THIS
        x_test_o, y_test_o, date_test = processor.get_one_by_one_data(
            'test', seq_len=1,
            multiply_y_vector=Options.MultiplyDataByCustomFactor,
            normalise=Options.NormaliseData)
        y_test_o_pred = model.predict(x_test_o, 'x_test_o')
        model.save_pred_to_csv(date_test, y_test_o_pred,
                               Options.PredictionSaveDirs(),
                               Options.GetPredictionSaveFileName('test'))
        plotter.plot_different_scale(y_test_o_pred, y_test_real,
                                     y_label1="Test Prediction", y_label2="Real")
        plotter.plot_different_scale(y_test_o_pred, y_test_o, date_real,
                                     y_label1="Test Prediction", y_label2="Real RPO")

        future_pred = model.predict(future_input_pds, 'future_input_pds')
        plotter.plot_different_scale(future_pred, future_real_close_pds,
                                     np.array([]),  # np.array(future_date_values) throws ex
                                     y_label1="Future Prediction",
                                     y_label2="Future Real")
        model.save_pred_to_csv(np.array(future_date_values), future_pred,
                               Options.PredictionSaveDirs(),
                               Options.GetPredictionSaveFileName('future'))
    ### ONE BY ONE END

    if Options.DataWindow == Options.DataWindowType.OneByOneTelosSearch:
        from core.nntelossearch import NNTelosSearch
        telosSearch = NNTelosSearch()
        x_train_o, y_train_o = processor.get_train_data_OBO(
            seq_len=1, normalise=Options.NormaliseData)
        telosSearch.minimize(x_train_o, y_train_o, [], [])
    # TELOS END

    if Options.DataWindow == Options.DataWindowType.WindowBatch:
        # x_train, y_train = processor.get_train_data(seq_len=Options.WindowSequenceLength, normalise=Options.NormaliseData)
        # y_train = y_train * Options.MultiplyDataByCustomFactor
        # x_test, y_test = processor.get_test_data(seq_len=Options.WindowSequenceLength, normalise=Options.NormaliseData)
        # y_test = y_test * Options.MultiplyDataByCustomFactor
        model = NNModel()
        model.build_windowed_batch_model()
        callbacks = model.get_callbacks(Options.KerasWindowedEpochs)
        x_train, y_train, date_train = processor.get_window_train_data(
            arr_type='train', seq_len=Options.WindowSequenceLength,
            step=Options.WindowShiftStep, feature_len=Options.InputFeatureSize,
            multiply_y_vector=1)
        x_val, y_val, date_val = processor.get_window_train_data(
            arr_type='val', seq_len=Options.WindowSequenceLength,
            step=Options.WindowShiftStep, feature_len=Options.InputFeatureSize,
            multiply_y_vector=1)
        x_test, y_test, date_test = processor.get_window_train_data(
            arr_type='test', seq_len=Options.WindowSequenceLength,
            step=Options.WindowShiftStep, feature_len=Options.InputFeatureSize,
            multiply_y_vector=1)
        if Options.FlattenWindowVector:
            x_train = np.asarray(x_train, dtype=np.float32).reshape(
                -1, Options.WindowSequenceLength * Options.InputFeatureSize)
            x_val = np.asarray(x_val, dtype=np.float32).reshape(
                -1, Options.WindowSequenceLength * Options.InputFeatureSize)
            x_test = np.asarray(x_test, dtype=np.float32).reshape(
                -1, Options.WindowSequenceLength * Options.InputFeatureSize)
        model.fit_one_by_one(x_train, y_train, x_val, y_val,
                             Options.KerasWindowedBatchSize,
                             Options.KerasWindowedEpochs, callbacks)
        if Options.FlattenWindowVector:
            plotter.plot_different_scale(model.predict(x_train, 'x_train'), y_train, date_train,
                                         y_label1="Train Prediction", y_label2="Train")
            plotter.plot_different_scale(model.predict(x_val, 'x_val'), y_val, date_val,
                                         y_label1="Validation Prediction", y_label2="Validation")
            plotter.plot_different_scale(model.predict(x_test, 'x_test'), y_test, date_test,
                                         y_label1="Test Prediction", y_label2="Test")
        else:
            train_pred_point_by_point = model.predict_point_by_point(x_train)
            plotter.plot_different_scale(train_pred_point_by_point, y_train, date_train,
                                         y_label1="Train Prediction", y_label2="Train")
            test_pred_point_by_point = model.predict_point_by_point(x_test)
            plotter.plot_different_scale(test_pred_point_by_point, y_test, date_test,
                                         y_label1="Test Prediction", y_label2="Real")
    # WINDOW END
    print("THE_END")

class Plugin(object):
    NAME = ""
    DESCRIPTION = ""
    AUTHORS = []

    def __init__(self, shell):
        self.options = Options()
        self.shell = shell
        self.loader = core.loader
        self.load()

    def load(self):
        '''called when the framework starts'''
        pass

    def run(self):
        '''called when the plugin is invoked'''
        pass

    def dispatch(self, workloads, job):
        target = self.options.get("ZOMBIE")
        splitted = []
        if target != "ALL":
            # "ALL" is not a numeric id; only explicit lists/ranges are parsed
            commas = [x.strip() for x in target.split(",")]
            for x in commas:
                s = x.split("-")
                if len(s) == 1:
                    splitted.append(int(x))
                else:
                    splitted.extend(range(int(s[0]), int(s[1]) + 1))
        for server in self.shell.stagers:
            for session in server.sessions:
                if target == "ALL" or int(session.id) in splitted:
                    if server.stager.WORKLOAD in workloads.keys():
                        workload = workloads[server.stager.WORKLOAD]
                        options = copy.deepcopy(self.options)
                        j = job(self.shell, session, self.shell.state, workload, options)
                        session.jobs.append(j)

    def load_payload(self, id):
        try:
            for server in self.shell.stagers:
                if int(server.payload_id) == int(id):
                    return server.get_payload().decode()
        except Exception:
            pass
        return None

    def parse_ips(self, ips):
        import core.cidr
        return core.cidr.get_ips(ips)

    def parse_ports(self, ports):
        import core.cidr
        return core.cidr.get_ports(ports)

    def make_vb_array(self, name, array):
        ret = "dim %s(%d)\n" % (name, len(array) - 1)
        count = 0
        for el in array:
            ret += '%s(%d) = "%s"\n' % (name, count, str(el))
            count += 1
        return ret

    def make_js_array(self, name, array):
        array = ['"%s"' % item for item in array]
        return "var %s = [%s];" % (name, ", ".join(array))

    def random_string(self, length):
        return ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits)
                       for n in range(length))

    def validate_shellcode(self, shellcode):
        if len(shellcode) % 2 != 0:
            return False
        return all(c in string.hexdigits for c in shellcode)

    def convert_shellcode(self, shellcode):
        decis = []
        count = 0
        for i in range(0, len(shellcode), 2):
            count += 1
            deci = int(shellcode[i:i + 2], 16)
            if count % 25 == 0:
                # " _\n" is the VBScript line continuation
                decis.append(" _\n" + str(deci))
            else:
                decis.append(str(deci))
        return ",".join(decis)

class Plugin(object):
    NAME = ""
    DESCRIPTION = ""
    AUTHORS = []

    def __init__(self, shell):
        self.options = Options()
        self.shell = shell
        self.loader = core.loader
        self.load()

    def load(self):
        '''called when the framework starts'''
        pass

    def run(self):
        '''called when the plugin is invoked'''
        pass

    def dispatch(self, workloads, job):
        target = self.options.get("ZOMBIE")
        splitted = [x.strip() for x in target.split(",")]
        for server in self.shell.stagers:
            for session in server.sessions:
                if target == "ALL" or str(session.id) in splitted:
                    if server.stager.WORKLOAD in workloads.keys():
                        workload = workloads[server.stager.WORKLOAD]
                        options = copy.deepcopy(self.options)
                        j = job(self.shell, session, self.shell.state, workload, options)
                        session.jobs.append(j)

    def load_payload(self, id):
        try:
            for server in self.shell.stagers:
                if int(server.payload_id) == int(id):
                    return server.get_payload().decode()
        except Exception:
            pass
        return None

    def parse_ips(self, ips):
        import core.cidr
        return core.cidr.get_ips(ips)

    def parse_ports(self, ports):
        import core.cidr
        return core.cidr.get_ports(ports)

    def make_vb_array(self, name, array):
        ret = "dim %s(%d)\n" % (name, len(array) - 1)
        count = 0
        for el in array:
            ret += '%s(%d) = "%s"\n' % (name, count, str(el))
            count += 1
        return ret

    def make_js_array(self, name, array):
        array = ['"%s"' % item for item in array]
        return "var %s = [%s];" % (name, ", ".join(array))

    def random_string(self, length):
        return ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits)
                       for n in range(length))

    def validate_shellcode(self, shellcode):
        if len(shellcode) % 2 != 0:
            return False
        return all(c in string.hexdigits for c in shellcode)

    def convert_shellcode(self, shellcode):
        decis = []
        count = 0
        for i in range(0, len(shellcode), 2):
            count += 1
            deci = int(shellcode[i:i + 2], 16)
            if count % 25 == 0:
                # " _\n" is the VBScript line continuation
                decis.append(" _\n" + str(deci))
            else:
                decis.append(str(deci))
        return ",".join(decis)

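# Worked examples for the string helpers above, as the code is written:
#
#   make_vb_array("a", ["x", "y"])  returns  'dim a(1)\na(0) = "x"\na(1) = "y"\n'
#   make_js_array("a", ["x", "y"])  returns  'var a = ["x", "y"];'
#   validate_shellcode("90c3")      returns  True   (even length, all hex digits)
#   convert_shellcode("90c3")       returns  "144,195"  (hex byte pairs as
#       decimals, with a VBScript " _" line continuation every 25th value)
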
def workflow(playbook_name, workflow_name, action):
    if action == 'add':
        form = forms.AddWorkflowForm(request.form)
        if form.validate():
            status = 'success'
            template_playbook = form.playbook.data
            template = form.template.data
            if template and template_playbook:
                if template_playbook in [os.path.splitext(workflow)[0] for workflow
                                         in locate_workflows_in_directory(config.templatesPath)]:
                    res = running_context.controller.create_workflow_from_template(
                        playbook_name=playbook_name,
                        workflow_name=workflow_name,
                        template_playbook=template_playbook,
                        template_name=template)
                    if not res:
                        add_default_template(playbook_name, workflow_name)
                        status = 'warning: template not found in playbook. Using default template'
                else:
                    add_default_template(playbook_name, workflow_name)
                    status = 'warning: template playbook not found. Using default template'
            else:
                add_default_template(playbook_name, workflow_name)
            if running_context.controller.is_workflow_registered(playbook_name, workflow_name):
                workflow = running_context.controller.get_workflow(playbook_name, workflow_name)
                return json.dumps({
                    'workflow': {
                        'name': workflow_name,
                        'steps': workflow.get_cytoscape_data(),
                        'options': workflow.options.as_json()
                    },
                    'status': status
                })
            else:
                return json.dumps({'status': 'error: could not add workflow'})
        else:
            return json.dumps({'status': 'error: invalid form'})

    elif action == 'edit':
        if running_context.controller.is_workflow_registered(playbook_name, workflow_name):
            form = forms.EditPlayNameForm(request.form)
            if form.validate():
                enabled = form.enabled.data if form.enabled.data else False
                scheduler = {
                    'type': form.scheduler_type.data if form.scheduler_type.data else 'cron',
                    'autorun': str(form.autoRun.data).lower() if form.autoRun.data else 'false',
                    'args': json.loads(form.scheduler_args.data) if form.scheduler_args.data else {}
                }
                running_context.controller.get_workflow(playbook_name, workflow_name).options = \
                    Options(scheduler=scheduler, enabled=enabled)
                if form.new_name.data:
                    running_context.controller.update_workflow_name(
                        playbook_name, workflow_name, playbook_name, form.new_name.data)
                    workflow_name = form.new_name.data
                workflow = running_context.controller.get_workflow(playbook_name, workflow_name)
                if workflow:
                    return json.dumps({
                        'workflow': {
                            'name': workflow_name,
                            'options': workflow.options.as_json()
                        },
                        'status': 'success'
                    })
                else:
                    return json.dumps({'status': 'error: altered workflow can no longer be located'})
            else:
                return json.dumps({'status': 'error: invalid form'})
        else:
            return json.dumps({'status': 'error: workflow name is not valid'})

    elif action == 'save':
        if running_context.controller.is_workflow_registered(playbook_name, workflow_name):
            if request.get_json():
                if 'cytoscape' in request.get_json():
                    workflow = running_context.controller.get_workflow(playbook_name, workflow_name)
                    workflow.from_cytoscape_data(json.loads(request.get_json()['cytoscape']))
                    try:
                        write_format = 'w' if sys.version_info[0] == 2 else 'wb'
                        workflow_filename = os.path.join(
                            config.workflowsPath, '{0}.workflow'.format(playbook_name))
                        with open(workflow_filename, write_format) as workflow_out:
                            xml = ElementTree.tostring(
                                running_context.controller.playbook_to_xml(playbook_name))
                            workflow_out.write(xml)
                        return json.dumps({
                            "status": "success",
                            "steps": workflow.get_cytoscape_data()
                        })
                    except (OSError, IOError) as e:
                        # str(e) is portable; e.message only exists on Python 2
                        return json.dumps({
                            "status": "Error saving: {0}".format(str(e)),
                            "steps": workflow.get_cytoscape_data()
                        })
                else:
                    return json.dumps({"status": "error: malformed json"})
            else:
                return json.dumps({"status": "error: no information received"})
        else:
            return json.dumps({'status': 'error: workflow name is not valid'})

    elif action == 'delete':
        if running_context.controller.is_workflow_registered(playbook_name, workflow_name):
            running_context.controller.removeWorkflow(playbook_name, workflow_name)
            status = 'success'
        else:
            status = 'error: invalid workflow name'
        return json.dumps({
            "status": status,
            "playbooks": running_context.controller.get_all_workflows()
        })

    elif action == 'execute':
        if running_context.controller.is_workflow_registered(playbook_name, workflow_name):
            running_context.controller.executeWorkflow(playbook_name, workflow_name)
            status = 'success'
        else:
            status = 'error: invalid workflow name'
        return json.dumps({"status": status})

    else:
        return json.dumps({"status": 'error: invalid operation'})
