def decode2(data_decode, scaling_factor, list_discrete, records_d, plot=False,
            args=None):
    """Train the VAEM decoder stage and optionally plot the RMSE curve.

    Parameters
    ----------
    data_decode : tuple
        (Data_train_decomp, Data_train_comp, Data_train_noisy_decomp,
         mask_train_decomp, Data_test_decomp, mask_test_comp,
         mask_test_decomp, cat_dims, DIM_FLT, dic_var_type) as produced by
        the preprocessing/encode stage.
    scaling_factor : sequence
        Per-column scaling; only the last entry is used here, to rescale the
        saved RMSE information curve back to data units.
    list_discrete : sequence
        Indices of the discrete columns, forwarded to the trainer.
    records_d : object
        Discrete-value records, forwarded to the trainer.
    plot : bool, optional
        When True, draw the averaged test-RMSE information curve.
    args : Params, optional
        Hyperparameter object. Defaults to loading
        'hyperparameters/bank_SAIA.json' (the previous hard-coded behaviour,
        so existing positional callers are unaffected); the __main__ driver
        that already holds a Params object can now pass it via ``args=``.

    Returns
    -------
    The trained VAE model returned by ``p_vae_active_learning``.
    """
    if args is None:
        args = params.Params('hyperparameters/bank_SAIA.json')
    (Data_train_decomp, Data_train_comp, Data_train_noisy_decomp,
     mask_train_decomp, Data_test_decomp, mask_test_comp, mask_test_decomp,
     cat_dims, DIM_FLT, dic_var_type) = data_decode
    print('Decode Training Start')
    vae = p_vae_active_learning(Data_train_comp, Data_train_noisy_decomp,
                                mask_train_decomp, Data_test_decomp,
                                mask_test_comp, mask_test_decomp, cat_dims,
                                DIM_FLT, dic_var_type, args, list_discrete,
                                records_d)
    # The trainer persists the information curve; reload and rescale it.
    npzfile = np.load(args.output_dir + '/UCI_rmse_curve_SING.npz')
    IC_SING = npzfile['information_curve'] * scaling_factor[-1]
    if plot:
        import matplotlib.pyplot as plt
        from matplotlib.ticker import MaxNLocator
        # Per-step RMSE averaged over the sample axis; hoisted so the same
        # array feeds the line plot, the error-bar centers, and the yerr.
        # (The original recomputed it three times over the full slice
        # [:, :, 0:], which is just IC_SING.)
        rmse = np.sqrt((IC_SING ** 2).mean(axis=1))
        fig, ax1 = plt.subplots()
        ax1.plot(rmse.mean(axis=0), 'ys', linestyle='-.', label='dataset')
        ax1.errorbar(np.arange(IC_SING.shape[2]),
                     rmse.mean(axis=0),
                     yerr=rmse.std(axis=0) / np.sqrt(IC_SING.shape[0]),
                     ecolor='y', fmt='ys')
        plt.xlabel('Steps', fontsize=18)
        plt.ylabel('avg. test. RMSE', fontsize=18)
        plt.xticks(fontsize=18)
        plt.yticks(fontsize=18)
        ax1.xaxis.set_major_locator(MaxNLocator(integer=True))
        ax1.legend(bbox_to_anchor=(0.0, 1.02, 1., .102), mode="expand",
                   loc=3, ncol=1, borderaxespad=0., prop={'size': 20},
                   frameon=False)
        ax1.ticklabel_format(useOffset=False)
        plt.show()
    return vae
def run(self):
    """Probe every target URL for PHP code injection.

    Injects a ``phpinfo()`` payload into each query parameter and flags
    URLs whose response contains phpinfo output.
    """
    # Module metadata, kept for the framework's module registry convention.
    info = {
        'name': 'Php',
        'fullname': 'PHP Code Injection',
        'author': 'Momo Outaadi (M4ll0k)',
        'description': 'Find PHP Code Injection Vulnerability'
    }
    self.output.test('Checking php code injection...')
    payload = "1;phpinfo()"
    # NOTE: the original pattern 'phpinfo[()]' is a character class matching
    # a single paren; '\(\)' matches the intended literal "()".
    signature = r'<title>phpinfo\(\)</title>|<h1 class="p">PHP Version (.*?)</h1>'
    try:
        for url in self.urls:
            # One candidate URL per injectable query parameter; iterating the
            # list directly unifies the former len>1 / len==1 branches.
            for para in params.Params(url, payload).process():
                resp = self.request.send(url=para, method="GET",
                                         payload=None, headers=None,
                                         cookies=self.cookie)
                if resp.status_code == 200 and re.search(signature, resp.content):
                    self.output.plus(
                        'That site is may be vulnerable to PHP Code Injection at %s' % para)
    except Exception:
        # Best-effort scan: network/parse failures abort this module silently.
        pass
def run(self):
    """Probe every target URL for SQL injection using payloads from data/sql.txt."""
    # Module metadata, kept for the framework's module registry convention.
    info = {
        'name': 'Sql',
        'fullname': 'SQL Injection',
        'author': 'Momo Outaadi (M4ll0k)',
        'description': 'Find SQL Injection Vulnerability'
    }
    self.output.test('Checking sql injection...')
    # Read all payloads up front, dropping the trailing newline; the context
    # manager closes the file even if the scan below raises (the original
    # leaked the handle).
    with open('data/sql.txt', 'rb') as db:
        payloads = [line.split('\n')[0] for line in db]
    try:
        for payload in payloads:
            for url in self.urls:
                # replace queries with payload; iterating the list directly
                # unifies the former len>1 / len==1 branches.
                for para in params.Params(url, payload).process():
                    resp = self.request.send(url=para, method="GET",
                                             payload=None, headers=None,
                                             cookies=self.cookie)
                    erro = self.dberror(resp.content)
                    if erro is not None:
                        self.output.plus(
                            'That site is may be vulnerable to %s at %s' % (erro, para))
    except Exception:
        # Best-effort scan: network/parse failures abort this module silently.
        pass
def run(self):
    """Probe every target URL for reflected XSS using payloads from data/xss.txt."""
    # Module metadata, kept for the framework's module registry convention.
    info = {
        'name': 'Xss',
        'fullname': 'Cross Site Scripting',
        'author': 'Momo Outaadi (M4ll0k)',
        'description': 'Find Cross Site Scripting (XSS) vulnerability'
    }
    # Read all payloads up front, dropping the trailing newline; the context
    # manager closes the file even on error (the original leaked the handle).
    with open('data/xss.txt', 'rb') as db:
        payloads = [line.split('\n')[0] for line in db]
    self.output.test('Checking cross site scripting...')
    try:
        for payload in payloads:
            for url in self.urls:
                # replace queries with payload; iterating the list directly
                # unifies the former len>1 / len==1 branches.
                for para in params.Params(url, payload).process():
                    resp = self.request.send(url=para, method="GET",
                                             payload=None, headers=None,
                                             cookies=self.cookie)
                    # re.escape: XSS payloads are full of regex
                    # metacharacters ('<', '(', '[', ...) and must be
                    # matched literally, not compiled as patterns.
                    if resp.status_code == 200 and re.search(
                            re.escape(payload), resp.content, re.I):
                        self.output.plus(
                            'That site is may be vulnerable to Cross Site Scripting (XSS) at %s' % para)
    except Exception:
        # Best-effort scan: network/parse failures abort this module silently.
        pass
def run(self):
    """Probe every target URL for XPath injection using payloads from data/xpath.txt."""
    # Module metadata, kept for the framework's module registry convention.
    info = {
        'name': 'XPath',
        'fullname': 'XPath Injection',
        'author': 'Momo Outaadi (M4ll0k)',
        'description': 'Find XPATH Injection'
    }
    # Read all payloads up front, dropping the trailing newline; the context
    # manager closes the file even on error (the original leaked the handle).
    with open('data/xpath.txt', 'rb') as db:
        payloads = [line.split('\n')[0] for line in db]
    self.output.test('Checking xpath injection...')
    try:
        for payload in payloads:
            for url in self.urls:
                # replace queries with payload; iterating the list directly
                # unifies the former len>1 / len==1 branches.
                for para in params.Params(url, payload).process():
                    resp = self.request.send(url=para, method="GET",
                                             payload=None, headers=None,
                                             cookies=self.cookie)
                    # XPath engines leak these error strings when the
                    # injected expression breaks the query.
                    if re.search(r'XPATH syntax error:|XPathException',
                                 resp.content, re.I):
                        self.output.plus(
                            'That site is may be vulnerable to XPath Injection at %s' % para)
    except Exception:
        # Best-effort scan: network/parse failures abort this module silently.
        pass
def run(self):
    """Probe every target URL for remote file inclusion using payloads from data/rfi.txt."""
    # Module metadata, kept for the framework's module registry convention.
    info = {
        'name': 'Rfi',
        'fullname': 'Remote File Inclusion',
        'author': 'Momo Outaadi (M4ll0k)',
        'description': 'Find Remote File Inclusion (RFI) Vulnerability'
    }
    self.output.test('Checking remote file inclusion...')
    # Read all payloads up front, dropping the trailing newline; the context
    # manager closes the file even on error (the original leaked the handle).
    with open('data/rfi.txt', 'rb') as db:
        payloads = [line.split('\n')[0] for line in db]
    # Signatures of /etc/passwd (unix) and boot.ini (windows) in the body.
    # NOTE: the original '([0])' is a capturing character class matching a
    # bare digit; '\(0\)' matches the literal "(0)" that boot.ini contains.
    pl = r"root:/root:/bin/bash|default=multi\(0\)disk\(0\)rdisk\(0\)partition\(1\)\\WINDOWS"
    try:
        for payload in payloads:
            for url in self.urls:
                # replace queries with payload; iterating the list directly
                # unifies the former len>1 / len==1 branches.
                for para in params.Params(url, payload).process():
                    resp = self.request.send(url=para, method="GET",
                                             payload=None, headers=None,
                                             cookies=self.cookie)
                    if re.search(pl, resp.content):
                        self.output.plus(
                            'That site is may be vulnerable to Remote File Inclusion (RFI) at %s' % para)
    except Exception:
        # Best-effort scan: network/parse failures abort this module silently.
        pass
def run(self):
    """Probe every target URL for HTML code injection (reflected markup)."""
    # Module metadata, kept for the framework's module registry convention.
    info = {
        'name': 'Html',
        'fullname': 'Html code injection',
        'author': 'Momo Outaadi (M4ll0k)',
        'description': 'Find html code injection'
    }
    self.output.test('Checking html injection...')
    payload = "<h1><a href=\"http://www.google.com\">Click Spaghetti!</a></h1>"
    try:
        for url in self.urls:
            # replace queries with payload; iterating the list directly
            # unifies the former len>1 / len==1 branches.
            for para in params.Params(url, payload).process():
                resp = self.request.send(url=para, method="GET",
                                         payload=None, headers=None,
                                         cookies=self.cookie)
                # re.escape: the payload contains regex metacharacters
                # ('.', '!') and must be matched literally in the body.
                if resp.status_code == 200 and re.search(
                        re.escape(payload), resp.content):
                    self.output.plus(
                        'That site is may be vulnerable to HTML Code Injection at %s' % para)
    except Exception:
        # Best-effort scan: network/parse failures abort this module silently.
        pass
''' global model, session model2 = decode2(self.data_decode, self.scaling_factor, self.list_discrete, self.records_d, plot=plot, args=args) self.decode_model = model2 ####################################################################################################### if __name__ == '__main__': root = os.path.dirname(os.path.abspath(__file__)) args = params.Params(root + '/repo/VAEM/hyperparameters/bank_plot.json') if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) rs = 42 # random seed fast_plot = 0 #Instantiating Model ################################## model = Model() #Defining Data and Variables categories = [ 'job', "marital", "education", 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'poutcome', 'y' ] cat_ind = [0, 1, 2, 3, 4, 5, 6, 7]
import numpy as np from utils import params # file iclude parmater of model import tensorflow as tf # tf.__version__ == 2.1 import matplotlib.pyplot as plt from tensorflow.keras.optimizers import Adam, SGD from tensorflow.keras.models import Sequential, Model, load_model from tensorflow.keras.losses import categorical_crossentropy from tensorflow.keras.layers import (Activation, Conv3D, Dense, Dropout, Flatten, MaxPooling3D, MaxPooling2D, LeakyReLU,BatchNormalization, Reshape, AveragePooling3D, GlobalAveragePooling3D, Input, concatenate, LSTM, TimeDistributed, Bidirectional) # =========================base model pre-train I3D params = params.Params() WEIGHTS_NAME = ['rgb_kinetics_only', 'flow_kinetics_only', 'rgb_imagenet_and_kinetics', 'flow_imagenet_and_kinetics'] # path to pretrained models with top (classification layer) WEIGHTS_PATH = { 'rgb_kinetics_only' : 'https://github.com/dlpbc/keras-kinetics-i3d/releases/download/v0.2/rgb_inception_i3d_kinetics_only_tf_dim_ordering_tf_kernels.h5', 'flow_kinetics_only' : 'https://github.com/dlpbc/keras-kinetics-i3d/releases/download/v0.2/flow_inception_i3d_kinetics_only_tf_dim_ordering_tf_kernels.h5', 'rgb_imagenet_and_kinetics' : 'https://github.com/dlpbc/keras-kinetics-i3d/releases/download/v0.2/rgb_inception_i3d_imagenet_and_kinetics_tf_dim_ordering_tf_kernels.h5', 'flow_imagenet_and_kinetics' : 'https://github.com/dlpbc/keras-kinetics-i3d/releases/download/v0.2/flow_inception_i3d_imagenet_and_kinetics_tf_dim_ordering_tf_kernels.h5' } # path to pretrained models with no top (no classification layer) WEIGHTS_PATH_NO_TOP = { 'rgb_kinetics_only' : 'https://github.com/dlpbc/keras-kinetics-i3d/releases/download/v0.2/rgb_inception_i3d_kinetics_only_tf_dim_ordering_tf_kernels_no_top.h5', 'flow_kinetics_only' : 'https://github.com/dlpbc/keras-kinetics-i3d/releases/download/v0.2/flow_inception_i3d_kinetics_only_tf_dim_ordering_tf_kernels_no_top.h5',
import pandas as pd import sklearn.preprocessing as preprocessing from sklearn.metrics import mean_squared_error from sklearn.feature_selection import mutual_info_regression, mutual_info_classif plt.switch_backend('agg') tfd = tf.contrib.distributions import utils.process as process import json import utils.params as params import seaborn as sns; sns.set(style="ticks", color_codes=True) /home/paperspace/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. from ._conv import register_converters as _register_converters 1.4.1 load hyperparameters In [2]: args = params.Params('./hyperparameters/bank_plot.json') if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) rs = 42 # random seed fast_plot = 0 Load Bank Data In [3]: seed = 3000 bank_raw = pd.read_csv("./data/bank/bankmarketing_train.csv") print(bank_raw.info()) label_column="y" matrix1 = bank_raw.copy() process.encode_catrtogrial_column(matrix1, ["job"])
import utils.trainer as trainer #import models.model as model import models.joint_model as model #import models.joint_model_linear as model #Simple, linear encoder, linear decoder, no activation functions import numpy as np import pandas as pd import matplotlib.pyplot as plt import torch from torch.autograd import Variable from scipy.stats import multivariate_normal import utils.params as params import sys args = params.Params('./hyperparameters/binaryAB.json') #df_raw = process.read_csv('https://raw.githubusercontent.com/Kwanikaze/vpandas/master/data/data_2.csv') df_raw = pd.DataFrame(data={'A':[0,1],'B':[0,1]}) print("Raw Data") print(df_raw) prob = df_raw.groupby(['A','B']).size().div(len(df_raw)) print("Joint P(A,B)") print(prob) print("Conditional P(B|A)") Aprob = df_raw.groupby('A').size().div(len(df_raw)) #print(Aprob) probBgivenA = df_raw.groupby(['A', 'B']).size().div(len(df_raw)).div(Aprob, axis=0, level='A') print(probBgivenA) print("Conditional P(A|B)") Bprob = df_raw.groupby('B').size().div(len(df_raw))
if verbosity >= 2: print(*s, flush=True) #################################################################################################### global model, session def init(*kw, **kwargs): global model, session model = Model(*kw, **kwargs) session = None # Making Args Global Variable args = params.Params('hyperparameters/bank_plot.json') if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) rs = 42 # random seed fast_plot = 0 #################################################################################################### ##### Custom code ################################################################################# cols_ref_formodel = ['none'] ### No column group ########## Loading Of Data ######################################################################### def load_data(filePath, categories, cat_col, num_cols, discrete_cols, targetCol, nsample, delimiter): global Data_decompressed, Mask_decompressed
# Driver for the binary A-B sigmoid experiment: load hyperparameters, fetch
# the toy CSV, inflate it by duplication, then shuffle and one-hot encode.
import utils.process as process
import utils.checks as checks
import models.model as model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch.autograd import Variable
from scipy.stats import multivariate_normal
import utils.params as params

#dict of hyperparameters
args = params.Params('./hyperparameters/binaryAB_sigmoid.json')

df = process.read_csv(
    'https://raw.githubusercontent.com/Kwanikaze/vpandas/master/data/data_2.csv'
)
input_dims = {'A': 3, 'B': 3}  #dicts ordered
data2 = False
attributes = list(df.columns)  #assumes each attribute has a single column
# Presumably replicates the tiny dataset 100x so sampling has enough rows
# — confirm duplicate_dataframe semantics in utils/process.
df = process.duplicate_dataframe(df, attributes, duplications=100)
num_samples = int(df.shape[0])
# n == len(df), so this is a full shuffle with a fixed seed, not a subsample.
sample1_df = df[attributes].sample(n=num_samples, random_state=args.random_seed)
sample1_df_OHE = process.one_hot_encode_columns(sample1_df, attributes)
#print(sample1_df)
# use gpu if available
# Driver for the trinary A-B-C experiment: load hyperparameters, fetch the
# 3-attribute toy CSV, preprocess/one-hot encode, then split train/val/test.
import utils.process as process
import utils.checks as checks
import models.model as model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch.autograd import Variable
from scipy.stats import multivariate_normal
import utils.params as params

#dict of hyperparameters
args = params.Params('./hyperparameters/trinaryABC.json')

df_raw = process.read_csv(
    'https://raw.githubusercontent.com/Kwanikaze/vpandas/master/data/data_3_ABC.csv'
)
input_dims = {'A': 3, 'B': 3, 'C': 3}  #dicts ordered
attributes = list(df_raw.columns)  #assumes each attribute has a single column
# All three attributes are categorical here; real_vars stays empty.
real_vars = []
cat_vars = [x for x in attributes if x not in real_vars]
df, df_OHE, mms_dict = process.preprocess(
    df_raw, args, real_vars, cat_vars, duplications=100)  #mms is min_max_scalar
# Split points [0.7, 0.85] suggest a 70/15/15 train/val/test split —
# confirm against process.split.
train_df, train_df_OHE, val_df, val_df_OHE, test_df, test_df_OHE = process.split(
    df, df_OHE, [0.7, 0.85])
num_samples = int(train_df.shape[0])
import utils.trainer as trainer #import models.model as model import models.joint_model as model import numpy as np import pandas as pd import matplotlib.pyplot as plt import torch from torch.autograd import Variable from scipy.stats import multivariate_normal import utils.params as params import sys #dict of hyperparameters args = params.Params('./hyperparameters/AcontsBcat.json') df_raw = process.read_csv( 'https://raw.githubusercontent.com/Kwanikaze/vpandas/master/data/data_A_conts_B_Cat.csv' ) input_dims = {'A': 1, 'B': 3} attributes = list(df_raw.columns) #assumes each attribute has a single column #attributes = ['B','A'] real_vars = ['A'] cat_vars = [x for x in attributes if x not in real_vars] df, df_OHE, mms_dict = process.preprocess( df_raw, args, real_vars, cat_vars, duplications=200) #mms is min_max_scalar train_df, train_df_OHE, val_df, val_df_OHE, test_df, test_df_OHE = process.split( df, df_OHE, [0.7, 0.85])