def build_arg_parser():
    """Build the command-line parser for the sandbox entry point.

    Extracted from main() so the CLI definition can be tested in isolation.
    """
    parser = argparse.ArgumentParser(description="An APK Analysis SandBox.")
    # BUG FIX: short and long option names must be separate positional
    # arguments to add_argument(); the original passed the single string
    # '-r, --runHeadless', which registers one malformed option.
    parser.add_argument('-r', '--runHeadless', action='store_true',
                        default=False, dest='runHeadless',
                        help='Run emulator without window.')
    parser.add_argument('-v', '--version', action='version',
                        version='SandDroid v0.1beta')
    return parser


def main():
    """Entry point: parse CLI args, configure a SandDroid instance, run it."""
    args = build_arg_parser().parse_args()

    # Build the sandbox from its config file.
    sandDroid = SandDroid(SandDroidConst.CONFIG_FILE_PATH, theLogger=Logger())

    # Apply CLI options and record the start time (used in log file names).
    sandDroid.runHeadless = args.runHeadless
    sandDroid.startTime = datetime.datetime.now()

    # Build the run logger inside the sandbox's log directory.
    sandDroid._createLogDir(sandDroid._getLogDir())
    logLevel = LogLevel.INFO
    logger = Logger(theLevel=logLevel,
                    theMode=LogMode.FILE,
                    theLogFile='%s/%s-SandDroid-run.log' % (
                        sandDroid._getLogDir(),
                        Utils.getTimeAsString(sandDroid.startTime)),
                    thePrintAlwaysFlag=True)
    sandDroid.log = logger

    sandDroid.run()
    # NOTE(review): closes the underlying file handle of the run logger;
    # assumes Logger exposes the open file as `.log` — confirm.
    sandDroid.log.log.close()
def __init__(self, theApkObj, theAvdName, decompressDir, runHeadless, theLogger=None):
    """Analysis worker thread state for a single APK.

    :param theApkObj: APK object to analyze
    :param theAvdName: name of the AVD this thread drives
    :param decompressDir: directory where the APK is decompressed
    :param runHeadless: run the emulator without a window
    :param theLogger: logger instance; a fresh Logger is created when omitted
    """
    Thread.__init__(self)
    # BUG FIX: the original default `theLogger=Logger()` is evaluated once
    # at definition time, so every instance silently shared one Logger;
    # use a None sentinel and build a per-instance default instead.
    self.log = theLogger if theLogger is not None else Logger()
    # Fresh config parser per worker thread.
    self.configParser = ConfigParser()
    self.apkObj = theApkObj
    self.curDir = os.path.dirname(__file__)
    # Analyzers are created lazily during the run.
    self.staticAnalyzer = None
    self.dynamicAnalyzer = None
    self.logcatAnalyzer = None
    self.startTimeStr = None
    self.endTimeStr = None
    self.emulator = None
    self.emulatorPort = 5554       # default Android emulator console port
    self.avdName = theAvdName
    self.runHeadless = runHeadless
    self.decompressPath = decompressDir
    self.logcatFile = None
    self.session = None
    self.cancelFlag = False        # flag for canceling run
def __init__(self, theConfigFilePath, theLogger=None):
    """SandDroid controller state.

    :param theConfigFilePath: path to the sandbox configuration file
    :param theLogger: logger instance; a fresh Logger is created when omitted
    """
    # Parse config file and create working directories.
    self.configParser = ConfigParser()
    # NOTE(review): this parseFile call was already disabled in the
    # original; left disabled to preserve behavior.
    # self.configParser.parseFile(theConfigFilePath)
    self.configParser.generateDirectories()
    # BUG FIX: the original default `theLogger=Logger()` is evaluated once
    # at definition time, so every instance shared one Logger; use a None
    # sentinel and build a per-instance default instead.
    self.log = theLogger if theLogger is not None else Logger()
    # keytool path to parse apk's signature
    self.keytoolPath = None
    # sanddroid directories
    self.mainDir = os.path.dirname(__file__)
    self.appList = []       # apk files (full paths) queued for analysis
    self.runningApps = []   # apk files currently being analyzed
    self.runHeadless = False
    self.emulatorStartPort = 5554
    self.numThreads = 1
    self.maxThreadRuntime = 600   # max thread runtime (presumably seconds) -- confirm
    # control running threads
    self.threadLogFileList = []   # per-thread log file paths
    self.numFinishedApps = 0      # number of analyzed apps
    self.numRunningThreads = 0    # number of running threads
    self.threadList = []          # worker threads, size == numThreads
    self.threadActiveMask = []    # bitmask: thread active?, size == numThreads
    self.avdsheartbeat = (0, 0, 0, 0)  # avds' usage counts in one cycle
    self.avdheartbeat = 0
    self.startTime = datetime.datetime.now()
def build_run_arg_parser():
    """Build the command-line parser for the experiment runner.

    Extracted from main() so the CLI definition can be tested in isolation.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--o', type=str, help='options file to use')
    parser.add_argument('--id', type=str, help='ID of current experiment')
    # BUG FIX: help text was copy-pasted from --id in the original.
    parser.add_argument('--log_dir', type=str, default='logs',
                        help='directory to write experiment logs to')
    parser.add_argument('--use_gpu', type=str, default=None,
                        help='list gpus you do not want to use')
    return parser


def main():
    """Parse CLI + experiment options, seed RNGs, configure GPUs, then train."""
    run_args = build_run_arg_parser().parse_args()

    # Merge experiment-file options with CLI options; CLI values win.
    args_file = run_args.o
    args = parse_exp_args(args_file)
    for k, v in vars(run_args).items():
        vars(args)[k] = v

    # Seed every RNG source for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    # Multi-GPU / device bookkeeping.
    CPU_COUNT = multiprocessing.cpu_count()
    GPU_COUNT = torch.cuda.device_count()
    print('Using device {}'.format(args.device))
    if 'cuda' in args.device.type:
        if args.use_gpu is not None:
            # Restrict visible devices to the user-supplied list.
            os.environ['CUDA_VISIBLE_DEVICES'] = args.use_gpu
            gpu_list = args.use_gpu.split(',')
            args.device_ids = ['cuda:' + str(x) for x in gpu_list]
            GPU_COUNT = len(gpu_list)
        else:
            args.device_ids = ['cuda:' + str(x) for x in range(GPU_COUNT)]
        torch.backends.cudnn.benchmark = True
        if args.multi_gpu:
            args.device = args.device_ids[0]
            # Scale the batch so each GPU keeps its per-device batch size.
            args.batch *= GPU_COUNT
            print('Total batch size per iteration is now {}'.format(args.batch))
    args.num_cpu = CPU_COUNT
    args.num_gpu = GPU_COUNT

    # Build the experiment logger.
    print('Creating logger for log dir {}/{}'.format(args.log_dir, args.id))
    logger = Logger(args.id, args_file, args.log_dir, args)

    # BUG FIX: removed stray `import pdb; pdb.set_trace()` debugging
    # leftover that halted every run right before training started.
    train(args, logger)
def __createThreadLogFile(self, logFileName):
    """Return a file-mode Logger writing to *logFileName* in the log dir.

    The new logger inherits the level of this instance's main logger.
    """
    path = os.path.join(self._getLogDir(), logFileName)
    return Logger(theLevel=self.log.level,
                  theMode=LogMode.FILE,
                  theLogFile=path)
def __init__(self, smaliDir, theLogger=None):
    """Static smali-code analyzer state.

    :param smaliDir: directory containing decompiled smali sources
    :param theLogger: logger instance; a fresh Logger is created when omitted
    """
    self.smaliDir = smaliDir
    self.smaliFiles = []       # collected smali file paths
    self.sensitiveAPIs = {}    # sensitive API findings (filled by analysis)
    self.sensitiveStrs = {}    # sensitive string findings
    self.adModules = {}        # detected advertisement modules
    self.urls = []             # URLs found in the code
    # BUG FIX: the original default `theLogger=Logger()` is evaluated once
    # at definition time, so all instances shared one Logger; use a None
    # sentinel and build a per-instance default instead.
    self.log = theLogger if theLogger is not None else Logger()
def __init__(self, decompressPath, avdName, curDir, theLogger=None):
    """Dynamic-analysis driver state.

    :param decompressPath: directory where the APK is decompressed
    :param avdName: name of the AVD used for the run
    :param curDir: main working directory
    :param theLogger: logger instance; a fresh Logger is created when omitted
    """
    self.decompressPath = decompressPath
    self.avdName = avdName
    self.mainDir = curDir
    # BUG FIX: the original default `theLogger=Logger()` is evaluated once
    # at definition time, so all instances shared one Logger; use a None
    # sentinel and build a per-instance default instead.
    self.log = theLogger if theLogger is not None else Logger()
    self.emulator = None            # set when the emulator is started
    self.logcatRedirectFile = None  # set when logcat output is redirected
def __init__(self, thePermissions, theWekaPath, theArffFile, theArffTemplate,
             theModelsDir, theOutFile, theLogger=None):
    """Weka-based classifier configuration.

    :param thePermissions: permissions to classify on
    :param theWekaPath: path to the Weka installation
    :param theArffFile: ARFF data file to write/use
    :param theArffTemplate: ARFF template file
    :param theModelsDir: directory holding trained models
    :param theOutFile: output file for classification results
    :param theLogger: logger instance; a fresh Logger is created when omitted
    """
    self.permissions = thePermissions
    self.wekaPath = theWekaPath
    self.arffFile = theArffFile
    self.arffTemplate = theArffTemplate
    self.outFile = theOutFile
    self.modelsDir = theModelsDir
    # BUG FIX: the original default `theLogger=Logger()` is evaluated once
    # at definition time, so all instances shared one Logger; use a None
    # sentinel and build a per-instance default instead.
    self.log = theLogger if theLogger is not None else Logger()
def main():
    """Entry point: parse args, seed RNGs, configure GPUs, build model, train."""
    # 1. run args; drop into pudb when --debug is set.
    run_args = parse_run_args()
    if run_args.debug:  # truthiness suffices; the bool() cast was redundant
        import pudb
        pudb.set_trace()

    # 2. experiment args merged with run args (run args win on conflict).
    args = parse_exp_args(run_args)
    for k, v in vars(run_args).items():
        vars(args)[k] = v

    # 3. seeds
    set_seeds(args.seed)

    # 4. GPU args
    args = parse_gpu_options(args)

    # 5. print model specs for the supported model types
    if args.model_type in ['vanilla', 'pnn']:
        print_model_specs(args)

    # Echo the final, merged argument set for the run log.
    print(DIV)
    print(args)

    # Build or load the model.
    if args.load_model:
        model = load_model(args)
    else:
        model = create_single_model(args)

    # Build the experiment logger.
    print(DIV)
    print('Creating logger for log dir {}/{}'.format(args.log_dir, args.id))
    logger = Logger(args.id, args.o, args.log_dir, args)

    # Launch training.
    train(args, model, logger)
def __init__(self, theLogger=None):
    """Container for dynamic-analysis events parsed from logcat.

    :param theLogger: logger instance; a fresh Logger is created when omitted
    """
    # BUG FIX: the original default `theLogger=Logger()` is evaluated once
    # at definition time, so all instances shared one Logger; use a None
    # sentinel and build a per-instance default instead.
    self.log = theLogger if theLogger is not None else Logger()
    self.dexClassLoders = []    # DexClassLoader used (original spelling kept)
    self.startedServices = []   # services started
    # Network operations: open, close, receive, send
    self.openedNets = []
    self.closedNets = []
    self.receivedNets = []
    self.sentNets = []
    self.accessedFiles = {}     # 'FdAccess': used to resolve file paths
    self.fileRWs = []           # file read and write events
    self.leakedDatas = []       # data leakage events
    self.phoneCalls = []        # phone calls
    self.sentSMSs = []          # SMS sends
    self.cryptOperations = []   # crypto operations
def __init__(self, apkObj, decompressPath, curDir, theLogger=None):
    """Static-analysis result holder for one APK.

    :param apkObj: APK object under analysis
    :param decompressPath: directory where the APK is decompressed
    :param curDir: main working directory
    :param theLogger: logger instance; a fresh Logger is created when omitted
    """
    self.apkObj = apkObj
    self.decompressPath = decompressPath
    self.mainDir = curDir
    # BUG FIX: the original default `theLogger=Logger()` is evaluated once
    # at definition time, so all instances shared one Logger; use a None
    # sentinel and build a per-instance default instead.
    self.log = theLogger if theLogger is not None else Logger()
    self.basicInfo = {}
    self.permissions = {}
    self.sensitiveAPIs = {}
    self.sensitiveStrs = {}
    self.adModules = {}
    self.urls = []
    # Manifest components.
    self.mainActivity = None
    self.activities = []
    self.services = []
    self.receivers = []
    self.providers = []
    # Components exported to other apps.
    self.exposedActivities = []
    self.exposedServices = []
    self.exposedReceivers = []
    self.classifyInfo = {}
    # Sensitive codes: native, dynamic, crypto, reflection
    self.sensitiveCodes = {}
    # Sensitive files: file suffix doesn't match magic code
    self.sensitiveFiles = {}
    self.riskValue = 0
    self.gexfOut = None
    self.malware = None
    self.isRepackaged = False
    self.orgAPKUrl = None
def __init__(self, theSdkPath='', thePort=5554, theImageDir='', thePcapFile='',
             theRunHeadless=False, theAvdName=None, theLogger=None):
    """Android emulator wrapper state.

    :param theSdkPath: path to the Android SDK
    :param thePort: emulator console port (5554 is the emulator default)
    :param theImageDir: directory of the system image
    :param thePcapFile: pcap capture output file
    :param theRunHeadless: run the emulator without a window
    :param theAvdName: name of the AVD to boot
    :param theLogger: logger instance; a fresh Logger is created when omitted
    """
    self.sdkPath = theSdkPath
    self.port = thePort
    self.imageDir = theImageDir
    self.pcapFile = thePcapFile
    self.runHeadless = theRunHeadless
    # BUG FIX: the original default `theLogger=Logger()` is evaluated once
    # at definition time, so all instances shared one Logger; use a None
    # sentinel and build a per-instance default instead.
    self.log = theLogger if theLogger is not None else Logger()
    self.avdName = theAvdName
    # Subprocess handles, populated when the emulator is started.
    self.emulator = None
    self.logcatRedirectFile = ''
    self.logcatRedirectProcess = None
    self.adbProcess = None
def __init__(self, gen, discr, dataset, lr, vis_screen, save_path, l1_coef,
             l2_coef, pre_trained_gen, pre_trained_disc, batch_size,
             num_workers, epochs, pretrained=False):
    """GAN trainer: wraps generator/discriminator, data loader and optimizers.

    :param gen: generator network
    :param discr: discriminator network
    :param dataset: training dataset
    :param lr: learning rate for both Adam optimizers
    :param vis_screen: visdom screen name passed to the project Logger
    :param save_path: directory for saved outputs
    :param l1_coef: L1 loss coefficient
    :param l2_coef: L2 loss coefficient
    :param pre_trained_gen: generator checkpoint path ('' / None to skip)
    :param pre_trained_disc: discriminator checkpoint path ('' / None to skip)
    :param batch_size: training batch size
    :param num_workers: DataLoader worker count
    :param epochs: number of training epochs
    :param pretrained: unused here; kept for caller compatibility
    """
    # BUG FIX: the original hard-coded torch.device("cuda"), which crashes
    # on CPU-only hosts; fall back to CPU when CUDA is unavailable.
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Wrap both nets for multi-GPU execution (no-op with a single device).
    self.generator = torch.nn.DataParallel(gen.to(self.device))
    self.discriminator = torch.nn.DataParallel(discr.to(self.device))

    # Load pre-trained weights when checkpoint paths are given; otherwise
    # apply the project's default weight initialisation.
    if pre_trained_disc:
        self.discriminator.load_state_dict(torch.load(pre_trained_disc))
    else:
        self.discriminator.apply(Utils.weights_init)

    if pre_trained_gen:
        self.generator.load_state_dict(torch.load(pre_trained_gen))
    else:
        self.generator.apply(Utils.weights_init)

    self.dataset = dataset
    self.noise_dim = 100   # latent noise vector size
    self.batch_size = batch_size
    self.num_workers = num_workers
    self.lr = lr
    self.beta1 = 0.5       # Adam beta1 (DCGAN convention)
    self.num_epochs = epochs
    self.l1_coef = l1_coef
    self.l2_coef = l2_coef

    self.data_loader = DataLoader(self.dataset,
                                  batch_size=self.batch_size,
                                  shuffle=True,
                                  num_workers=self.num_workers)

    self.optimD = torch.optim.Adam(self.discriminator.parameters(),
                                   lr=self.lr, betas=(self.beta1, 0.999))
    self.optimG = torch.optim.Adam(self.generator.parameters(),
                                   lr=self.lr, betas=(self.beta1, 0.999))

    self.logger = Logger(vis_screen)
    self.checkpoints_path = 'checkpoints'
    self.save_path = save_path
def __init__(self, theLogger=None):
    """Logcat log analyzer state.

    :param theLogger: logger instance; a fresh Logger is created when omitted
    """
    # BUG FIX: the original default `theLogger=Logger()` is evaluated once
    # at definition time, so all instances shared one Logger; use a None
    # sentinel and build a per-instance default instead.
    self.log = theLogger if theLogger is not None else Logger()
    self.logLines = []       # raw logcat lines
    self.logEntryList = []   # parsed log entries
    self.logcatParser = LogcatParser()
FP += val_fp FN += val_fn logger.log('k:{} val{}'.format(k, val)) P = TP * 1.0 / (TP + FP) R = TP * 1.0 / (TP + FN) F = 2 * P * R / (P + R) logger.log('F:{}'.format(F)) return F if __name__ == "__main__": torch.backends.cudnn.benchmark = True args = parse_args() with open(args.params) as f: cfg = yaml.load(f, Loader=yaml.FullLoader) # data dict logger = Logger(args.local_rank, cfg['log_path']) logger.log('start training') assert cfg['network']['backbone'] in ['resnet_18', '34', 'mobilenetv2'] distributed = False if 'WORLD_SIZE' in os.environ: distributed = int(os.environ['WORLD_SIZE']) > 1 if distributed: torch.cuda.set_device(args.local_rank) torch.distributed.init_process_group(backend='nccl', init_method='env://') w, h = cfg['dataset']['w'], cfg['dataset']['h'] net = parsingNet(network=cfg['network'], datasets=cfg['dataset']).cuda() if distributed: net = torch.nn.parallel.DistributedDataParallel( net, device_ids=[args.local_rank])
import xlsxwriter import os import sys from utils.db import DB from utils.db_search import DbSearch from utils.jira import JiraSta from utils.redmine import RedmineSta from sheet.write_proj_sheet import write_Proj_sheet from sheet.write_summaryChart_sheet01 import write_summaryChart_sheet01 from sheet.write_summary_sheet02 import write_summary_sheet02 from utils.common import Logger import config filename = "log_test" + time.strftime('%Y-%m-%d_%H_%M_%S') + ".txt" filename_path = os.path.join(config.root_dir, "log", filename) sys.stdout = Logger(filename_path) start_time = time.time() print( time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) + "---loading completed!") class Statistics(object): # options = {"server": "http://192.168.1.212:8088"} # auth = ("lig", "lig") # user_name:test user_passwd:test connect = DB().conn() """ type:缺陷管理工具类型 redmine_project_id:redmine接口识别的项目代号(查看redmine项目配置获取) redmine_tracker_id:redmine项目缺陷的跟踪代号(通过打印redmine单个issue的tracker_id获取) jira_tag:jira接口识别的项目代号(通过选择JIRA高级查询查看)
val_data_path = "./sample_data/news.dev" test_data_path = "./sample_data/news.test" bv = BertVec() num_epochs = 1 max_text_length = 512 # 主要为了Batch训练 vec_length = 768 num_classes = 2 labels = ["股票", "体育"] lr_init = 0.01 GPU_id = "cuda:0" os.makedirs(work_dir, exist_ok=True) device = torch.device(GPU_id if torch.cuda.is_available() else "cpu") logger = Logger(file_path="{}log.log".format(work_dir)) # --------------------data pipeline------------------- datasets_train = BaselineDataSet(train_data_path, labels, bv, max_text_length, vec_length) datasets_val = BaselineDataSet(val_data_path, labels, bv, max_text_length, vec_length) # num_workers不为0的话会死锁(原因见BaselineDataSet) dataLoader_train = torch.utils.data.DataLoader(datasets_train, batch_size=32, shuffle=True, num_workers=0) dataLoader_val = torch.utils.data.DataLoader(datasets_val, batch_size=32, shuffle=False,