def handleThreadResults(self):
    """ Post-process the results of a finished analysis thread. """
    self.log.info(os.linesep)
    self.log.info('Handling thread results...')
    # If the thread died before recording an end time, stamp one now.
    if not self.endTimeStr:
        finishedAt = datetime.datetime.now()
        self.endTimeStr = '%s %s' % (Utils.getLogDateAsString(finishedAt),
                                     Utils.getLogTimeAsString(finishedAt))
    fileMd5 = self.apkObj.getMd5Hash().upper()
    # Create the per-sample report directory (side effect matters even
    # though resource storing below is currently disabled)
    apkReportDir = self.__createReportDir(fileMd5)
    # Store resources
    #self._storeResources(apkReportDir, fileMd5)
    # Clean up temporary artifacts for this sample
    self._doClearWork(fileMd5)
    # Persist the analysis results
    self.log.info("Update database")
    self.__updateDatabase()
def _getLogDir(self):
    """ Return the per-run log directory path: <logRoot>/<startDate>-<startTime>. """
    rootDir = self.configParser.getLogDir()
    runStamp = '%s-%s' % (Utils.getDateAsString(self.startTime),
                          Utils.getTimeAsString(self.startTime))
    return '%s/%s' % (rootDir, runStamp)
def run(self, save_dir="."):
    """
    Execute the research pipeline and export the collected papers
    to an xlsx spreadsheet inside *save_dir*.
    """
    results = self._research()
    columns = [
        'Title', 'Author', 'Download_link', 'From', 'Year', 'Cited Num',
        'Abstract'
    ]
    out_name = 'papers_{}.xlsx'.format(Utils.get_current_date())
    Utils.xlsx_writer(save_dir, out_name, results, columns)
def getLog(self):
    """
    Return the full logcat output of the emulator.

    Collects the main, 'events', and 'radio' logcat buffers with three
    separate host-side adb invocations and concatenates their output.

    BUG FIX: the original chained the three commands with '&&' tokens in a
    single argv list. `adb shell` forwards those tokens to the *device*
    shell, which then tried to execute the host adb binary on the device
    (and the path was also malformed: '%sadb' missing the '/'), so only the
    first buffer was ever usable. Three separate host-side invocations fix
    this without changing the returned value's shape (one string).
    """
    mainLog = self.runAdbCommand(
        ['shell', 'logcat', '-d', '-v', 'thread'])[0]
    eventsLog = self.runAdbCommand(
        ['shell', 'logcat', '-b', 'events', '-d', '-v', 'thread'])[0]
    radioLog = self.runAdbCommand(
        ['shell', 'logcat', '-b', 'radio', '-d', '-v', 'thread'])[0]
    return mainLog + eventsLog + radioLog
def __getThreadLogFile(self, fileMd5):
    """ Build the log file name for an app runner thread: <md5>-<date>-<time>.log """
    now = datetime.datetime.now()
    return '%s-%s-%s.log' % (fileMd5,
                             Utils.getDateAsString(now),
                             Utils.getTimeAsString(now))
def start(self):
    """ Start the emulator with the DroidBox images. """
    self.log.info('Start emulator', setTime=True)
    # Assemble the emulator command line (building it cannot raise OSError,
    # so it lives outside the try block; only Popen can).
    cmd = ['%s/emulator' % Utils.getEmulatorPath(self.sdkPath)]
    if self.avdName is not None:
        cmd += ['-avd', self.avdName]
    cmd += ['-tcpdump', self.pcapFile]
    cmd += ['-system', '%s/system.img' % self.imageDir]
    cmd += ['-ramdisk', '%s/ramdisk.img' % self.imageDir]
    cmd += ['-snapstorage', '%s/snapshots.img' % self.imageDir]
    cmd += ['-sdcard', '%s/sdcard.img' % self.imageDir]
    cmd += ['-no-snapshot-save']
    cmd += ['-port', str(self.port)]
    self.log.info('- args: %s' % ' '.join(cmd), setTime=True)
    try:
        self.emulator = subprocess.Popen(' '.join(cmd), shell=True,
                                         stdout=subprocess.PIPE,
                                         stdin=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
    except OSError as osErr:
        raise EmulatorClientError('Failed to start emulator \'%s\': %s'
                                  % (cmd, osErr.strerror),
                                  theCode=EmulatorClientError.START_EMULATOR_ERROR,
                                  theBaseError=osErr)
def startLogcatRedirect(self, theFile='/data/local/logcat.log', theMaxSize=4096):
    """
    Start redirecting logcat output into a rotating file on the device.

    :param theFile: device-side path of the redirect file
    :param theMaxSize: rotation size handed to `logcat -r` (kBytes)
    """
    self.log.info('Start logcat redirect, file: %s, size: %dkBytes'
                  % (theFile, theMaxSize), setTime=True)
    # Stop a running redirect first; if it is still alive afterwards, bail out.
    if self.logcatRedirectProcess is not None:
        self.endLogcatRedirect()
    if self.logcatRedirectProcess is not None:
        raise EmulatorClientError('Logcat redirect is already running',
                                  EmulatorClientError.LOGCAT_REDIRECT_RUNNING)
    cmd = ['%s/adb' % Utils.getAdbPath(self.sdkPath),
           '-s', 'emulator-%s' % str(self.port),
           'shell', 'logcat', '-v', 'thread',
           '-f', theFile, '-r', str(theMaxSize)]
    try:
        self.logcatRedirectProcess = subprocess.Popen(cmd,
                                                      stdout=subprocess.PIPE,
                                                      stdin=subprocess.PIPE,
                                                      stderr=subprocess.PIPE)
        #if self.verbose:
        #    print self.logcatRedirectProcess.communicate()
    except OSError as osErr:
        raise EmulatorClientError('Failed to run adb command \'%s\': %s'
                                  % (cmd, osErr.strerror),
                                  theCode=EmulatorClientError.ADB_RUN_ERROR,
                                  theBaseError=osErr)
def main():
    """ Command-line entry point: parse options, configure SandDroid, run it. """
    parser = argparse.ArgumentParser(description="An APK Analysis SandBox.")
    # BUG FIX: each option string must be passed as a SEPARATE positional
    # argument. The original passed '-r, --runHeadless' (and '-v, --version')
    # as one string, which argparse registers as a single, unusable option
    # name containing a comma and a space.
    parser.add_argument('-r', '--runHeadless', action='store_true',
                        default=False, dest='runHeadless',
                        help='Run emulator without window.')
    parser.add_argument('-v', '--version', action='version',
                        version='SandDroid v0.1beta')
    args = parser.parse_args()
    # Build the SandDroid instance from the configuration file
    sandDroid = SandDroid(SandDroidConst.CONFIG_FILE_PATH, theLogger=Logger())
    # Apply command-line settings
    sandDroid.runHeadless = args.runHeadless
    sandDroid.startTime = datetime.datetime.now()
    # Build the run logger (file mode, always echoed to console)
    sandDroid._createLogDir(sandDroid._getLogDir())
    logLevel = LogLevel.INFO
    logger = Logger(theLevel=logLevel,
                    theMode=LogMode.FILE,
                    theLogFile='%s/%s-SandDroid-run.log'
                               % (sandDroid._getLogDir(),
                                  Utils.getTimeAsString(sandDroid.startTime)),
                    thePrintAlwaysFlag=True)
    sandDroid.log = logger
    sandDroid.run()
    sandDroid.log.log.close()
def runAdbCommand(self, theArgs):
    """
    Run a simple adb command against this emulator instance.

    :param theArgs: list of adb arguments appended after '-s emulator-<port>'
    :return: the (stdout, stderr) tuple from the finished command

    BUG FIX: the original never returned anything, although callers such as
    getLog() index into the result (`runAdbCommand(args)[0]`), which raised
    a TypeError on None. The process output is now collected with
    communicate() and returned.
    """
    args = ['%s/adb' % Utils.getAdbPath(self.sdkPath),
            '-s', 'emulator-%s' % str(self.port)]
    args.extend(theArgs)
    self.log.info('-Exec adb command: %s' % args, setTime=True)
    try:
        self.adbProcess = subprocess.Popen(args,
                                           stdout=subprocess.PIPE,
                                           stdin=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
    except OSError as osErr:
        raise EmulatorClientError('-Failed to run adb command \'%s\': %s'
                                  % (args, osErr.strerror),
                                  theCode=EmulatorClientError.ADB_RUN_ERROR,
                                  theBaseError=osErr)
    # Wait for completion and hand the captured output back to the caller.
    return self.adbProcess.communicate()
def run(self):
    """ Run SandDroid to analyze apk files.

    Main scheduling loop: spawns an AppGettingThread that feeds
    self.appList, then loops forever assigning queued APKs to free
    analysis thread slots (self.threadList / self.threadActiveMask).
    Exits only on an invalid configuration, a failed DB session, or an
    unexpected exception inside the loop.
    """
    # check configure
    if not self.__isConfigValid():
        return
    else:
        # start getting apps — producer thread that fills self.appList
        self.log.info('Start AppGettingThread to get apps...')
        appGettingThread = AppGettingThread(self)
        appGettingThread.daemon = False
        appGettingThread.start()
    # Run
    self.log.info('Starting Analyze....')
    self.log.info(SandDroidConst.LONG_EQUAL_SIGN)
    # Inits — pre-size one slot per worker thread
    for i in xrange(self.numThreads):
        self.threadList.append(None)
        self.threadActiveMask.append(False)
    # build database session
    dataModel = DataModel(self.configParser.getDbUsr(),
                          self.configParser.getDbPswd(),
                          self.configParser.getDbHost(),
                          self.configParser.getDbPort(),
                          self.configParser.getDbName())
    try:
        Session = dataModel.createSession(False)
        session = Session()
    except Exception, e:
        print 'Can not create sqlAlchemy session instance'
        return
    while True:
        try:
            # Get app and start thread — only when a worker slot is free
            # AND there is a queued APK
            if self.numRunningThreads < self.numThreads and len(
                    self.appList):
                # Get app
                app = self.appList.pop(0)
                fileMd5 = Utils.calcMD5Hash(app).upper()
                # if self.__isDbExist(session, fileMd5, app):
                #     continue
                # Check for inactive thread
                threadIndex = -1
                threadIndex = self.__checkInactiveThreads(threadIndex)
                if threadIndex == -1:
                    # Should not happen given the guard above; skip this
                    # iteration rather than crash. NOTE(review): the popped
                    # app is dropped here — confirm this is intended.
                    self.log.error(
                        'No free thread index found even though numRunningThreads < numThreads'
                    )
                    continue
                self.runningApps.append(app)
                self.log.info(
                    'Free thread found (%d) for analyzing %s'
                    % (threadIndex + 1, os.path.basename(app)))
                self.log.info('Analyzing %s...' % app)
                # Determine logger for each thread
                logFileName = self.__getThreadLogFile(fileMd5)
                threadLogger = self.__createThreadLogFile(logFileName)
                self.threadLogFileList.append(logFileName)
                # build decompress directory
                decompressDir = self.__buildDecompressDir(fileMd5)
                try:
                    # Parsing the APK may raise for corrupt files
                    apkObj = apk.APK(app, self.keytoolPath, decompressDir,
                                     threadLogger)
                except Exception:
                    self.__processFailed(app, fileMd5)
                    ex = traceback.format_exc()
                    self.log.exce(ex)
                    continue
                if not apkObj.is_valid_APK():
                    self.log.info('%s is an invalid APK file!'
                                  % app)
                    self.__processFailed(app, fileMd5)
                    continue
                apkObj.main_dir = self.mainDir
                apkObj.md5 = fileMd5
                # Build thread and start
                runnerThread = self.__buildRunnerThread(
                    threadIndex, apkObj, decompressDir, threadLogger)
                runnerThread.start()
            # No free thread -> check timing
            else:
                if len(self.appList):
                    self.log.info(
                        'No free thread found, wait for free thread')
                elif self.numRunningThreads < self.numThreads:
                    self.log.info(
                        'Listen to the app folder,waiting...')
                # Check for active threads — reap finished workers
                self.__doActiveThreads()
                # Sleep
                time.sleep(SandDroidConst.THREAD_SLEEP_SHORT_TIME)
            time.sleep(SandDroidConst.THREAD_SLEEP_LONG_TIME)
        except Exception, e:
            # NOTE(review): any unexpected error terminates the whole
            # scheduling loop — confirm this is the desired policy.
            print e
            break
def run(self):
    """ Run the thread.

    Analysis worker entry point: records the start timestamp, runs the
    static-analysis phase (dynamic/logcat phases are currently disabled),
    and records the end timestamp. checkForCancelation() is called between
    phases so a cancelled thread stops at a phase boundary.
    """
    fileMd5 = self.apkObj.getMd5Hash().upper()
    # Log information
    self.log.info('Start thread to analyzing...')
    self.log.info('FileMD5: %s' % fileMd5)
    # build database session — currently disabled (dead string literal below
    # kept verbatim; presumably the session is created elsewhere now)
    self.log.info('Create SqlAlchemy Session...')
    '''dataModel = DataModel(self.configParser.getDbUsr(),
                          self.configParser.getDbPswd(),
                          self.configParser.getDbHost(),
                          self.configParser.getDbPort(),
                          self.configParser.getDbName()
                          )
    try:
        Session = dataModel.createSession(False)
        self.session = Session()
    except Exception,e:
        self.log.exce("Create SqlAlchemy session error %s" % e)
        return
    '''
    # Run
    try:
        startTime = datetime.datetime.now()
        self.startTimeStr = '%s %s' % (Utils.getLogDateAsString(startTime),
                                       Utils.getLogTimeAsString(startTime))
        self.logcatFile = self.__getLogcatFilePath()
        # Static Analysis
        self.checkForCancelation()
        self.log.info(os.linesep)
        self.log.info('==Static Analysis...')
        self.staticAnalyze()
        # Dynamic Analysis — phase currently disabled
        self.checkForCancelation()
        #self.log.info(os.linesep)
        #self.log.info('==Dynamic Analysis...')
        #self.dynamicAnalyze()
        # Shutdown Emulator
        #self.log.info('Shutdown emulator %s' % self.avdName)
        #self.emulator.shutDown()
        # Logcat Analysis
        #self.log.info(os.linesep)
        #self.log.info('== Analyze Logcat file...')
        #self.logcatAnalyze(self.logcatFile )
        # End — record completion time for reporting
        endTime = datetime.datetime.now()
        self.endTimeStr = '%s %s' % (Utils.getLogDateAsString(endTime),
                                     Utils.getLogTimeAsString(endTime))
    except Exception:
        # Log the full traceback; the thread itself must not propagate
        exc = traceback.format_exc()
        self.log.exce(exc)
def _train_gan(self, cls, steps_p_e):
    """
    Train the conditional GAN.

    Args:
        cls: if truthy, add the matching-aware (CLS) wrong-image term to
             the discriminator loss.
        steps_p_e: steps per epoch; the data loader is fully iterated once
                   per step.

    FIX: removed dead code from the original (duplicated no-op
    `sample = sample` assignments and a stray `#teration = 0` comment);
    all remaining logic is unchanged.
    """
    criterion = nn.BCELoss()
    l2_loss = nn.MSELoss()
    l1_loss = nn.L1Loss()
    for epoch in range(self.num_epochs):
        for i in range(steps_p_e):
            for j, sample in enumerate(self.data_loader, 0):
                # NOTE(review): `iteration` tracks the step index i, not a
                # global counter, so it repeats across batches — confirm
                # this is what the logger expects.
                iteration = i
                print(iteration)
                # Undo the loader's [0,1] scaling, then map to [-1,1]
                # (tanh range): x*255 -> (x-127.5)/127.5
                right_images = sample['right_images'] * 255
                right_embed = sample['right_embed']
                wrong_images = sample['wrong_images'] * 255
                right_images = right_images.sub_(127.5).div_(127.5)
                wrong_images = wrong_images.sub_(127.5).div_(127.5)
                right_images = Variable(right_images.float()).cuda()
                right_embed = Variable(right_embed.float()).cuda()
                wrong_images = Variable(wrong_images.float()).cuda()
                real_labels = torch.ones(right_images.size(0))
                fake_labels = torch.zeros(right_images.size(0))
                # ======== One sided label smoothing ==========
                # Helps preventing the discriminator from overpowering the
                # generator adding penalty when the discriminator is too confident
                # =============================================
                smoothed_real_labels = torch.FloatTensor(
                    Utils.smooth_label(real_labels.numpy(), -0.1))
                real_labels = Variable(real_labels).cuda()
                smoothed_real_labels = Variable(smoothed_real_labels).cuda()
                fake_labels = Variable(fake_labels).cuda()
                # Train the discriminator
                self.discriminator.zero_grad()
                outputs, activation_real = self.discriminator(
                    right_images, right_embed)
                real_loss = criterion(outputs, smoothed_real_labels)
                real_score = outputs
                if cls:
                    # Matching-aware term: real image with mismatched
                    # embedding should be classified fake
                    outputs, _ = self.discriminator(wrong_images, right_embed)
                    wrong_loss = criterion(outputs, fake_labels)
                    wrong_score = outputs
                noise = Variable(torch.randn(right_images.size(0), 100)).cuda()
                noise = noise.view(noise.size(0), 100, 1, 1)
                fake_images = self.generator(right_embed, noise)
                outputs, _ = self.discriminator(fake_images, right_embed)
                fake_loss = criterion(outputs, fake_labels)
                fake_score = outputs
                d_loss = real_loss + fake_loss
                if cls:
                    d_loss = d_loss + wrong_loss
                d_loss.backward()
                self.optimD.step()
                # Train the generator
                self.generator.zero_grad()
                noise = Variable(torch.randn(right_images.size(0), 100)).cuda()
                noise = noise.view(noise.size(0), 100, 1, 1)
                fake_images = self.generator(right_embed, noise)
                outputs, activation_fake = self.discriminator(
                    fake_images, right_embed)
                _, activation_real = self.discriminator(
                    right_images, right_embed)
                activation_fake = torch.mean(activation_fake, 0)
                activation_real = torch.mean(activation_real, 0)
                # ======= Generator Loss function============
                # This is a customized loss function, the first term is the regular cross entropy loss
                # The second term is feature matching loss, this measure the distance between the real and generated
                # images statistics by comparing intermediate layers activations
                # The third term is L1 distance between the generated and real images, this is helpful for the conditional case
                # because it links the embedding feature vector directly to certain pixel values.
                # ===========================================
                g_loss = criterion(outputs, real_labels) \
                    + self.l2_coef * l2_loss(activation_fake, activation_real.detach()) \
                    + self.l1_coef * l1_loss(fake_images, right_images)
                g_loss.backward()
                self.optimG.step()
                self.logger.log_iteration_gan(iteration, d_loss, g_loss,
                                              real_score, fake_score)
                if iteration % 5 == 0:
                    self.logger.draw(right_images, fake_images)
        #self.logger.plot_epoch_w_scores(epoch)
        if epoch % 10 == 0:
            Utils.save_checkpoint(self.discriminator, self.generator,
                                  self.checkpoints_path, self.save_path,
                                  epoch)