Example #1
 def add_snapshots_to_namespace(self, namespace):
     prefix = 'snapshots'
     importer = namespace.vars['inst_importer']
     exporter = namespace.vars['inst_exporter']
     snapshot_source = SnapshotStateOpenStack(exporter).create_snapshot()
     snapshot_dest = SnapshotStateOpenStack(importer).create_snapshot()
     path_source = "%s/source/%s.snapshot" % (prefix, snapshot_source.timestamp)
     path_dest = "%s/dest/%s.snapshot" % (prefix, snapshot_dest.timestamp)
     namespace.vars['snapshots']['source'].append({'path': path_source, 'timestamp': snapshot_source.timestamp})
     namespace.vars['snapshots']['dest'].append({'path': path_dest, 'timestamp': snapshot_dest.timestamp})
     dump_to_file(path_source, snapshot_source)
     dump_to_file(path_dest, snapshot_dest)
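Examples 1, 2, and 6 all rely on a repo-local dump_to_file(path, data) helper that takes the destination path first; its body is not shown on this page. A minimal sketch of one plausible implementation, assuming pickle serialization (the real helpers may write text or JSON instead):

import os
import pickle

def dump_to_file(path, data):
    # Create parent directories for paths like 'snapshots/source/<ts>.snapshot'.
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    with open(path, 'wb') as f:
        pickle.dump(data, f)
    return path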
Example #2
 def run(self, inst_exporter=None, inst_importer=None, snapshots=None, **kwargs):
     # A mutable default dict would be shared across calls; build a fresh one instead.
     if snapshots is None:
         snapshots = {'source': [], 'dest': []}
     snapshot_source = SnapshotStateOpenStack(inst_exporter).create_snapshot()
     snapshot_dest = SnapshotStateOpenStack(inst_importer).create_snapshot()
     path_source = "%s/source/%s.snapshot" % (self.prefix, snapshot_source.timestamp)
     path_dest = "%s/dest/%s.snapshot" % (self.prefix, snapshot_dest.timestamp)
     snapshots['source'].append({'path': path_source, 'timestamp': snapshot_source.timestamp})
     snapshots['dest'].append({'path': path_dest, 'timestamp': snapshot_dest.timestamp})
     dump_to_file(path_source, snapshot_source)
     dump_to_file(path_dest, snapshot_dest)
     return {
         'snapshots': snapshots
     }
Example #3
def get_readable_html(url, htmlfile=None):
    """given an url, return the local path to a readable version"""
    html = urllib.urlopen(url).read()
    sanitized_html = BeautifulSoup(html).prettify()  # detect encoding
    readable = readability.Document(sanitized_html)
    body = readable.summary()
    html = u"""
        <html>
        <head>
            <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
        </head>
        <body>
            %(body)s
        </body>
        </html>
        """  % {'body': body}
    return dump_to_file(BeautifulSoup(html).prettify(), htmlfile)
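Note that this repo's dump_to_file takes the content first and the target file second, the reverse of Examples 1 and 2. The code above is Python 2 (urllib.urlopen, u'' literals); a rough Python 3 equivalent, assuming the bs4 and readability-lxml packages and keeping the data-first helper, might look like:

# Sketch of a Python 3 port of Example 3 (an assumption, not from the source repo).
from urllib.request import urlopen
from bs4 import BeautifulSoup
from readability import Document

def get_readable_html(url, htmlfile=None):
    """Given a URL, return the local path to a readable version."""
    html = urlopen(url).read()
    sanitized_html = BeautifulSoup(html, 'html.parser').prettify()
    body = Document(sanitized_html).summary()
    page = ('<html><head><meta charset="utf-8" /></head>'
            '<body>%s</body></html>') % body
    # dump_to_file is the repo's data-first helper, not shown on this page.
    return dump_to_file(BeautifulSoup(page, 'html.parser').prettify(), htmlfile)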
Example #4
def main():
    parser = argparse.ArgumentParser(
        prog='memdumper',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent(""))

    parser.add_argument('-p',
                        '--process',
                        help='the process that you will be injecting to')
    parser.add_argument(
        '-o',
        '--outdir',
        type=str,
        metavar="dir",
        help='provide full output directory path. (def: \'dump\')')
    parser.add_argument('-U',
                        '--usb',
                        action='store_true',
                        help='device connected over usb')
    parser.add_argument('-v', '--verbose', action='store_true', help='verbose')
    parser.add_argument(
        '-r',
        '--read-only',
        action='store_true',
        help="dump read-only parts of memory. More data, more errors")
    arguments = parser.parse_args()

    # Define Configurations
    process = arguments.process
    max_chunk_size = 1e7  # 10MB

    outdir = os.path.join(os.getcwd(), "dump")
    if arguments.outdir:
        outdir = arguments.outdir

    PERMS = 'rw-'
    if arguments.read_only:
        PERMS = 'r--'

    if arguments.verbose:
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            level=logging.DEBUG)
    else:
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            level=logging.INFO)

    session = None
    try:
        if arguments.usb:
            session = frida.get_usb_device().attach(process)
        else:
            session = frida.attach(process)
    except Exception as e:
        logging.error("Can't connect to App")
        logging.error(str(e))
        sys.exit()

    # Selecting Output directory
    shutil.rmtree(outdir, ignore_errors=True)
    os.makedirs(outdir)

    logging.info("Starting Memory dump...")

    script = session.create_script("""'use strict';

        rpc.exports = {
          enumerateRanges: function (prot) {
            return Process.enumerateRangesSync(prot);
          },
          readMemory: function (address, size) {
            return Memory.readByteArray(ptr(address), size);
          }
        };

        """)
    script.on("message", utils.frida_on_message)
    script.load()

    agent = script.exports
    mem_ranges = agent.enumerate_ranges(PERMS)

    # TODO: Make an extension to dump all region names
    # import json
    # logging.debug("All sections:")
    # logging.debug("===============")
    # logging.debug(json.dumps(
    # {"path": mem_range["file"]["path"], "base": base, "size": size}, indent=4, sort_keys=True))
    # logging.debug("file_path: %s", str(mem_range["file"]["path"]))

    # Performing the memory dump
    for idx, mem_range in enumerate(mem_ranges):
        base = mem_range["base"]
        size = mem_range["size"]
        if (not "file" in mem_range or not "path" in mem_range["file"]):
            continue

        if not "dalvik-main" in mem_range["file"]["path"]:
            continue

        if size > max_chunk_size:
            logging.debug("Too big, splitting the dump into chunks")
            utils.split_big_chunk(agent, base, size, max_chunk_size, outdir)
            continue
        utils.dump_to_file(agent, base, size, outdir)
        idx += 1
        utils.print_progress(idx,
                             len(mem_ranges),
                             prefix='Progress:',
                             suffix='Complete',
                             max_percent=50)
    logging.info("Done")
Example #5
  def test(self, params):
    print('\n%s: testing...' % datetime.now())
    sys.stdout.flush()

    session = Session(self._graph, self.results_dir, params['model_name'])
    if 'init_step' not in params or params['init_step'] is None:
      init_step = session.init_step
    else:
      init_step = params['init_step']

    if 'step_num' not in params or params['step_num'] is None:
      step_num = int(np.ceil(float(self.fold_size) / self._batch_size))
    else:
      step_num = params['step_num']

    results_file_name = Tester.RESULTS_FILE + '-' + str(init_step) + '-' + \
                        self.fold_name + '-' + str(step_num) + '.json'
    results_file = os.path.join(self.results_dir, results_file_name)
    if not params['load_results'] or not os.path.isfile(results_file):
      session.init(self._classifier, init_step, params['restoring_file'])
      session.start()
      if init_step == 0:
        print('WARNING: testing an untrained model')
      total_step_num = step_num * params['epoch_num']
      test_num = total_step_num * self._batch_size
      print('%s: test_num=%d' % (datetime.now(), test_num))
      print('%s: epoch_num=%d' % (datetime.now(), params['epoch_num']))

      results = {}
      results['losses'] = np.zeros(test_num, dtype=np.float32)
      results['probs'] = np.zeros((test_num, Reader.CLASSES_NUM), dtype=np.float32)
      results['labels'] = np.zeros(test_num, dtype=np.int64)

      start_time = time.time()
      for step in range(total_step_num):
        #print('%s: eval_iter=%d' %(datetime.now(), i))
        loss_batch, prob_batch, label_batch = session.run(
          [self._cross_entropy_losses, self._probs, self._input['labels']]
        )
        begin = step * self._batch_size
        results['losses'][begin:begin+self._batch_size] = loss_batch
        results['probs'][begin:begin+self._batch_size, :] = prob_batch
        results['labels'][begin:begin + self._batch_size] = label_batch
        if (step+1) % step_num == 0:
          print "Epoch num: %d" % ((step+1)/step_num)
        if session.should_stop():
          break

      duration = time.time() - start_time
      print('%s: duration = %.1f sec' % (datetime.now(), float(duration)))
      sys.stdout.flush()
      if self.writer is not None:
        summary_str = session.run(self._all_summaries)
        self.writer.write_summaries(summary_str, init_step)

      session.stop()
    else:
      print('WARNING: using precomputed results')
      results = utils.load_from_file(results_file)

    results['loss'] = np.mean(results['losses']).item()
    results = self.get_all_stats(results)
    if self.writer is not None and not params['load_results']:
      self.writer.write_scalars({'losses/testing/cross_entropy_loss': results['loss'],
                                 'accuracy': results['accuracy']}, init_step)
    utils.dump_to_file(results, results_file)

    return init_step, results['loss']
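The .json file name and the round trip through utils.load_from_file suggest a JSON-backed helper pair. A minimal sketch under that assumption (converting the numpy arrays to lists is likewise an assumption, needed to make them serializable):

import json
import numpy as np

def dump_to_file(data, path):
    # numpy arrays are not JSON-serializable; convert them to lists first.
    serializable = {k: v.tolist() if isinstance(v, np.ndarray) else v
                    for k, v in data.items()}
    with open(path, 'w') as f:
        json.dump(serializable, f)

def load_from_file(path):
    with open(path) as f:
        return json.load(f)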
Example #6
 def test(self):
     for i in range(3):
         files = self.get_files()
         # files.insert(0, '>>%i' % i)
         utils.dump_to_file('findIt_%i.dump' % i, '\n'.join(files))