Code example #1
File: seed.py Project: zhuyue1314/w3af
class seed(Process):
    """
    Consumer thread that takes fuzzable requests from a Queue that's populated
    by the crawl plugins and identified vulnerabilities by performing various
    requests.
    """

    def __init__(self, w3af_core):
        """
        :param w3af_core: The w3af core that we'll use for status reporting
        """
        super(seed, self).__init__()
        self.name = "Seed"

        self._w3af_core = w3af_core

        # See documentation in the property below
        self._out_queue = Queue()

    def get_result(self, timeout=0.5):
        return self._out_queue.get_nowait()

    def has_pending_work(self):
        return self._out_queue.qsize() != 0

    def join(self):
        return

    def terminate(self):
        return

    def seed_output_queue(self, target_urls):
        """
        Create the first fuzzable request objects based on the targets and put
        them in the output Queue.

        This will start the whole discovery process, since plugins are going
        to consume from that Queue and then put their results in it again in
        order to continue discovering.
        """
        # We only want to scan pages that are in current scope
        in_scope = lambda fr: fr.get_url().get_domain() == url.get_domain()

        for url in target_urls:
            try:
                #
                #    GET the initial target URLs in order to save them
                #    in a list and use them as our bootstrap URLs
                #
                response = self._w3af_core.uri_opener.GET(url, cache=True)
            except (w3afMustStopOnUrlError, w3afException, w3afMustStopException), w3:
                om.out.error("The target URL: %s is unreachable." % url)
                om.out.error("Error description: %s" % w3)
            except Exception, e:
                om.out.error("The target URL: %s is unreachable " "because of an unhandled exception." % url)
                om.out.error('Error description: "%s". See debug ' "output for more information." % e)
                om.out.error("Traceback for this error: %s" % traceback.format_exc())
            else:
                _seed = FuzzableRequest(response.get_uri())
                if in_scope(_seed):
                    self._out_queue.put((None, None, _seed))

                    # Update the set that lives in the KB
                    kb.kb.add_fuzzable_request(_seed)

        self._out_queue.put(POISON_PILL)
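The seed consumer above only bootstraps the crawl: each target URL becomes one item on the output queue, and a poison pill marks the end of the seeds so downstream consumers know when to stop. A minimal, standard-library-only sketch of that producer/consumer hand-off (not w3af's implementation, just the pattern):

import queue
import threading

POISON_PILL = object()   # sentinel marking the end of the seed items


def seed_output_queue(target_urls, out_queue):
    # put one bootstrap item per target, then tell the consumer to stop
    for url in target_urls:
        out_queue.put(url)
    out_queue.put(POISON_PILL)


def consumer(out_queue):
    # drain the queue until the poison pill arrives
    while True:
        item = out_queue.get()
        if item is POISON_PILL:
            break
        print('crawling %s' % item)


q = queue.Queue()
worker = threading.Thread(target=consumer, args=(q,))
worker.start()
seed_output_queue(['http://example.com/', 'http://example.org/'], q)
worker.join()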
Code example #2
def download_sifts():
    # download all sift files

    # first try to download a tarball containing all the sift xmls

    if not os.path.exists(SIFTS_TARBALL):
        try:
            urlretrieve(S3_BUCKET_URL.format('sifts.tar'), SIFTS_TARBALL)
        except RuntimeError:
            logging.warning('failed downloading sifts tarball')
        else:
            tf = tarfile.TarFile(SIFTS_TARBALL)
            tf.extractall(BASEDIR)  # the tarball contains directory data/sifts

    try:
        os.mkdir(SIFTS_FILE.format(''))
    except FileExistsError:
        pass
    ftp = ftplib.FTP("ftp.ebi.ac.uk")
    ftp.login()
    ftp.cwd('/pub/databases/msd/sifts/xml')
    filenames = ftp.nlst()  # get filenames within the directory
    ftp.quit()  # This is the “polite” way to close a connection
    filename_queue = Queue()
    for filename in filenames:
        filename_queue.put(filename)

    ftp_processes = [
        Process(target=download_ftp, args=(filename_queue, ))
        for _ in range(10)
    ]
    for process in ftp_processes:
        process.start()

    try:
        while not filename_queue.empty():
            print('{}/{} sifts downloaded'.format(
                len(filenames) - filename_queue.qsize(), len(filenames)),
                end='\r')
            time.sleep(1)

        for process in ftp_processes:
            process.join()
    except KeyboardInterrupt:
        # TODO kill not supported by multiprocessing.dummy..
        for process in ftp_processes:
            process.kill()
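Example #2 drives ten FTP worker processes from a shared multiprocessing.Queue and polls qsize() to print a progress counter. A minimal sketch of that monitoring pattern, with a hypothetical work() function standing in for download_ftp:

import multiprocessing as mp
import time
from queue import Empty


def work(task_queue):
    # hypothetical worker: pull tasks until the queue runs dry
    while True:
        try:
            task_queue.get(timeout=1)
        except Empty:
            return
        time.sleep(0.01)   # stand-in for the real FTP download


if __name__ == '__main__':
    tasks = list(range(100))
    task_queue = mp.Queue()
    for t in tasks:
        task_queue.put(t)

    workers = [mp.Process(target=work, args=(task_queue,)) for _ in range(4)]
    for w in workers:
        w.start()

    # qsize() is only an approximation (and unavailable on some platforms,
    # e.g. macOS), so treat the count as a progress hint, nothing more
    while not task_queue.empty():
        done = len(tasks) - task_queue.qsize()
        print('{}/{} downloaded'.format(done, len(tasks)), end='\r')
        time.sleep(0.5)

    for w in workers:
        w.join()
    print('\nall done')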
Code example #3
def downloadFiles(downloadFiles, n_downloadFiles, size_downloadFiles):
    global q, writeDict
    downloadChunks = partitionDownload(downloadFiles, n_downloadFiles, size_downloadFiles)
    n_downloadChunks = len(downloadChunks)
    n_threads = min(n_concurrentConnections, n_downloadChunks)
    if (n_threads < n_concurrentConnections):
        print('Number of connection threads was limited by download size.')

    q = Queue()
    completedDict = {}
    writeDict = {}
    delList = []
    for df in downloadFiles:
        writeDict[df['file_name']] = [0, int(df['file_size']/size_chunk)]
    print('Starting {} connection threads.'.format(n_threads))
    with progress.Bar(expected_size=size_downloadFiles) as bar:
        tp = ThreadPool(n_threads)
        tp.imap_unordered(downloadChunk, downloadChunks)
        current_size = 0
        while current_size < size_downloadFiles:
            if q.qsize() > 0:
                f, chunk_id, content = q.get()
                completedDict[(f['file_name'], chunk_id)] = (content, f['path'])
            for writer in writeDict:
                chunk = completedDict.get((writer, writeDict[writer][0]))
                if chunk != None:
                    current_size += writeChunk(chunk, writer, writeDict[writer][0])
                    del completedDict[(writer, writeDict[writer][0])]
                    writeDict[writer][0] += 1
                    if writeDict[writer][0] > writeDict[writer][1]:
                        delList.append(writer)
            if len(delList) > 0:
                for item in delList:
                    del writeDict[item]
                delList.clear()
            bar.show(current_size)
        tp.close()
        tp.join()
    print('Download complete.')
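In example #3 the chunks produced by the thread pool can finish in any order, so the writer keeps them in completedDict keyed by (file_name, chunk_id) and only flushes a chunk once it is the next one its file expects. The reordering logic, stripped of the network and file I/O (hypothetical in-memory data):

# completed chunks arrive in arbitrary order
arrivals = [('a.bin', 2, b'CC'), ('a.bin', 0, b'AA'), ('b.bin', 0, b'XX'),
            ('a.bin', 1, b'BB'), ('b.bin', 1, b'YY')]

completed = {}                            # (file_name, chunk_id) -> content
next_chunk = {'a.bin': 0, 'b.bin': 0}     # next chunk each file is waiting for
output = {'a.bin': b'', 'b.bin': b''}     # stand-in for the files on disk

for name, chunk_id, content in arrivals:
    completed[(name, chunk_id)] = content
    # flush every chunk that is now contiguous with what was already written
    for f in list(next_chunk):
        while (f, next_chunk[f]) in completed:
            output[f] += completed.pop((f, next_chunk[f]))
            next_chunk[f] += 1

print(output)   # {'a.bin': b'AABBCC', 'b.bin': b'XXYY'}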
Code example #4
def pipe_dream(layer, logger, args, backward_event, targets_queue, e,
               data_size, trainloader):

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(layer.parameters(),
                          lr=0.01,
                          momentum=0.9,
                          weight_decay=5e-4)
    layer.train()

    if dist.get_rank() == 0:
        criterion.cuda(0)
        output_queue = ThreadQueue(2)
        data_iter = iter(trainloader)
        batch_idx = 0
        while True:
            try:
                if output_queue.qsize() == 2:
                    backward_event.wait()
                    optimizer.zero_grad()
                    grad = torch.zeros([args.batch_size, 128, 16, 16])
                    dist.recv(tensor=grad, src=1)
                    outputs = output_queue.get()
                    outputs.backward(grad.cuda(0))
                    optimizer.step()
                    backward_event.clear()
                    continue
                else:
                    inputs, targets = next(data_iter)
                    inputs = inputs.cuda(0)
                    targets_queue.put(targets.numpy(), block=False)
                    outputs = layer(inputs)
                    send_opt = dist.isend(tensor=outputs.cpu(), dst=1)
                    send_opt.wait()
                    output_queue.put(outputs)
                    batch_idx += 1
            except StopIteration as stop_e:
                send_opt = dist.isend(tensor=torch.zeros(0), dst=1)
                send_opt.wait()
                while output_queue.qsize() > 0:
                    #backward_event.wait()
                    optimizer.zero_grad()
                    grad = torch.zeros([args.batch_size, 128, 16, 16])
                    dist.recv(tensor=grad, src=1)
                    outputs = output_queue.get()
                    outputs.backward(grad.cuda(0))
                    optimizer.step()
                    #backward_event.clear()
                break
    elif dist.get_rank() == 1:
        batch_idx = 0
        train_loss = 0
        correct = 0
        total = 0
        criterion.cuda(1)
        while True:
            print("while........................")
            try:
                rec_val = torch.zeros([args.batch_size, 128, 16, 16])
                dist.recv(tensor=rec_val, src=0)
                print("recv.......")
            except RuntimeError as error:
                print("runtime........................")
                #e.wait()
                break
            rec_val = rec_val.cuda(1)
            rec_val.requires_grad_()
            optimizer.zero_grad()
            outputs = layer(rec_val)
            targets = targets_queue.get(block=True, timeout=2)
            targets = torch.from_numpy(targets).cuda(1)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            progress_bar(
                batch_idx, data_size, 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                (train_loss /
                 (batch_idx + 1), 100. * correct / total, correct, total))
            if not backward_event.is_set():
                print("set.....")
                backward_event.set()
            send_opt = dist.isend(tensor=rec_val.grad.cpu(), dst=0)
            print("send.....")
            if batch_idx % 10 == 0:
                logger.error("train:" + str(train_loss / (batch_idx + 1)))

            batch_idx += 1
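On rank 0, example #4 uses output_queue.qsize() == 2 as a flow-control valve: at most two forward activations may be in flight before a backward step has to retire one. The same bookkeeping reduced to a queue-only sketch, with produce() and consume_gradient() as hypothetical stand-ins for the forward pass and the receive-gradient-and-backward step (no torch involved):

from queue import Queue

MAX_IN_FLIGHT = 2
in_flight = Queue()


def produce(step):
    return 'activation-%d' % step        # stand-in for a forward pass


def consume_gradient():
    return in_flight.get()               # stand-in for recv(grad) + backward()


for step in range(10):
    if in_flight.qsize() == MAX_IN_FLIGHT:
        # pipeline is full: retire one activation before producing another
        print('backward on', consume_gradient())
    in_flight.put(produce(step))

# flush whatever is still outstanding, mirroring the StopIteration branch
while in_flight.qsize() > 0:
    print('backward on', consume_gradient())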
Code example #5
class seed(Process):
    """
    Consumer thread that takes fuzzable requests from a Queue that's populated
    by the crawl plugins and identified vulnerabilities by performing various
    requests.
    """

    def __init__(self, w3af_core):
        """
        :param w3af_core: The w3af core that we'll use for status reporting
        """
        super(seed, self).__init__(name='SeedController')
        self.name = 'Seed'

        self._w3af_core = w3af_core

        # See documentation in the property below
        self._out_queue = Queue()

    def get_result(self, timeout=0.5):
        return self._out_queue.get_nowait()

    def has_pending_work(self):
        return self._out_queue.qsize() != 0

    def join(self):
        return

    def terminate(self):
        return

    def seed_output_queue(self, target_urls):
        """
        Create the first fuzzable request objects based on the targets and put
        them in the output Queue.

        This will start the whole discovery process, since plugins are going
        to consume from that Queue and then put their results in it again in
        order to continue discovering.
        """
        # We only want to scan pages that are in current scope
        in_scope = lambda fr: fr.get_url().get_domain() == url.get_domain()

        for url in target_urls:
            try:
                #
                #    GET the initial target URLs in order to save them
                #    in a list and use them as our bootstrap URLs
                #
                response = self._w3af_core.uri_opener.GET(url, cache=True)
            except ScanMustStopException, w3:
                om.out.error('The target server is unreachable. Stopping.')
                raise w3
            except HTTPRequestException, hre:
                msg = 'The target URL: "%s" is unreachable. Exception: "%s".'
                om.out.error(msg % (url, hre))
            except Exception, e:
                msg = 'The target URL: "%s" is unreachable because of an' \
                      ' unhandled exception. Error description: "%s". See' \
                      ' debug output for more information.\n' \
                      'Traceback for this error:\n%s'
                om.out.error(msg % (url, e, traceback.format_exc()))
Code example #6
File: seed.py Project: batmanWjw/w3af
class seed(Process):
    """
    Consumer thread that takes fuzzable requests from a Queue that's populated
    by the crawl plugins and identified vulnerabilities by performing various
    requests.
    """

    def __init__(self, w3af_core):
        """
        :param w3af_core: The w3af core that we'll use for status reporting
        """
        super(seed, self).__init__(name='SeedController')
        self.name = 'Seed'

        self._w3af_core = w3af_core

        # See documentation in the property below
        self._out_queue = Queue()

    def get_result(self, timeout=0.5):
        return self._out_queue.get_nowait()

    def has_pending_work(self):
        return self._out_queue.qsize() != 0

    def join(self):
        return

    def terminate(self):
        return

    def seed_output_queue(self, target_urls):
        """
        Create the first fuzzable request objects based on the targets and put
        them in the output Queue.

        This will start the whole discovery process, since plugins are going
        to consume from that Queue and then put their results in it again in
        order to continue discovering.
        """
        # We only want to scan pages that are in current scope
        in_scope = lambda fr: fr.get_url().get_domain() == url.get_domain()

        for url in target_urls:
            # batman-fix already done verify, no need below codes
            # try:
            #     #
            #     #    GET the initial target URLs in order to save them
            #     #    in a list and use them as our bootstrap URLs
            #     #
            #     response = self._w3af_core.uri_opener.GET(url, cache=True)
            # except ScanMustStopException, w3:
            #     om.out.error('The target server is unreachable. Stopping.')
            #     raise w3
            # except HTTPRequestException, hre:
            #     msg = 'The target URL: "%s" is unreachable. Exception: "%s".'
            #     om.out.error(msg % (url, hre))
            # except Exception, e:
            #     msg = 'The target URL: "%s" is unreachable because of an' \
            #           ' unhandled exception. Error description: "%s". See' \
            #           ' debug output for more information.\n' \
            #           'Traceback for this error:\n%s'
            #     om.out.error(msg % (url, e, traceback.format_exc()))
            # else:
            #     _seed = FuzzableRequest(response.get_uri())
            _seed = FuzzableRequest(url)
            if in_scope(_seed):
                self._out_queue.put((None, None, _seed))

                # Update the set that lives in the KB
                kb.kb.add_fuzzable_request(_seed)

        self._out_queue.put(POISON_PILL)
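One detail shared by all the seed variants: the in_scope lambda is defined before the for loop yet reads url from it. That works only because Python resolves the name when the lambda is called, by which time the loop has already bound url. A small demonstration of that late-binding behaviour, with a hypothetical get_domain() helper in place of w3af's URL class:

def get_domain(u):
    # hypothetical helper: crude domain extraction, good enough for the demo
    return u.split('/')[2]


# 'url' is looked up when the lambda runs, not when it is defined
in_scope = lambda candidate: get_domain(candidate) == get_domain(url)

for url in ['http://example.com/a', 'http://example.com/b']:
    print(in_scope('http://example.com/login'))   # True
    print(in_scope('http://other.org/login'))     # False

Calling in_scope before the loop would raise a NameError; passing the base URL explicitly (for example as a default argument) would make the dependency visible, but the original code relies on the lambda only ever being invoked inside the loop.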
Code example #7
File: seed.py Project: andresriancho/w3af
class seed(Process):
    """
    Consumer thread that takes fuzzable requests from a Queue that's populated
    by the crawl plugins and identified vulnerabilities by performing various
    requests.
    """

    def __init__(self, w3af_core):
        """
        :param w3af_core: The w3af core that we'll use for status reporting
        """
        super(seed, self).__init__(name='%sController' % self.get_name())

        self._w3af_core = w3af_core

        # See documentation in the property below
        self._out_queue = Queue()

    def get_name(self):
        return 'Seed'

    def get_result(self, timeout=0.5):
        return self._out_queue.get_nowait()

    def has_pending_work(self):
        return self._out_queue.qsize() != 0

    def join(self):
        return

    @property
    def out_queue(self):
        # This output queue can contain one of the following:
        #    * POISON_PILL
        #    * (plugin_name, fuzzable_request, AsyncResult)
        #    * An ExceptionData instance
        return self._out_queue

    def terminate(self):
        while True:
            try:
                self._out_queue.get_nowait()
            except Empty:
                break
            else:
                self._out_queue.task_done()

        om.out.debug('No more tasks in Seed consumer output queue.')

    def seed_output_queue(self, target_urls):
        """
        Create the first fuzzable request objects based on the targets and put
        them in the output Queue.

        This will start the whole discovery process, since plugins are going
        to consume from that Queue and then put their results in it again in
        order to continue discovering.
        """
        # We only want to scan pages that are in current scope
        in_scope = lambda fr: fr.get_url().get_domain() == url.get_domain()

        for url in target_urls:
            try:
                #
                #    GET the initial target URLs in order to save them
                #    in a list and use them as our bootstrap URLs
                #
                response = self._w3af_core.uri_opener.GET(url, cache=True)
            except ScanMustStopException, w3:
                om.out.error('The target server is unreachable. Stopping.')
                raise w3
            except HTTPRequestException, hre:
                msg = 'The target URL: "%s" is unreachable. Exception: "%s".'
                om.out.error(msg % (url, hre))
            except Exception, e:
                msg = ('The target URL: "%s" is unreachable because of an'
                       ' unhandled exception. Error description: "%s". See'
                       ' debug output for more information.\n'
                       'Traceback for this error:\n%s')
                om.out.error(msg % (url, e, traceback.format_exc()))
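Example #7 is the only variant whose terminate() actually drains the output queue, pairing each discarded item with task_done() so that a pending join() on the queue cannot hang; that implies the Queue used here supports task accounting. A minimal sketch of the drain-and-release pattern, assuming a standard-library multiprocessing.JoinableQueue:

import time
from multiprocessing import JoinableQueue
from queue import Empty

q = JoinableQueue()
for item in ('a', 'b', 'c'):
    q.put(item)
time.sleep(0.1)   # let the feeder thread flush the puts into the pipe

# discard everything still pending so q.join() cannot block forever
while True:
    try:
        q.get_nowait()
    except Empty:
        break
    else:
        q.task_done()

q.join()   # returns immediately: every put() has been matched by task_done()
print('queue drained')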