    def setUp(self):
        app = Flask(__name__)

        mongo = mongomock.MongoClient()
        crud.crud(app, mongo, 'tests')

        self.app = app.test_client()

        # each test object keeps its current state, a base snapshot and an
        # (initially empty) JSON patch history
        self.objects = [dict(foo='bar'), dict(foo='rab')]
        self.objects = [
            dict(current=obj.copy(), base=obj.copy(), patch=[])
            for obj in self.objects
        ]

        # mutate the first object and record the change as a timestamped patch
        obj = self.objects[0]['current'].copy()
        obj['bat'] = 'br'
        obj['foo'] = 'br'
        patch = jsonpatch.make_patch(self.objects[0]['current'], obj).patch
        patch[0]['time'] = utils.unix_time(
            utils.date_to_datetime('2016-01-01'))
        patch[1]['time'] = utils.unix_time(
            utils.date_to_datetime('2017-01-01'))
        self.objects[0]['patch'] = patch
        self.objects[0]['current'] = obj

        # persist the fixtures into the mocked collection
        for obj in self.objects:
            obj['_id'] = mongo.db.tests.insert(obj)
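For reference, this is roughly what jsonpatch.make_patch produces for the change made in this fixture (a quick illustration, not part of the original test; the exact op order may vary between jsonpatch versions, which is why the fixture addresses patch[0] and patch[1] explicitly):

import jsonpatch

# diff between the base object and the mutated copy: two operations
patch = jsonpatch.make_patch({'foo': 'bar'}, {'foo': 'br', 'bat': 'br'}).patch
print(patch)
# e.g. [{'op': 'add', 'path': '/bat', 'value': 'br'},
#       {'op': 'replace', 'path': '/foo', 'value': 'br'}]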
Example #2
    def process_direction(self, sensor: DistanceSensor, floor: float,
                          db_field: str):
        '''
        When a sensor is tripped, determines whether the person went all the way
        through or turned back before tripping the second sensor.

        Note:
            There is an allotted time to trip the second sensor before it times out and
            "thinks" the person has turned back.
        '''
        print('tripped')
        # ISO 8601 format (yyyy-mm-dd) because we're not savages
        date = utils.iso_date()
        start_time = utils.unix_time()  # used for timeout
        # poll the sensors until timeout
        while utils.unix_time() < start_time + self.timeout:
            if sensor.distance < floor:
                print('adding to db')
                self.db.increment(db_field, date)
                sensor.wait_for_out_of_range()
                return
            sleep(0.001)
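A hypothetical driver for the method above could pair two sensors to tell entries from exits. This is only a sketch: the gpiozero DistanceSensor, the pin numbers, the floor value, the 'entries'/'exits' field names, and the counter object exposing process_direction are all assumptions, not part of the original code.

from time import sleep

from gpiozero import DistanceSensor  # assumption: sensors come from gpiozero

# Illustrative pins; the real wiring depends on the hardware.
outer = DistanceSensor(echo=17, trigger=4)   # sensor nearer the doorway
inner = DistanceSensor(echo=27, trigger=22)  # sensor nearer the room

def watch(counter, floor=0.5):
    """Poll both sensors and let process_direction confirm the direction."""
    while True:
        if outer.distance < floor:
            # outer tripped first -> try to confirm an entry on the inner sensor
            counter.process_direction(inner, floor, 'entries')
        elif inner.distance < floor:
            # inner tripped first -> try to confirm an exit on the outer sensor
            counter.process_direction(outer, floor, 'exits')
        sleep(0.001)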
def main():
    """
    Processing censys link page to the json
    https://scans.io/study/sonar.ssl
    :return:
    """
    parser = argparse.ArgumentParser(
        description='Processes SonarSSL links from the page, generates json')

    parser.add_argument('--url',
                        dest='url',
                        nargs=argparse.ZERO_OR_MORE,
                        default=[],
                        help='censys links')

    parser.add_argument('file',
                        nargs=argparse.ZERO_OR_MORE,
                        default=[],
                        help='censys link file')

    args = parser.parse_args()

    # Process the input

    dataset_idx = 10
    datasets = {}

    input_objects = []
    for file_name in args.file:
        input_objects.append(input_obj.FileInputObject(file_name))
    for url in args.url:
        input_objects.append(input_obj.LinkInputObject(url))

    if len(input_objects) == 0:
        print('Error: no input given')
        sys.exit(1)

    for iobj in input_objects:
        logger.info('Processing %s' % iobj)

        with iobj:
            data = iobj.text()
            tree = html.fromstring(data)
            tables = tree.xpath('//table')

            if len(tables) == 0:
                logger.error('Parsing problem: no tables found')
                continue

            for tbl_idx, table in enumerate(tables):
                rows = table[1]  # tbody
                rows_cnt = len(rows)
                if rows_cnt < 2:
                    logger.warning('Table %d does not have enough rows: %d' %
                                   (tbl_idx, rows_cnt))
                    continue

                for row_idx, row in enumerate(rows):
                    if row[0].tag != 'td':
                        continue

                    file_href = row[0][0].attrib['href'].strip()
                    file_name = row[0][0].text_content().strip()
                    file_hash = row[2].text_content().strip()
                    file_size = row[3].text_content().strip()
                    file_date = row[4].text_content().strip()

                    if file_date not in datasets:
                        dataset = collections.OrderedDict()
                        dataset['id'] = dataset_idx
                        dataset['date'] = file_date
                        dataset['date_utc'] = utils.unix_time(
                            datetime.strptime(file_date, '%Y-%m-%d'))
                        dataset['files'] = collections.OrderedDict()
                        datasets[file_date] = dataset
                        dataset_idx += 1
                    else:
                        dataset = datasets[file_date]

                    link = Link(file_name, file_href, file_size, file_hash)
                    dataset['files'][file_name] = link.to_json()

    js = collections.OrderedDict()
    js['generated'] = time.time()
    js['data'] = sorted([datasets[x] for x in datasets], key=lambda x: x['id'])
    print(json.dumps(js, indent=2))
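The Link class used above is not shown in this snippet; a minimal sketch matching its usage here (four constructor arguments and a to_json() returning a plain dict) might look like the following. The field names are guesses, not the project's actual attribute names.

import collections

class Link(object):
    """Sketch of the link record serialized into dataset['files']."""
    def __init__(self, name, href, size, sha_hash):
        self.name = name
        self.href = href
        self.size = size
        self.hash = sha_hash

    def to_json(self):
        js = collections.OrderedDict()
        js['name'] = self.name
        js['href'] = self.href
        js['size'] = self.size
        js['hash'] = self.hash
        return js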
def main():
    """
    Processing censys link page to the json
    https://censys.io/data/443-https-tls-alexa_top1mil/historical
    https://censys.io/data/443-https-tls-full_ipv4/historical
    :return:
    """
    parser = argparse.ArgumentParser(
        description='Processes Censys links from the page, generates json')

    parser.add_argument('--url',
                        dest='url',
                        nargs=argparse.ZERO_OR_MORE,
                        default=[],
                        help='censys links')

    parser.add_argument('file',
                        nargs=argparse.ZERO_OR_MORE,
                        default=[],
                        help='censys link file')

    args = parser.parse_args()

    dataset_idx = 10
    datasets = []

    input_objects = []
    for file_name in args.file:
        input_objects.append(input_obj.FileInputObject(file_name))
    for url in args.url:
        input_objects.append(input_obj.LinkInputObject(url))

    if len(input_objects) == 0:
        print('Error: no input given')
        sys.exit(1)

    for iobj in input_objects:
        logger.info('Processing %s' % iobj)

        with iobj:
            data = iobj.text()
            tree = html.fromstring(data)
            tables = tree.xpath('//table')

            if len(tables) == 0:
                logger.error(
                    'Parsing problem: no tables found (probably not logged in)'
                )
                continue

            for tbl_idx, table in enumerate(reversed(tables)):
                rows = table[0]
                rows_cnt = len(rows)
                if rows_cnt < 2:
                    logger.warning('Table %d does not have enough rows: %d' %
                                   (tbl_idx, rows_cnt))
                    continue

                prev_h2 = table.getprevious()
                header = prev_h2.text_content().strip()

                dataset = collections.OrderedDict()
                dataset['id'] = dataset_idx
                dataset['date'] = header
                dataset['date_utc'] = utils.unix_time(
                    datetime.strptime(header, '%Y-%m-%d %H:%M:%S'))
                dataset['files'] = collections.OrderedDict()
                for row_idx, row in enumerate(rows):
                    if row_idx == 0 or row[0].tag != 'td':
                        continue

                    file_href = row[0][0].attrib['href'].strip()
                    file_code = row[0][0].attrib['download'].strip()
                    file_name = row[0][0].text_content().strip()

                    file_type = row[1].text_content().strip()
                    file_size = row[2].text_content().strip()
                    file_hash = row[3].text_content().strip()
                    # logger.info('File %d %s %s %s %s %s %s' % (row_idx, file_href, file_code, file_name, file_type, file_size, file_hash))

                    link = Link(file_name, file_code, file_href, file_size,
                                file_type, file_hash)
                    dataset['files'][file_name] = link.to_json()

                if 'zgrab-results.json.lz4' not in dataset['files']:
                    logger.warning('Zgrab result file not found in %d' %
                                   dataset_idx)
                    logger.info(
                        'H: %s, files: %s' %
                        (header, ' '.join([x for x in dataset['files']])))

                datasets.append(dataset)
                dataset_idx += 1

    js = collections.OrderedDict()
    js['generated'] = time.time()
    js['data'] = datasets
    print(json.dumps(js, indent=2))
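Both link-processing scripts rely on input_obj.FileInputObject and input_obj.LinkInputObject, used as context managers that expose text(). A rough sketch of that interface, inferred from the call sites (the real module may differ, and the use of requests is an assumption):

import requests  # assumption: LinkInputObject fetches the page over HTTP

class FileInputObject(object):
    """Reads the link page from a local file."""
    def __init__(self, fname):
        self.fname = fname
        self.fh = None

    def __enter__(self):
        self.fh = open(self.fname, 'r')
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.fh.close()

    def text(self):
        return self.fh.read()

class LinkInputObject(object):
    """Fetches the link page from a URL."""
    def __init__(self, url):
        self.url = url

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def text(self):
        return requests.get(self.url).text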
Example #5
    def _fill_day(self):
        self.clear_widgets()
        sse = int(unix_time(self.dt))
        for i in range(65):
            self.add_widget(AgendaItem(timestamp=(i * 60 * 15) + sse))
Example #6
    def test_unix_to_date(self):
        dt = utils.date_to_datetime(DATE)
        unix = utils.unix_time(dt)
        assert utils.unix_to_date(unix) == DATE
Example #7
    def test_unix_time(self):
        dt = utils.date_to_datetime(DATE)
        assert utils.unix_time(dt) == 1530662400
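The two tests above pin down the behaviour expected of the time helpers: 1530662400 is 2018-07-04 00:00:00 UTC, so the round trip must treat dates as UTC midnights. A minimal sketch consistent with them, assuming UTC semantics (not necessarily the project's actual utils implementation):

import calendar
from datetime import datetime

def date_to_datetime(date_str):
    # 'YYYY-MM-DD' -> naive datetime at midnight
    return datetime.strptime(date_str, '%Y-%m-%d')

def unix_time(dt=None):
    # seconds since the epoch, interpreting the datetime as UTC
    dt = dt if dt is not None else datetime.utcnow()
    return calendar.timegm(dt.timetuple())

def unix_to_date(timestamp):
    # inverse of the above, back to 'YYYY-MM-DD'
    return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d')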
Example #8
    def process_record(self, idx, record):
        """
        Current record
        {"e":"0x10001","count":1,"source":["COMMON_NAME","NOT_BEFORE_2010-11-19"],
        "id":32000000,"cn":"COMMON_NAME","n":"0x...","timestamp":1475342704760}

        :param idx: record index
        :param record: parsed record as a dict
        :return:
        """
        record['id'] = self.ctr

        ip = utils.defvalkey(record, 'ip')
        domain = utils.defvalkey(record, 'domain')
        timestamp_fmt = utils.defvalkey(record, 'timestamp')
        self.last_record_seen = record

        if not self.is_record_tls(record):
            self.not_tls += 1
            return

        server_cert = record['data']['tls']['server_certificates']
        if 'validation' not in server_cert or 'certificate' not in server_cert:
            self.not_cert_ok += 1
            return

        # Process chains anyway as we may be interested in them even though the server is not RSA
        chains_roots = self.process_roots(idx, record, server_cert)

        # Process server cert
        trusted = utils.defvalkey(server_cert['validation'], 'browser_trusted')
        matches = utils.defvalkey(server_cert['validation'], 'matches_domain')
        cert_obj = server_cert['certificate']

        if 'parsed' not in cert_obj:
            self.not_parsed += 1
            return

        parsed = cert_obj['parsed']
        try:
            ret = collections.OrderedDict()
            if parsed['subject_key_info']['key_algorithm']['name'].lower() != 'rsa':
                self.not_rsa += 1
                return

            ret['id'] = self.ctr
            ret['ip'] = ip
            ret['count'] = 1
            ret['fprint'] = utils.defvalkey(parsed, 'fingerprint_sha256')
            ret['fprint1'] = utils.defvalkey(parsed, 'fingerprint_sha1')
            utils.set_nonempty(ret, 'dom', domain)

            tstamp = utils.try_parse_timestamp(timestamp_fmt)
            ret['timestamp'] = utils.unix_time(tstamp)
            utils.set_nonempty(ret, 'trust', trusted)
            utils.set_nonempty(ret, 'match', matches)
            utils.set_nonempty(
                ret, 'valid', utils.defvalkeys(parsed, ['signature', 'valid']))
            utils.set_nonempty(
                ret, 'ssign',
                utils.defvalkeys(parsed, ['signature', 'self_signed']))

            self.fill_cn_src(ret, parsed)
            self.fill_rsa_ne(ret, parsed)
            ret['chains'] = chains_roots
            self.last_record_flushed = record

            if not self.is_dry():
                self.file_leafs_fh.write(json.dumps(ret) + '\n')

        except Exception as e:
            logger.warning('Certificate processing error %s: %s' %
                           (self.ctr, e))
            logger.debug(traceback.format_exc())
            self.not_cert_ok += 1
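process_record leans on a few small dict helpers from utils; their behaviour can be inferred from the call sites above, roughly as below (a sketch, not the project's actual implementation):

def defvalkey(js, key, default=None):
    """Return js[key] if the key is present, otherwise the default."""
    if js is None or key not in js:
        return default
    return js[key]

def defvalkeys(js, keys, default=None):
    """Nested lookup, e.g. defvalkeys(parsed, ['signature', 'valid'])."""
    cur = js
    for key in keys:
        if cur is None or key not in cur:
            return default
        cur = cur[key]
    return cur

def set_nonempty(dst, key, value):
    """Store the value under key only when it is not None."""
    if value is not None:
        dst[key] = value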
Example #9
    def get_activity_id_by_timestamp(self, timestamp):
        if isinstance(timestamp, int):
            timestamp = datetime.datetime.utcfromtimestamp(timestamp)
        sse = unix_time(timestamp)
        query = 'SELECT id FROM Activity WHERE ? >= timestamp_from AND ? < timestamp_to'
        return self._cur.execute(query, (sse, sse,)).fetchall()[0][0]  # FIXME: Potential crash...
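One way to address the FIXME above is to return None when no activity matches the timestamp instead of indexing an empty result set; a sketch under that assumption, not the project's code:

    def get_activity_id_by_timestamp_safe(self, timestamp):
        if isinstance(timestamp, int):
            timestamp = datetime.datetime.utcfromtimestamp(timestamp)
        sse = unix_time(timestamp)
        query = 'SELECT id FROM Activity WHERE ? >= timestamp_from AND ? < timestamp_to'
        rows = self._cur.execute(query, (sse, sse)).fetchall()
        # avoid the IndexError the original FIXME warns about
        return rows[0][0] if rows else None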