def setUp(self):
    # build some sample data in the form of envelopes
    del _TestSender.instances[:]
    client = CliTelemetryClient(sender=_TestSender)
    with open(os.path.join(TEST_RESOURCE_FOLDER, 'cache'), mode='r') as fq:
        for line in fq.readlines()[:5]:
            # skip the fixed 20-character prefix on each cached line
            client.add(line[20:], flush=True, force=True)
    self.sample_data = _TestSender.instances[0].data
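The _TestSender fixture these tests rely on is defined elsewhere in the test module and is not shown in this listing. A minimal sketch of what such a test double could look like, inferred from the usage above (a class-level instances list plus a per-instance data list that records each sent batch) and assuming the client instantiates the sender class it is given and calls a send method on it:

# Assumed sketch of the _TestSender test double; a guess based on how the
# tests interact with it, not the real fixture.
class _TestSender:
    instances = []  # every sender instance created by a CliTelemetryClient

    def __init__(self, *args, **kwargs):
        self.data = []  # one entry per sent batch; each batch is a list of envelopes
        _TestSender.instances.append(self)

    def send(self, data_to_send):
        # record the batch instead of posting it, so tests can inspect it
        self.data.append(list(data_to_send))

Recording batches instead of posting them is what lets the assertions in the later examples inspect exactly what would have been uploaded.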
Example #3
# module-level imports required by the function body below
import sys

import portalocker


def main():
    from azure.cli.telemetry.util import should_upload
    from azure.cli.telemetry.components.telemetry_note import TelemetryNote
    from azure.cli.telemetry.components.records_collection import RecordsCollection
    from azure.cli.telemetry.components.telemetry_client import CliTelemetryClient
    from azure.cli.telemetry.components.telemetry_logging import config_logging_for_upload, get_logger

    try:
        config_dir = sys.argv[1]
        config_logging_for_upload(config_dir)

        logger = get_logger('main')
        logger.info('Attempt start. Configuration directory [%s].', sys.argv[1])

        if not should_upload(config_dir):
            logger.info('Exit early. The note file indicates it is not a suitable time to upload telemetry.')
            sys.exit(0)

        try:
            with TelemetryNote(config_dir) as telemetry_note:
                telemetry_note.touch()

                collection = RecordsCollection(telemetry_note.get_last_sent(), config_dir)
                collection.snapshot_and_read()

                client = CliTelemetryClient()
                for each in collection:
                    client.add(each, flush=True)
                client.flush(force=True)

                telemetry_note.update_telemetry_note(collection.next_send)
        except portalocker.AlreadyLocked:
            # another upload process is running.
            logger.info('Lock out from note file under %s which means another process is running. Exit 0.', config_dir)
            sys.exit(0)
        except IOError as err:
            logger.warning('Unexpected IO Error %s. Exit 1.', err)
            sys.exit(1)
        except Exception as err:  # pylint: disable=broad-except
            logger.error('Unexpected Error %s. Exit 2.', err)
            logger.exception(err)
            sys.exit(2)
    except IndexError:
        sys.exit(1)
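main() takes the Azure CLI configuration directory as its only command-line argument; the outer IndexError handler turns a missing argument into exit code 1. A hedged sketch of driving it directly for experimentation (the script name and directory below are placeholders; in normal operation the uploader is assumed to run as a separate process launched by the CLI):

# Hypothetical direct invocation of main(); argv[0] and the configuration
# directory are placeholders only.
import sys

sys.argv = ['telemetry_upload', '/path/to/.azure']
try:
    main()
except SystemExit as exc:
    print('uploader exited with code', exc.code)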
Example #5
    def test_telemetry_client_without_flush(self):
        client = CliTelemetryClient(sender=_TestSender)

        self.assertEqual(0, len(_TestSender.instances))

        for r in self.sample_records[:10]:
            client.add(r)

        self.assertEqual(1, len(_TestSender.instances))
        sender = _TestSender.instances[0]

        self.assertEqual(0, len(sender.data))

        # flush is skipped because the record collection is below the batch threshold
        client.flush()
        self.assertEqual(0, len(sender.data))

        # a forced flush should send the data even if it is smaller than the batch threshold
        # there should be 10 envelopes in the batch of data
        client.flush(force=True)
        self.assertEqual(1, len(sender.data))
        self.assertEqual(10, len(sender.data[0]))

        # a repeated flush should not duplicate data
        client.flush(force=True)
        self.assertEqual(1, len(sender.data))

        # the default batch size is 100; ensure data is sent once enough records accumulate
        del sender.data[:]
        count = 0
        for r in self.sample_records:
            client.add(r, flush=True)

            count += 1
            if not count % 100:
                self.assertEqual(1, len(sender.data))
                del sender.data[:]
            else:
                self.assertEqual(0, len(sender.data))
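The test pins down the client's batching contract: records added without flush stay buffered, flush() is a no-op until the buffer reaches the batch threshold (100 by default, per the last loop), flush(force=True) sends whatever is buffered, and a repeated forced flush sends nothing new. It also shows the sender is only created once the first record arrives. An illustrative sketch of that batch-and-flush pattern, not CliTelemetryClient's actual implementation:

# Illustrative batch-and-flush pattern matching the behaviour verified above;
# a sketch only, not the library's real code.
class BatchingClient:
    def __init__(self, sender, batch_size=100):
        self._sender_cls = sender
        self._sender = None              # created lazily, on the first record
        self._batch_size = batch_size
        self._buffer = []

    def add(self, record, flush=False):
        if self._sender is None:
            self._sender = self._sender_cls()
        self._buffer.append(record)
        if flush:
            self.flush()

    def flush(self, force=False):
        if not self._buffer:
            return                       # nothing buffered; repeated flushes send nothing
        if force or len(self._buffer) >= self._batch_size:
            self._sender.send(list(self._buffer))
            del self._buffer[:]

Paired with a recording sender like the _TestSender sketch earlier, this class should satisfy the same assertions the test makes against the real client.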