Ejemplo n.º 1
0
    def test_async_basic_identify(self):
        """One identify in async mode with flush_at=1 must flush immediately
        and be delivered exactly once."""
        # flush after every message
        analytics.default_client.flush_at = 1
        # `async` became a reserved keyword in Python 3.7, so the attribute
        # can no longer be assigned with plain attribute syntax.
        setattr(analytics.default_client, 'async', True)

        last_identifies = analytics.stats.identifies
        last_successful = analytics.stats.successful
        last_flushes = analytics.stats.flushes

        analytics.identify('*****@*****.**', {
            "Subscription Plan": "Free",
            "Friends": 30
        })

        self.assertEqual(analytics.stats.identifies, last_identifies + 1)

        # this should flush because we set the flush_at to 1
        self.assertEqual(analytics.stats.flushes, last_flushes + 1)

        # this should do nothing, as the async thread is currently active
        analytics.flush()

        # we should see no more flushes here
        self.assertEqual(analytics.stats.flushes, last_flushes + 1)

        sleep(1)

        self.assertEqual(analytics.stats.successful, last_successful + 1)
    def send(self, action, install_method, num_errors):
        """Report an installer event to segmentIO.

        action: string | preflight, deploy, or postflight
        install_method: string | gui, cli or advanced
        num_errors: number of errors observed during the stage
        """
        analytics.write_key = "51ybGTeFEFU1xo6u10XMDrr6kATFyRyh"

        # The config is read here rather than in __init__ so that any change
        # made between construction and this call is picked up.
        config = Config(CONFIG_PATH)
        customer_key = config.hacky_default_get('customer_key', None)

        # The CLI installer always runs on-prem; the platform falls back to
        # the provider when it is not configured explicitly.
        provider = "onprem"
        platform = config.hacky_default_get('platform', provider)

        payload = {
            "platform": platform,
            "provider": provider,
            "source": "installer",
            "variant": os.environ["BOOTSTRAP_VARIANT"],
            "install_id": self.uuid,
            "bootstrap_id": os.environ["BOOTSTRAP_ID"],
            "install_method": install_method,
            "action_name": action,
            "errors": num_errors,
            "customerKey": customer_key,
        }
        analytics.track(user_id=customer_key,
                        anonymous_id=self.uuid,
                        event="installer",
                        properties=payload)
        analytics.flush()
Ejemplo n.º 3
0
    def test_performance(self):
        """Enqueue 100 async track calls and poll until every one of them
        has been delivered."""
        to_send = 100

        target = analytics.stats.successful + to_send

        # `async` is a reserved keyword since Python 3.7; assign via setattr.
        setattr(analytics.default_client, 'async', True)
        analytics.default_client.flush_at = 200
        analytics.default_client.max_flush_size = 50
        analytics.default_client.set_log_level(logging.DEBUG)

        for i in range(to_send):
            analytics.track('*****@*****.**', 'Played a Song', {
                "Artist": "The Beatles",
                "Song": "Eleanor Rigby"
            })

        print('Finished submitting into the queue')

        start = time()
        # Keep flushing until the async worker has confirmed every message.
        while analytics.stats.successful < target:
            print('Successful ', analytics.stats.successful, 'Left',
                  (target - analytics.stats.successful),
                  'Duration ', (time() - start))
            analytics.flush()
            sleep(1.0)
Ejemplo n.º 4
0
    def send(self, action, install_method, num_errors):
        """Report an installer stage event to segmentIO.

        action: string | preflight, deploy, or postflight
        install_method: string | gui, cli or advanced
        num_errors: number of errors observed during the stage
        """
        analytics.write_key = "51ybGTeFEFU1xo6u10XMDrr6kATFyRyh"

        # Pull the customer key lazily so class construction never has to
        # touch the config backend.
        customer_key = backend.get_config().get("customer_key", None)

        properties = {
            "provider": "onprem",
            "source": "installer",
            "variant": os.environ["BOOTSTRAP_VARIANT"],
            "install_id": self.uuid,
            "bootstrap_id": os.environ["BOOTSTRAP_ID"],
            "install_method": install_method,
            "stage": action,
            "errors": num_errors,
            "customerKey": customer_key,
        }
        analytics.track(user_id=customer_key, anonymous_id=self.uuid,
                        event=action, properties=properties)
        analytics.flush()
Ejemplo n.º 5
0
    def test_performance(self):
        """Enqueue 100 async track calls and poll until every one of them
        has been delivered."""
        to_send = 100

        target = analytics.stats.successful + to_send

        # `async` is a reserved keyword since Python 3.7; assign via setattr.
        setattr(analytics.default_client, 'async', True)
        analytics.default_client.flush_at = 200
        analytics.default_client.max_flush_size = 50
        analytics.default_client.set_log_level(logging.DEBUG)

        for i in range(to_send):
            analytics.track('*****@*****.**', 'Played a Song', {
                "Artist": "The Beatles",
                "Song": "Eleanor Rigby"
            })

        # Python-2-only `print` statement converted to the print() function.
        print('Finished submitting into the queue')

        start = time()
        # Keep flushing until the async worker has confirmed every message.
        while analytics.stats.successful < target:
            print('Successful ', analytics.stats.successful, 'Left',
                  (target - analytics.stats.successful),
                  'Duration ', (time() - start))
            analytics.flush()
            sleep(1.0)
Ejemplo n.º 6
0
    def test_async_basic_identify(self):
        """One identify in async mode with flush_at=1 must flush immediately
        and be delivered exactly once."""
        # flush after every message
        analytics.default_client.flush_at = 1
        # `async` became a reserved keyword in Python 3.7, so the attribute
        # can no longer be assigned with plain attribute syntax.
        setattr(analytics.default_client, 'async', True)

        last_identifies = analytics.stats.identifies
        last_successful = analytics.stats.successful
        last_flushes = analytics.stats.flushes

        analytics.identify('*****@*****.**', {
            "Subscription Plan": "Free",
            "Friends": 30
        })

        self.assertEqual(analytics.stats.identifies, last_identifies + 1)

        # this should flush because we set the flush_at to 1
        self.assertEqual(analytics.stats.flushes, last_flushes + 1)

        # this should do nothing, as the async thread is currently active
        analytics.flush()

        # we should see no more flushes here
        self.assertEqual(analytics.stats.flushes, last_flushes + 1)

        sleep(1)

        self.assertEqual(analytics.stats.successful, last_successful + 1)
Ejemplo n.º 7
0
def checkout(request, invoiceDate, redirectAddress, bank_transaction,
             failredirectAddress):
    """Start a Mellat bank payment.

    Persists a BankPayment record, calls the bank's bpPayRequest SOAP
    service, reports catastrophic gateway errors to analytics, and returns
    a redirect page (error page on failure, gateway page on success).

    request: HTTP request carrying the user session.
    invoiceDate: unused here (kept for interface compatibility).
    redirectAddress: callback URL the bank redirects to after payment.
    bank_transaction: transaction object holding the amount; updated with
        the bank's refId on success.
    failredirectAddress: unused here (kept for interface compatibility).
    """
    invoiceNumber = AmirId(request.session['user_mail'])
    amount = bank_transaction.amount
    request.session["invoice_id"] = invoiceNumber
    now = datetime.now()

    pay = BankPayment()
    pay.invoice_number = invoiceNumber
    pay.amount = amount
    pay.session_key = request.session.session_key
    pay.user_mail = request.session["user_mail"]
    pay.gateway = "mellat"
    pay.user_id = request.session.get("user_id", None)
    pay.save()

    context = {
        'terminalId': TERMINAL_ID,
        'userName': USERNAME,
        'userPassword': PASSWORD,
        'orderId': invoiceNumber,
        'amount': amount,
        'localDate': now.strftime("%Y%m%d"),
        # BUG FIX: was "%H%I%S" (hour, 12-hour hour, seconds) — the bank
        # expects HHMMSS, i.e. minutes in the middle.
        'localTime': now.strftime("%H%M%S"),
        'additionalData': pay.user_mail,
        'callBackUrl': redirectAddress,
        'payerId': "0"
    }

    # Python-2-only `print` statements converted to print() calls
    # (parenthesized form is valid on both interpreters).
    print(context)

    client = suds.client.Client(ENDPOINT)
    result = client.service.bpPayRequest(**context).split(",")

    # A non-zero first field means the gateway rejected the request.
    if result[0] != "0" and not os.environ.get('DEVELOPMENT', False):
        if int(result[0]) in CATASTROPHIC_ERRORS:
            analytics.identify("Pivot_Error", traits={'email': "REPORT_EMAIL"})
            analytics.track("Pivot_Error", "f5_error", {
                "error": result[0],
                "reference": invoiceNumber,
            })
            analytics.flush()

        print(result)
        request.session['transaction'].res_code = int(result[0])
        pay.ref_id = result[0]
        pay.save()
        return payment.views.create_redirect_page(request, {
            "mellat": True,
            "error": True
        })
    print(bank_transaction)
    context["refId"] = result[1]
    pay.ref_id = context["refId"]
    pay.save()
    bank_transaction.ref_id = context["refId"]
    request.session['transaction'] = bank_transaction
    context["mellat"] = True
    return payment.views.create_redirect_page(request, context)
Ejemplo n.º 8
0
 def test_debug(self):
     """Module-level `debug` must propagate to the client that flush()
     creates lazily, for both True and False."""
     self.assertIsNone(analytics.default_client)
     analytics.debug = True
     analytics.flush()
     self.assertTrue(analytics.default_client.debug)
     # reset the client and verify the opposite value propagates too
     analytics.default_client = None
     analytics.debug = False
     analytics.flush()
     self.assertFalse(analytics.default_client.debug)
Ejemplo n.º 9
0
 def test_gzip(self):
     """Module-level `gzip` must propagate to the client that flush()
     creates lazily, for both True and False."""
     self.assertIsNone(analytics.default_client)
     analytics.gzip = True
     analytics.flush()
     self.assertTrue(analytics.default_client.gzip)
     # reset the client and verify the opposite value propagates too
     analytics.default_client = None
     analytics.gzip = False
     analytics.flush()
     self.assertFalse(analytics.default_client.gzip)
Ejemplo n.º 10
0
 def test_sync_mode(self):
     """Module-level `sync_mode` must propagate to the client that flush()
     creates lazily, for both True and False."""
     self.assertIsNone(analytics.default_client)
     analytics.sync_mode = True
     analytics.flush()
     self.assertTrue(analytics.default_client.sync_mode)
     # reset the client and verify the opposite value propagates too
     analytics.default_client = None
     analytics.sync_mode = False
     analytics.flush()
     self.assertFalse(analytics.default_client.sync_mode)
Ejemplo n.º 11
0
def send_all_results_to_segment() -> None:
    """Walk RESULT_DIR and push every recognised result file to Segment."""
    analytics.write_key = os.environ["SEGMENT_TOKEN"]
    # str.endswith accepts a tuple of suffixes, replacing the any() scan.
    valid_suffixes = tuple(task_mapping_segment.keys())
    for dirpath, _dirnames, filenames in os.walk(os.environ["RESULT_DIR"]):
        for name in filenames:
            if name.endswith(valid_suffixes):
                _push_results(name, os.path.join(dirpath, name))
    analytics.flush()
Ejemplo n.º 12
0
def track():
    """HTTP endpoint forwarding the request's JSON body to analytics.track().

    On any failure a JSON object with an 'error' key is returned instead.
    """
    try:
        body = request.get_json(silent=True)
        user_id = body.get('userId')
        event = body.get('event')
        properties = body.get('properties')
        context = body.get('context')
        timestamp = format_timestamp(body.get('timestamp'))
        anonymous_id = body.get('anonymousId')
        integrations = body.get('integrations')

        # NOTE(review): flushing before enqueueing the new event looks
        # intentional (drains the queue first) — confirm with the author.
        analytics.flush()
        analytics.track(user_id, event, properties, context, timestamp,
                        anonymous_id, integrations)

        return format_response('track')

    except Exception as e:
        return json.dumps({'error': str(e)})
Ejemplo n.º 13
0
def group():
    """HTTP endpoint forwarding the request's JSON body to analytics.group().

    On any failure a JSON object with an 'error' key is returned instead.
    """
    try:
        body = request.get_json(silent=True)
        user_id = body.get('userId')
        group_id = body.get('groupId')
        traits = body.get('traits')
        context = body.get('context')
        timestamp = format_timestamp(body.get('timestamp'))
        anonymous_id = body.get('anonymousId')
        integrations = body.get('integrations')

        # NOTE(review): flushing before enqueueing the new event looks
        # intentional (drains the queue first) — confirm with the author.
        analytics.flush()
        analytics.group(user_id, group_id, traits, context, timestamp,
                        anonymous_id, integrations)

        return format_response('group')

    except Exception as e:
        return json.dumps({'error': str(e)})
Ejemplo n.º 14
0
    def send(self, action, install_method, num_errors):
        """Report an installer stage event to segmentIO.

        action: string | preflight, deploy, or postflight
        install_method: string | gui, cli or advanced
        num_errors: number of errors observed during the stage
        """
        analytics.write_key = "39uhSEOoRHMw6cMR6st9tYXDbAL3JSaP"

        payload = {
            "install_id": self.uuid,
            "bootstrap_id": self.bootstrap_version,
            "provider": self.source,
            "source": "installer",
            "install_method": install_method,
            "stage": action,
            "errors": num_errors,
            "customer_key": self.customer_key,
        }
        analytics.track(user_id=self.customer_key,
                        anonymous_id=self.uuid,
                        event=action,
                        properties=payload)
        analytics.flush()
Ejemplo n.º 15
0
def widget_analytics():
    """Record a widget page view in segment.io and answer a JSONP callback.

    Reads the query-string parameters, derives hostname/domain from the
    embedding page's URL when present, identifies the caller by api-key,
    tracks the page view, flushes synchronously, and returns a JSONP
    success response.
    """
    # `iteritems()` is Python-2-only; `items()` behaves the same here and
    # also works on Python 3.
    d = {}
    for k, v in request.args.items():
        d[k] = v

    try:
        d["hostname"] = d['url'].split("/")[2]
        d["domain"] = ".".join(d['hostname'].split(".")[-2:])  # like "impactstory.org"
    except KeyError:
        # no url supplied — nevermind then
        pass

    try:
        api_key = d["api-key"]
    except KeyError:
        api_key = "unknown"

    logger.info(u"got widget analytics data: {data}".format(
        data=d))

    try:
        # later look stuff up here from db, based on api-key; send along w identify() call...
        analytics.identify(user_id=api_key)
    except IndexError:
        logger.debug(u"IndexError when doing analytics.identify in widget_analytics")

    try:
        analytics.track(
            user_id=api_key,
            event="Served a page with embedded widget",
            properties=d
        )
    except IndexError:
        logger.debug(u"IndexError when doing analytics.track in widget_analytics")

    try:
        # `async` is a reserved keyword since Python 3.7, so the keyword
        # argument must be passed via **kwargs unpacking.
        analytics.flush(**{'async': False})  # make sure all the data gets sent to segment.io
    except IndexError:
        # sometimes the data was already flushed and we get an error popping from an empty queue
        logger.debug(u"IndexError when doing analytics.flush in widget_analytics")

    return make_response(request.args.get("callback", "") + '({"status": "success"})', 200)
Ejemplo n.º 16
0
    def test_async_basic_track(self):
        """A single async track must enqueue immediately and be delivered
        after a manual flush."""
        analytics.default_client.flush_at = 50
        # `async` is a reserved keyword since Python 3.7; assign via setattr.
        setattr(analytics.default_client, 'async', True)

        last_tracks = analytics.stats.tracks
        last_successful = analytics.stats.successful

        analytics.track('*****@*****.**', 'Played a Song', {
            "Artist": "The Beatles",
            "Song": "Eleanor Rigby"
        })

        self.assertEqual(analytics.stats.tracks, last_tracks + 1)

        analytics.flush()

        # give the async worker time to deliver
        sleep(2)

        self.assertEqual(analytics.stats.successful, last_successful + 1)
Ejemplo n.º 17
0
    def test_async_basic_track(self):
        """A single async track must enqueue immediately and be delivered
        after a manual flush."""
        analytics.default_client.flush_at = 50
        # `async` is a reserved keyword since Python 3.7; assign via setattr.
        setattr(analytics.default_client, 'async', True)

        last_tracks = analytics.stats.tracks
        last_successful = analytics.stats.successful

        analytics.track('*****@*****.**', 'Played a Song', {
            "Artist": "The Beatles",
            "Song": "Eleanor Rigby"
        })

        self.assertEqual(analytics.stats.tracks, last_tracks + 1)

        analytics.flush()

        # give the async worker time to deliver
        sleep(2)

        self.assertEqual(analytics.stats.successful, last_successful + 1)
Ejemplo n.º 18
0
    def test_time_policy(self):
        """flush_after must trigger a time-based flush on the next track
        once the configured interval has elapsed."""
        # `async` is a reserved keyword since Python 3.7; assign via setattr.
        setattr(analytics.default_client, 'async', False)
        analytics.default_client.flush_at = 1

        # add something so we have a reason to flush
        analytics.track('*****@*****.**', 'Played a Song', {
            "Artist": "The Beatles",
            "Song": "Eleanor Rigby"
        })

        # flush to reset flush count
        analytics.flush()

        last_flushes = analytics.stats.flushes

        # set the flush size trigger high
        analytics.default_client.flush_at = 50
        # set the time policy to 1 second from now
        analytics.default_client.flush_after = timedelta(seconds=1)

        analytics.track('*****@*****.**', 'Played a Song', {
            "Artist": "The Beatles",
            "Song": "Eleanor Rigby"
        })

        # that shouldn't have triggered a flush
        self.assertEqual(analytics.stats.flushes, last_flushes)

        # sleep past the time-flush policy
        sleep(1.2)

        # submit another track to trigger the policy
        analytics.track('*****@*****.**', 'Played a Song', {
            "Artist": "The Beatles",
            "Song": "Eleanor Rigby"
        })

        self.assertEqual(analytics.stats.flushes, last_flushes + 1)
Ejemplo n.º 19
0
    def test_time_policy(self):
        """flush_after must trigger a time-based flush on the next track
        once the configured interval has elapsed."""
        # `async` is a reserved keyword since Python 3.7; assign via setattr.
        setattr(analytics.default_client, 'async', False)
        analytics.default_client.flush_at = 1

        # add something so we have a reason to flush
        analytics.track('*****@*****.**', 'Played a Song', {
            "Artist": "The Beatles",
            "Song": "Eleanor Rigby"
        })

        # flush to reset flush count
        analytics.flush()

        last_flushes = analytics.stats.flushes

        # set the flush size trigger high
        analytics.default_client.flush_at = 50
        # set the time policy to 1 second from now
        analytics.default_client.flush_after = timedelta(seconds=1)

        analytics.track('*****@*****.**', 'Played a Song', {
            "Artist": "The Beatles",
            "Song": "Eleanor Rigby"
        })

        # that shouldn't have triggered a flush
        self.assertEqual(analytics.stats.flushes, last_flushes)

        # sleep past the time-flush policy
        sleep(1.2)

        # submit another track to trigger the policy
        analytics.track('*****@*****.**', 'Played a Song', {
            "Artist": "The Beatles",
            "Song": "Eleanor Rigby"
        })

        self.assertEqual(analytics.stats.flushes, last_flushes + 1)
Ejemplo n.º 20
0
    def send(self, action, install_method, num_errors):
        """Report an installer stage event to segmentIO.

        action: string | preflight, deploy, or postflight
        install_method: string | gui, cli or advanced
        num_errors: number of errors observed during the stage
        """
        analytics.write_key = "39uhSEOoRHMw6cMR6st9tYXDbAL3JSaP"

        payload = {
            "install_id": self.uuid,
            "bootstrap_id": self.bootstrap_version,
            "provider": self.source,
            "source": "installer",
            "install_method": install_method,
            "stage": action,
            "errors": num_errors,
            "customer_key": self.customer_key,
        }
        analytics.track(user_id=self.customer_key, anonymous_id=self.uuid,
                        event=action, properties=payload)
        analytics.flush()
Ejemplo n.º 21
0
    def send(self, action, install_method, num_errors):
        """Report an installer stage event to segmentIO.

        action: string | preflight, deploy, or postflight
        install_method: string | gui, cli or advanced
        num_errors: number of errors observed during the stage
        """
        analytics.write_key = "51ybGTeFEFU1xo6u10XMDrr6kATFyRyh"

        # Pull the customer key lazily so class construction never has to
        # touch the config backend.
        customer_key = backend.get_config().get("customer_key", None)

        payload = {
            "provider": "onprem",
            "source": "installer",
            "variant": os.environ["BOOTSTRAP_VARIANT"],
            "install_id": self.uuid,
            "bootstrap_id": os.environ["BOOTSTRAP_ID"],
            "install_method": install_method,
            "stage": action,
            "errors": num_errors,
            "customerKey": customer_key,
        }
        analytics.track(user_id=customer_key,
                        anonymous_id=self.uuid,
                        event=action,
                        properties=payload)
        analytics.flush()
Ejemplo n.º 22
0

import impactstoryanalytics.widgets
from impactstoryanalytics.widgets.widget_api_helpers import Couchdb


logger = logging.getLogger("analytics.run_couch")


def run_couch():
    """Aggregate collections-per-genre counts from CouchDB and report them
    to analytics as a single 'Profiles per quasigenre' event.

    Returns the raw CouchDB view rows.
    """
    analytics.identify(user_id="stats")

    rows = Couchdb.get_view("collections_per_genre/collections_per_genre", True)
    products_per_quasigenre = {row["key"]: row["value"] for row in rows}

    # The ":" key is the view's grand total; expose it under "total" instead.
    products_per_quasigenre["total"] = products_per_quasigenre.pop(":")

    logger.info("products_per_quasigenre:" + str(products_per_quasigenre))

    analytics.track(user_id="stats", event='Profiles per quasigenre', properties=products_per_quasigenre)

    return rows

run_couch()

# `async` is a reserved keyword since Python 3.7, so the keyword argument
# must be passed via **kwargs unpacking.
analytics.flush(**{'async': False})  # make sure all the data gets sent to segment.io
Ejemplo n.º 23
0
 def test_flush(self):
     """Smoke test: flush() with an empty queue must not raise."""
     analytics.flush()
Ejemplo n.º 24
0
 def test_screen(self):
     """Smoke test: screen() followed by flush() must not raise."""
     analytics.screen('userId')
     analytics.flush()
Ejemplo n.º 25
0
 def test_writeKey(self):
     """flush() must lazily create a default client carrying the
     module-level write key ('test-init', set by the test fixture)."""
     self.assertIsNone(analytics.default_client)
     analytics.flush()
     self.assertEqual(analytics.default_client.write_key, 'test-init')
Ejemplo n.º 26
0
 def test_alias(self):
     """Smoke test: alias() followed by flush() must not raise."""
     analytics.alias('previousId', 'userId')
     analytics.flush()
Ejemplo n.º 27
0
 def test_max_queue_size(self):
     """Module-level max_queue_size must be forwarded to the lazily
     created client's internal queue."""
     self.assertIsNone(analytics.default_client)
     analytics.max_queue_size = 1337
     analytics.flush()
     self.assertEqual(analytics.default_client.queue.maxsize, 1337)
Ejemplo n.º 28
0
 def after_return(self, *args, **kwargs):
     """Flush queued analytics once the task returns.

     NOTE(review): the name/signature matches Celery's Task.after_return
     hook — confirm against the enclosing class.
     """
     # analytics.send is the module-level on/off switch; skip when disabled.
     if analytics.send:
         analytics.flush()
Ejemplo n.º 29
0
 def test_screen(self):
     """Smoke test: screen() followed by flush() must not raise."""
     analytics.screen('distinct_id')
     analytics.flush()
def worker_shutdown_handler(**kwargs):
    """Flush any queued analytics events when the worker shuts down.

    NOTE(review): presumably registered as a worker-shutdown signal
    handler — confirm at the registration site.
    """
    # analytics.send is the module-level on/off switch; skip when disabled.
    if analytics.send:
        analytics.flush()
Ejemplo n.º 31
0
            self.downloadOne()



if __name__ == "__main__" and "--restart" in sys.argv:
    # --restart: mark stored downloads as downloadable again.
    conn = MongoClient(settings.MONGO_URI)
    db = conn.requests
    # NOTE(review): a bare update({}, {...}) replaces the first matching
    # document wholesale instead of $set-ting one field, and without
    # multi=True it touches only one document — confirm this is intended.
    db.download.update({},{"canDownload":1})

if __name__ == "__main__" and "--download" in sys.argv:
    # --download <link> <jsondata>: download a single item ad hoc.
    try:
        link = sys.argv[sys.argv.index("--download")+1]
        data = sys.argv[sys.argv.index("--download")+2]
        downloader = Downloader().downloadItem({
            "_id" : str(ObjectId()),
            "links" : [(link,True)],
            "data" : data
        })

        # report the outcome, then push any queued analytics events
        send_download_status(link, downloader)
        analytics.flush()

    except Exception as e:
        # missing/invalid positional arguments end up here (IndexError etc.)
        print("Invalid Data : %s" % e)
        print("Usage : downloader --download link jsondata")

if __name__ == "__main__" and "--startdownloader" in sys.argv:
    # --startdownloader: run the long-lived downloader loop against MongoDB.
    conn = MongoClient(settings.MONGO_URI)
    db = conn.requests
    Downloader(db=db).start()
Ejemplo n.º 32
0
 def test_screen(self):
     """Smoke test: screen() followed by flush() must not raise."""
     analytics.screen('userId')
     analytics.flush()
Ejemplo n.º 33
0
 def test_timeout(self):
     """Module-level timeout must be forwarded to the lazily created
     default client."""
     self.assertIsNone(analytics.default_client)
     analytics.timeout = 1.234
     analytics.flush()
     self.assertEqual(analytics.default_client.timeout, 1.234)
Ejemplo n.º 34
0
 def test_alias(self):
     """Smoke test: alias() followed by flush() must not raise."""
     analytics.alias('previousId', 'distinct_id')
     analytics.flush()
Ejemplo n.º 35
0

def read_results(file):
    """Load an evaluation-report JSON file and keep only the summary
    metric entries that are actually present.

    file: path to the JSON report.
    Returns a dict restricted to the known summary keys.
    """
    wanted = (
        "accuracy", "weighted avg", "macro avg", "micro avg",
        "conversation_accuracy",
    )
    with open(file) as json_file:
        report = json.load(json_file)
    return {key: report[key] for key in wanted if key in report}


def push_results(file_name, file):
    """Read one result file, tag it with its name and task, and send it.

    file_name: bare file name, used to look up the task in task_mapping.
    file: full path passed to read_results().
    """
    result = read_results(file)
    result["file_name"] = file_name
    result["task"] = task_mapping[file_name]
    send_to_segment(result)


if __name__ == "__main__":
    # Walk RESULT_DIR and push every file whose name ends with one of the
    # known task suffixes, then flush so nothing stays queued on exit.
    for dirpath, dirnames, files in os.walk(os.environ["RESULT_DIR"]):
        for f in files:
            if any(
                    f.endswith(valid_name)
                    for valid_name in task_mapping.keys()):
                push_results(f, os.path.join(dirpath, f))
    analytics.flush()
Ejemplo n.º 36
0
 def test_identify(self):
     """Smoke test: identify() with traits followed by flush() must not
     raise."""
     analytics.identify('userId', {'email': '*****@*****.**'})
     analytics.flush()
Ejemplo n.º 37
0
 def test_track(self):
     """Smoke test: track() followed by flush() must not raise."""
     analytics.track('userId', 'python module event')
     analytics.flush()
Ejemplo n.º 38
0
 def test_host(self):
     """Module-level host must be forwarded to the lazily created default
     client."""
     self.assertIsNone(analytics.default_client)
     analytics.host = 'test-host'
     analytics.flush()
     self.assertEqual(analytics.default_client.host, 'test-host')
Ejemplo n.º 39
0
 def test_page(self):
     """Smoke test: page() followed by flush() must not raise."""
     analytics.page('distinct_id')
     analytics.flush()
Ejemplo n.º 40
0
 def test_identify(self):
     """Smoke test: identify() with traits followed by flush() must not
     raise."""
     analytics.identify('userId', { 'email': '*****@*****.**' })
     analytics.flush()
Ejemplo n.º 41
0
 def test_track(self):
     """Smoke test: track() followed by flush() must not raise."""
     analytics.track('userId', 'python module event')
     analytics.flush()
Ejemplo n.º 42
0
 def test_group(self):
     """Smoke test: group() followed by flush() must not raise."""
     analytics.group('userId', 'groupId')
     analytics.flush()
Ejemplo n.º 43
0
 def test_group(self):
     """Smoke test: group() followed by flush() must not raise."""
     analytics.group('userId', 'groupId')
     analytics.flush()
Ejemplo n.º 44
0
 def test_alias(self):
     """Smoke test: alias() followed by flush() must not raise."""
     analytics.alias('previousId', 'userId')
     analytics.flush()
Ejemplo n.º 45
0
 def test_page(self):
     """Smoke test: page() followed by flush() must not raise."""
     analytics.page('userId')
     analytics.flush()
Ejemplo n.º 46
0
 def test_page(self):
     """Smoke test: page() followed by flush() must not raise."""
     analytics.page('userId')
     analytics.flush()
Ejemplo n.º 47
0
 def test_flush(self):
     """Smoke test: flush() with an empty queue must not raise."""
     analytics.flush()
Ejemplo n.º 48
0
def sendSegment(data):
    """Send *data* as identify traits for the fixed test identity to Segment.

    data: dict of traits attached to the 'maxinetestdata' user.
    """
    # Hard-coded write key; debug logging enabled and errors routed to the
    # module-level on_error callback before identifying and flushing.
    analytics.write_key="87GPbpE99eOSSdTT0VhrSTVJ4aCE2r9F"
    analytics.debug = True
    analytics.on_error = on_error
    analytics.identify('maxinetestdata', data)
    analytics.flush()