Example #1
    def handle(self, *args, **options):

        start = datetime.utcnow()
        if len(args) == 0:
            num_pool = POOL_SIZE
        else:
            num_pool = int(args[0])

        if len(args) > 1:
            username = args[1]
        else:
            username = '******'

        pool = Pool(num_pool)

        apps = get_apps()

        completed = set()
        app_ids = set(range(len(apps)))
        for app_id in sorted(app_ids.difference(completed)):
            #keep trying all the preindexes until they all complete satisfactorily.
            print "Trying to preindex view (%d/%d) %s" % (app_id, len(apps),
                                                          apps[app_id])
            pool.spawn(do_sync, app_id)

        # sshhhhhh: if we're using HQ also preindex the couch apps
        # this could probably be multithreaded too, but leaving for now
        try:
            from corehq.couchapps import sync_design_docs
        except ImportError:
            pass
        else:
            sync_design_docs(get_db(), temp="tmp")

        # same hack above for MVP
        try:
            from mvp_apps import sync_design_docs as mvp_sync
        except ImportError:
            pass
        else:
            mvp_sync(get_db(), temp="tmp")

        # same hack above for MVP
        try:
            from fluff.sync_couchdb import sync_design_docs as fluff_sync
        except ImportError:
            pass
        else:
            fluff_sync(temp="tmp")

        print "All apps loaded into jobs, waiting..."
        pool.join()
        print "All apps reported complete."

        message = "Preindex results:\n"
        message += "\tInitiated by: %s\n" % username

        delta = datetime.utcnow() - start
        message += "Total time: %d seconds" % delta.seconds
        print message

        send_mail('%s Preindex Complete' % settings.EMAIL_SUBJECT_PREFIX,
                  message,
                  settings.SERVER_EMAIL, [x[1] for x in settings.ADMINS],
                  fail_silently=True)
    def handle(self, *args, **options):


        start = datetime.utcnow()
        if len(args) == 0:
            num_pool = POOL_SIZE
        else:
            num_pool = int(args[0])

        if len(args) > 1:
            username = args[1]
        else:
            username = '******'

        pool = Pool(num_pool)

        apps = get_apps()

        completed = set()
        app_ids = set(range(len(apps)))
        for app_id in sorted(app_ids.difference(completed)):
            #keep trying all the preindexes until they all complete satisfactorily.
            print "Trying to preindex view (%d/%d) %s" % (app_id, len(apps), apps[app_id])
            pool.spawn(do_sync, app_id)

        # sshhhhhh: if we're using HQ also preindex the couch apps
        # this could probably be multithreaded too, but leaving for now
        try:
            from corehq.couchapps import sync_design_docs
        except ImportError:
            pass
        else:
            sync_design_docs(get_db(), temp="tmp")

        # same hack above for MVP
        try:
            from mvp_apps import sync_design_docs as mvp_sync
        except ImportError:
            pass
        else:
            mvp_sync(get_db(), temp="tmp")

        # same hack above for MVP
        try:
            from fluff.sync_couchdb import sync_design_docs as fluff_sync
        except ImportError:
            pass
        else:
            fluff_sync(temp="tmp")

        print "All apps loaded into jobs, waiting..."
        pool.join()
        print "All apps reported complete."

        message = "Preindex results:\n"
        message += "\tInitiated by: %s\n" % username

        delta = datetime.utcnow() - start
        message += "Total time: %d seconds" % delta.seconds
        print message

        send_mail('%s Preindex Complete' % settings.EMAIL_SUBJECT_PREFIX,
                  message,
                  settings.SERVER_EMAIL,
                  [x[1] for x in settings.ADMINS],
                  fail_silently=True)
    def handle(self, *args, **options):

        start = datetime.utcnow()
        if len(args) == 0:
            num_pool = POOL_SIZE
        else:
            num_pool = int(args[0])

        if len(args) > 1:
            username = args[1]
        else:
            username = "******"

        pool = Pool(num_pool)

        apps = get_apps()

        completed = set()
        app_ids = set(range(len(apps)))
        for app_id in sorted(app_ids.difference(completed)):
            # keep trying all the preindexes until they all complete satisfactorily.
            print "Trying to preindex view (%d/%d) %s" % (app_id, len(apps), apps[app_id])
            pool.spawn(do_sync, app_id)

        # sshhhhhh: if we're using HQ also preindex the couch apps
        # this could probably be multithreaded too, but leaving for now
        try:
            from corehq.couchapps import sync_design_docs
        except ImportError:
            pass
        else:
            sync_design_docs(get_db(), temp="tmp")

        # same hack above for MVP
        try:
            from mvp_apps import sync_design_docs as mvp_sync
        except ImportError:
            pass
        else:
            mvp_sync(get_db(), temp="tmp")

        print "All apps loaded into jobs, waiting..."
        pool.join()
        print "All apps reported complete."

        # Git info
        message = "Preindex results:\n"
        message += "\tInitiated by: %s\n" % username

        delta = datetime.utcnow() - start
        message += "Total time: %d seconds" % delta.seconds
        print message

        # todo: customize this more for other users
        send_mail(
            "[commcare-hq] Preindex Complete",
            message,
            "*****@*****.**",
            ["*****@*****.**"],
            fail_silently=True,
        )