    def handle(self, *args, **options):
        """Queue evaluation of all decompositions that have not yet been
        scored at the 10% comparison threshold.  Pass "delete-failed-open"
        as the first positional argument to also delete decompositions
        whose images fail to open."""
        thresh = 0.10

        # only re-evaluate decompositions not already scored at this threshold
        qset = IntrinsicImagesDecomposition.objects.all() \
            .exclude(error_comparison_thresh=thresh) \
            .values_list('id', flat=True)

        delete_failed_open = (
            len(args) >= 1 and args[0] == "delete-failed-open")

        print 'delete_failed_open: %s' % delete_failed_open

        # shuffle so each chunk gets a representative mix of items
        qset = list(qset)
        random.shuffle(qset)

        # split the work into at most 1024 chunks; queuing too many
        # subtasks causes a backlog error
        chunksize = max(len(qset) / 1024, 1)
        print 'Queuing tasks in chunks of %s items...' % chunksize
        job = group([
            evaluate_decompositions_task.subtask(kwargs={
                'decomposition_ids': ids,
                'delete_failed_open': delete_failed_open,
                'thresh': thresh,
            })
            for ids in chunk_list_generator(qset, chunksize)
        ])
        job.apply_async()

        print 'Done'
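
    # For reference, a minimal sketch of the chunk_list_generator helper
    # assumed by the calls in this file (the real helper lives elsewhere in
    # the codebase): yield successive slices of at most `chunksize` items,
    # with the last slice possibly shorter.
    #
    #     def chunk_list_generator(lst, chunksize):
    #         for i in xrange(0, len(lst), chunksize):
    #             yield lst[i:i + chunksize]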
    def compute_threshold(self, thresh):
        # split the work into at most 1024 chunks; queuing too many
        # subtasks causes a backlog error.  Clamp to at least 1 so that a
        # small id list does not produce a chunksize of 0 (which would make
        # the subtask count below divide by zero).
        chunksize = max(len(self.decomp_ids) / 1024, 1)
        print 'Queuing %s items (chunksize %s) for threshold %s...' % (
            len(self.decomp_ids), chunksize, thresh)

        job = group([
            evaluate_decompositions_task.subtask(kwargs={
                'decomposition_ids': ids,
                'delete_failed_open': False,
                'thresh': thresh,
            })
            for ids in chunk_list_generator(self.decomp_ids, chunksize)
        ])
        result = job.apply_async()

        # round up: the last chunk may hold fewer than chunksize items
        num_subtasks = (len(self.decomp_ids) + chunksize - 1) / chunksize
        print 'Waiting on %s subtasks with chunksize %s...' % (
            num_subtasks, chunksize)

        # block until every subtask in the group has completed
        result.join()
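
# For context, a hedged sketch of the Celery task queued above.  The name
# evaluate_decompositions_task and its kwargs match the call sites in this
# file, but the body below is an illustrative assumption (including the
# hypothetical `evaluate` method), not the project's actual implementation:
#
#     from celery import shared_task
#
#     @shared_task
#     def evaluate_decompositions_task(decomposition_ids, delete_failed_open,
#                                      thresh):
#         qs = IntrinsicImagesDecomposition.objects.filter(
#             id__in=decomposition_ids)
#         for decomp in qs:
#             try:
#                 decomp.evaluate(thresh=thresh)  # hypothetical method
#             except IOError:
#                 if delete_failed_open:
#                     decomp.delete()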