Example #1
0
def upload_photos(set, photos, key, secret, checkpoint):
    try:
        auth = Auth(key, secret)
        auth.authenticate()
    except urllib2.HTTPError as e:
        print e.read()
        raise

    set_controller = Photosets(auth)

    # Work queue to print upload status
    ui_wq = WorkQueue(print_status, num_workers=1)

    upload_and_add(photos[0], set, auth, set_controller, ui_wq, checkpoint)

    wq = WorkQueue(upload_and_add,
                   num_workers=16,
                   max_queue_size=50,
                   set=set,
                   auth=auth,
                   set_controller=set_controller,
                   ui_wq=ui_wq,
                   checkpoint=checkpoint)

    for photo in photos[1:]:
        wq.add(photo)

    wq.done()
    ui_wq.done()
def upload_photos(set, photos, key, secret, checkpoint):
	try:
		auth = Auth(key, secret)
		auth.authenticate()
	except urllib2.HTTPError as e:
		print e.read()
		raise

	set_controller = Photosets(auth)

	# Work queue to print upload status
	ui_wq = WorkQueue(print_status, num_workers = 1)

	upload_and_add(photos[0], set, auth, set_controller, ui_wq, checkpoint)

	wq = WorkQueue(upload_and_add, 
		num_workers = 16, 
		max_queue_size = 50, 
		set = set, 
		auth = auth, 
		set_controller = set_controller,
		ui_wq = ui_wq,
		checkpoint = checkpoint)

	for photo in photos[1:]:
		wq.add(photo)
	
	wq.done()
	ui_wq.done()
Example #3
0
    def __init__(self, *args, **kws):
        """Initialize the backend and its WorkQueue master.

        WorkQueue-specific keywords are popped from *kws* before the
        remainder is forwarded to backend.Backend.__init__:
          port      -- master listen port (default 9123)
          name      -- master name (default 'bs.sge.wq')
          catalog   -- passed through to WorkQueue (default True)
          exclusive -- passed through to WorkQueue (default False)
          wq_alg    -- scheduling algorithm (default FCFS)
          ccl_debug -- flags handed to set_debug_flag (default 'all')
        """
        wq_options = {
            'port': kws.pop('port', 9123),
            'name': kws.pop('name', 'bs.sge.wq'),
            'catalog': kws.pop('catalog', True),
            'exclusive': kws.pop('exclusive', False),
        }
        scheduling = kws.pop('wq_alg', WORK_QUEUE_SCHEDULE_FCFS)

        set_debug_flag(kws.pop('ccl_debug', 'all'))

        # Everything left in *kws* belongs to the generic backend.
        backend.Backend.__init__(self, *args, **kws)

        self.workqueue = WorkQueue(**wq_options)
        self.workqueue.specify_algorithm(scheduling)

        # self.workqueue.specify_master_mode(WORK_QUEUE_MASTER_MODE_STANDALONE)
        self.workqueue.specify_worker_mode(WORK_QUEUE_WORKER_MODE_SHARED)
Example #4
0
    def __init__(self, *args, **kws):
        """Consume WorkQueue keywords, then delegate to backend.Backend.

        Defaults: port 9123, name 'bs.sge.wq', catalog True,
        exclusive False, wq_alg FCFS, ccl_debug 'all'.
        """
        listen_port = kws.pop('port', 9123)
        queue_name = kws.pop('name', 'bs.sge.wq')
        use_catalog = kws.pop('catalog', True)
        is_exclusive = kws.pop('exclusive', False)
        algorithm = kws.pop('wq_alg', WORK_QUEUE_SCHEDULE_FCFS)

        # Route debug output as requested before anything else runs.
        set_debug_flag(kws.pop('ccl_debug', 'all'))

        backend.Backend.__init__(self, *args, **kws)

        self.workqueue = WorkQueue(port=listen_port,
                                   name=queue_name,
                                   catalog=use_catalog,
                                   exclusive=is_exclusive)
        self.workqueue.specify_algorithm(algorithm)

        # self.workqueue.specify_master_mode(WORK_QUEUE_MASTER_MODE_STANDALONE)
        self.workqueue.specify_worker_mode(WORK_QUEUE_WORKER_MODE_SHARED)
Example #5
0
def work():
    """Yield a WorkQueue backed by a temp directory and a 2-thread pool.

    Both resources are released when the generator is resumed or closed,
    making this suitable as a fixture-style context.
    """
    with tempfile.TemporaryDirectory() as tmp_dir, \
         ThreadPoolExecutor(2, thread_name_prefix="test-executor-") as pool:
        yield WorkQueue(__file__, pool, Path(tmp_dir))
Example #6
0
class SGEWorkQueue(sge.SGE):
    """
    Generates a workflow using for SGE, but uses WorkQueue to run the jobs.
    The master user can then have multiple accounts submit workers for the jobs.
    """
    def __init__(self, *args, **kws):
        """Set up the WorkQueue master, consuming WQ-specific keywords.

        Recognized keywords (popped before delegating to the backend):
          port      -- master listen port (default 9123)
          name      -- master name (default 'bs.sge.wq')
          catalog   -- forwarded to WorkQueue (default True)
          exclusive -- forwarded to WorkQueue (default False)
          wq_alg    -- task scheduling algorithm (default FCFS)
          ccl_debug -- flags handed to set_debug_flag (default 'all')
        """
        queue_kwargs = {
            'port': kws.pop('port', 9123),
            'name': kws.pop('name', 'bs.sge.wq'),
            'catalog': kws.pop('catalog', True),
            'exclusive': kws.pop('exclusive', False),
        }
        wq_algorithm = kws.pop('wq_alg', WORK_QUEUE_SCHEDULE_FCFS)

        set_debug_flag(kws.pop('ccl_debug', 'all'))

        # Whatever remains in *kws* is meant for the generic backend.
        backend.Backend.__init__(self, *args, **kws)

        self.workqueue = WorkQueue(**queue_kwargs)
        self.workqueue.specify_algorithm(wq_algorithm)

        # self.workqueue.specify_master_mode(WORK_QUEUE_MASTER_MODE_STANDALONE)
        self.workqueue.specify_worker_mode(WORK_QUEUE_WORKER_MODE_SHARED)

    def create_task(self, jobfile):
        """
        Creates a Task to execute the specified jobfile
        """

        job = os.path.basename(jobfile)

        cmd = '%(jobfile)s' % {
            # 'workarea' : self.workarea,
            'jobfile': jobfile
        }

        print 'Task Command:', cmd

        t = Task(cmd)
        t.tag = job

        return t

    def submit_jobs(self, jobfiles, **kws):
        """Create a Task for each entry in *jobfiles* and enqueue it."""
        for jobfile in jobfiles:
            self.workqueue.submit(self.create_task(jobfile))

    def is_job_running(self):
        """Return True while the queue still holds unfinished tasks."""
        queue_drained = self.workqueue.empty()
        return not queue_drained

    def wait(self, **kws):
        """
        Poll the WorkQueue until all submitted tasks finish.

        Failed tasks (non-zero return status) are resubmitted; *tries*
        counts those resubmissions, so *max_tries* bounds the number of
        retries rather than the number of poll iterations.

        Key words:
          *poll_interval* : how long to wait between tries. Format: <time><units> where <units>
                            can be one of s, m, h, d, w for seconds, minutes, hours, days, weeks respectively
                            Default = 1m
          *max_tries*     : number of task retries before giving up.
                            Default = infinity

        Returns: True when every completed task had return status 0,
        False otherwise.

        NOTE(review): an earlier doc claimed a *BackendError* is raised
        when *max_tries* is exceeded, but this method only breaks out of
        the loop -- confirm the intended contract.
        """

        poll_interval = kws.get('poll_interval', '1m')
        max_tries = kws.get('max_tries', float('inf'))

        # Parse the '1m'-style interval into the numeric timeout passed
        # to workqueue.wait().
        sleeptime = self.parse_time_units(poll_interval)

        tries = 0

        # Tracks whether *all* tasks seen so far have succeeded.
        success = True

        while True:
            if not self.is_job_running():
                break
            if tries > max_tries:
                # Retry budget exhausted; give up with current result.
                break

            # Presumably returns a falsy value when no task completed
            # within the timeout (hence the `if task` guard below).
            task = self.workqueue.wait(sleeptime)

            print '\tinit:', self.workqueue.stats.workers_init
            print '\tready:', self.workqueue.stats.workers_ready
            print '\tbusy:', self.workqueue.stats.workers_busy
            print '\trunning:', self.workqueue.stats.tasks_running
            print '\twaiting:', self.workqueue.stats.tasks_waiting
            print '\tcomplete:', self.workqueue.stats.tasks_complete

            if task:
                print 'Job %s finished with %s' % (task.tag,
                                                   task.return_status)
                print '++++++++++++++++++++++++++++++', 'JOB OUTPUT: %s' % task.tag, '++++++++++++++++++++++++++++++'
                print task.output.strip()
                print '================================================================================'
                success = success and task.return_status == 0

                if not task.return_status == 0:
                    # Failed task: put it back on the queue and count the
                    # retry against *max_tries*.
                    self.workqueue.submit(task)
                    tries += 1

        return success

    def job_preamble(self, **kws):
        """Return the shebang line that opens every generated job script."""
        shebang = '#!/usr/bin/env bash'
        return shebang

    def job_conclusion(self, **kws):
        # NOTE(review): this snippet is truncated here -- the triple-quoted
        # string opened on the next line has no visible terminator in this
        # chunk, so the conclusion text cannot be reviewed or restyled.
        # Confirm against the original file.
        return """\
try:
    os.mkdir(photo_dir)
except OSError:
    res = raw_input(photo_dir + " exists! Continue? (y/n) ")

    if res == "y":
        pass
    else:
        raise

print "Downloading " + str(num_photos) + " photos from album " + album_name

num_pages = num_photos / PHOTOS_PER_PAGE + 1
page = 1
wq = WorkQueue(downloadPhoto)

while True:
    request = createApiRequest(FLICKR_LIST_REQUEST, {"photoset_id": photo_set_id, "page": page})

    handler = urllib2.urlopen(request)
    response = json.loads(handler.read())

    for photo_md in response["photoset"]["photo"]:
        wq.add(photo_md)

    page += 1
    num_pages = response["photoset"]["pages"]

    if page > num_pages:
        break
Example #8
0
class SGEWorkQueue(sge.SGE):
    """
    Generates a workflow using for SGE, but uses WorkQueue to run the jobs.
    The master user can then have multiple accounts submit workers for the jobs.
    """


    def __init__(self, *args, **kws):
        """Pull WorkQueue settings out of *kws*, then init the backend.

        Defaults: port 9123, name 'bs.sge.wq', catalog True,
        exclusive False, wq_alg FCFS, ccl_debug 'all'.  Remaining
        keywords are forwarded untouched to backend.Backend.__init__.
        """
        master_port = kws.pop('port', 9123)
        master_name = kws.pop('name', 'bs.sge.wq')
        catalog_mode = kws.pop('catalog', True)
        exclusive_mode = kws.pop('exclusive', False)
        schedule_alg = kws.pop('wq_alg', WORK_QUEUE_SCHEDULE_FCFS)

        # Configure debug output before the queue is created.
        set_debug_flag(kws.pop('ccl_debug', 'all'))

        backend.Backend.__init__(self, *args, **kws)

        self.workqueue = WorkQueue(port=master_port,
                                   name=master_name,
                                   catalog=catalog_mode,
                                   exclusive=exclusive_mode)
        self.workqueue.specify_algorithm(schedule_alg)

        # self.workqueue.specify_master_mode(WORK_QUEUE_MASTER_MODE_STANDALONE)
        self.workqueue.specify_worker_mode(WORK_QUEUE_WORKER_MODE_SHARED)


        
    def create_task(self, jobfile):
        """
        Creates a Task to execute the specified jobfile
        """

        job = os.path.basename(jobfile)

        cmd = '%(jobfile)s' % {
            # 'workarea' : self.workarea,
            'jobfile'  : jobfile }

        print 'Task Command:', cmd

        t = Task(cmd)
        t.tag = job

        return t

    def submit_jobs(self, jobfiles, **kws):
        """Build and submit a WorkQueue Task for every jobfile given."""
        for path in jobfiles:
            new_task = self.create_task(path)
            self.workqueue.submit(new_task)


    def is_job_running(self):
        """True as long as the WorkQueue reports it is not empty."""
        if self.workqueue.empty():
            return False
        return True

    def wait(self, **kws):
        """
        Poll the WorkQueue until all submitted tasks finish.

        Failed tasks (non-zero return status) are resubmitted; *tries*
        counts those resubmissions, so *max_tries* bounds the number of
        retries rather than the number of poll iterations.

        Key words:
          *poll_interval* : how long to wait between tries. Format: <time><units> where <units>
                            can be one of s, m, h, d, w for seconds, minutes, hours, days, weeks respectively
                            Default = 1m
          *max_tries*     : number of task retries before giving up.
                            Default = infinity

        Returns: True when every completed task had return status 0,
        False otherwise.

        NOTE(review): an earlier doc claimed a *BackendError* is raised
        when *max_tries* is exceeded, but this method only breaks out of
        the loop -- confirm the intended contract.
        """

        poll_interval = kws.get('poll_interval', '1m')
        max_tries     = kws.get('max_tries', float('inf'))

        # Parse the '1m'-style interval into the numeric timeout passed
        # to workqueue.wait().
        sleeptime     = self.parse_time_units(poll_interval)

        tries         = 0

        # Tracks whether *all* tasks seen so far have succeeded.
        success = True

        while True:
            if not self.is_job_running():
                break
            if tries > max_tries:
                # Retry budget exhausted; give up with current result.
                break

            # Presumably returns a falsy value when no task completed
            # within the timeout (hence the `if task` guard below).
            task   = self.workqueue.wait(sleeptime)

            print '\tinit:', self.workqueue.stats.workers_init
            print '\tready:', self.workqueue.stats.workers_ready
            print '\tbusy:', self.workqueue.stats.workers_busy
            print '\trunning:', self.workqueue.stats.tasks_running
            print '\twaiting:', self.workqueue.stats.tasks_waiting
            print '\tcomplete:', self.workqueue.stats.tasks_complete

            if task:
                print 'Job %s finished with %s' % (task.tag, task.return_status)
                print '++++++++++++++++++++++++++++++', 'JOB OUTPUT: %s' % task.tag, '++++++++++++++++++++++++++++++'
                print task.output.strip()
                print '================================================================================'
                success = success and task.return_status == 0

                if not task.return_status == 0:
                    # Failed task: put it back on the queue and count the
                    # retry against *max_tries*.
                    self.workqueue.submit(task)
                    tries += 1

        return success


    def job_preamble(self, **kws):
        """Return the bash shebang that starts every generated job script."""
        header = '#!/usr/bin/env bash'
        return header

    def job_conclusion(self, **kws):
        return """\