Example No. 1
def scale(size, smooth, source, target, concurrency):
    canceled = False
    jobs = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    create_processes(size, smooth, jobs, results, concurrency)
    todo = add_jobs(source, target, jobs)
    try:
        jobs.join()
    except KeyboardInterrupt: # May not work on Windows
        Qtrac.report("canceling...")
        canceled = True
    copied = scaled = 0
    while not results.empty(): # Safe because all jobs have finished
        result = results.get_nowait()
        copied += result.copied
        scaled += result.scaled
    return Summary(todo, copied, scaled, canceled)
def results():
    # Sink coroutine: the totals are kept as attributes of this function object
    # (results.todo, results.copied, results.scaled), which are assumed to be
    # initialized to 0 before the pipeline runs.
    while True:
        result = (yield)
        results.todo += result.todo
        results.copied += result.copied
        results.scaled += result.scaled
        Qtrac.report("{} {}".format("copied" if result.copied else "scaled",
                os.path.basename(result.name)))
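
Example No. 1's scale() relies on helpers that are not shown in this listing. The sketch below is an assumption inferred from the call sites: the worker it starts is the kind of worker() shown in the later examples, and Result/Summary are assumed to be namedtuples of the shape the code reads.

import collections
import multiprocessing
import os

Result = collections.namedtuple("Result", "copied scaled name")             # assumed shape
Summary = collections.namedtuple("Summary", "todo copied scaled canceled")  # assumed shape

def create_processes(size, smooth, jobs, results, concurrency):
    # Start `concurrency` daemon processes; each runs a worker() (see the
    # worker listings further down) that consumes (sourceImage, targetImage)
    # pairs from `jobs` and puts Result tuples on `results`.
    for _ in range(concurrency):
        process = multiprocessing.Process(target=worker,
                args=(size, smooth, jobs, results))
        process.daemon = True
        process.start()

def add_jobs(source, target, jobs):
    # Put one (sourceImage, targetImage) pair per file on the jobs queue and
    # return how many pairs were queued.
    todo = 0
    for todo, name in enumerate(os.listdir(source), start=1):
        jobs.put((os.path.join(source, name), os.path.join(target, name)))
    return todo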
Example No. 3
def main():
    limit, concurrency = handle_commandline()
    Qtrac.report("starting...")
    filename = os.path.join(os.path.dirname(__file__), "whatsnew.dat")
    jobs = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    create_processes(limit, jobs, results, concurrency)
    todo = add_jobs(filename, jobs)
    process(todo, jobs, results, concurrency)
Example No. 4
def main():
    limit, how_many_threads = handle_commandline()
    Qtrac.report("starting...")
    filename = os.path.join(os.path.dirname(__file__), "whatsnew.dat")

    jobs = queue.Queue()
    results = queue.Queue()
    create_threads(limit, jobs, results, how_many_threads)
    todo = add_jobs(filename, jobs)
    process(todo, jobs, results, how_many_threads)
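
Example No. 4 assumes a create_threads() helper. A minimal sketch of what it might look like, given how it is called; the worker it targets is the one shown in the later worker examples (Examples No. 15-17), so treat this as an assumption rather than the original code.

import threading

def create_threads(limit, jobs, results, concurrency):
    # Start `concurrency` daemon threads; each runs worker(limit, jobs, results),
    # reading feeds from the jobs queue and putting results on the results queue.
    for _ in range(concurrency):
        thread = threading.Thread(target=worker, args=(limit, jobs, results))
        thread.daemon = True
        thread.start()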
Example No. 5
def summarize(summary, concurrency):
    message = "copied {} scaled {} ".format(summary.copied, summary.scaled)
    difference = summary.todo - (summary.copied + summary.scaled)
    if difference:
        message += "skipped {} ".format(difference)
    message += "using {} processes".format(concurrency)
    if summary.canceled:
        message += " [canceled]"
    Qtrac.report(message)
    print()
Example No. 6
def summarize(concurrency, canceled):
    message = "copied {} scaled {} ".format(results.copied, results.scaled)
    difference = results.todo - (results.copied + results.scaled)
    if difference:
        message += "skipped {} ".format(difference)
    message += "using {} coroutines".format(concurrency)
    if canceled:
        message += " [canceled]"
    Qtrac.report(message)
    print()
Example No. 7
def summarize(summary, concurrency):
    message = "copied {} scaled {} ".format(summary.copied, summary.scaled)
    difference = summary.todo - (summary.copied + summary.scaled)
    if difference:
        message += "skipped {} ".format(difference)
    message += "using {} processes".format(concurrency)
    if summary.canceled:
        message += " [canceled]"
    Qtrac.report(message)
    print()
Example No. 8
def summarize(summary):
    message = "copied {} scaled {} ".format(summary.copied, summary.scaled)
    difference = summary.todo - (summary.copied + summary.scaled)
    if difference:
        message += "skipped {} ".format(difference)
    message += "single-threaded"
    if summary.canceled:
        message += " [canceled]"
    Qtrac.report(message)
    print()
Example No. 9
def main():
    size, smooth, source, target, concurrency = handle_commandline()
    Qtrac.report("starting...")
    canceled = False
    try:
        scale(size, smooth, source, target, concurrency)
    except KeyboardInterrupt:
        Qtrac.report("canceling...")
        canceled = True
    summarize(concurrency, canceled)
Example No. 10
def summarize(summary):
    message = "copied {} scaled {} ".format(summary.copied, summary.scaled)
    difference = summary.todo - (summary.copied + summary.scaled)
    if difference:
        message += "skipped {} ".format(difference)
    message += "single-threaded"
    if summary.canceled:
        message += " [canceled]"
    Qtrac.report(message)
    print()
def summarize(concurrency, canceled):
    message = "copied {} scaled {} ".format(results.copied, results.scaled)
    difference = results.todo - (results.copied + results.scaled)
    if difference:
        message += "skipped {} ".format(difference)
    message += "using {} coroutines".format(concurrency)
    if canceled:
        message += " [canceled]"
    Qtrac.report(message)
    print()
def main():
    size, smooth, source, target, concurrency = handle_commandline()
    Qtrac.report("starting...")
    canceled = False
    try:
        scale(size, smooth, source, target, concurrency)
    except KeyboardInterrupt:
        Qtrac.report("canceling...")
        canceled = True
    summarize(concurrency, canceled)
Example No. 13
def scaler(receiver, sink, size, smooth, me):
    while True:
        sourceImage, targetImage, who = (yield)
        if who == me:
            try:
                result = scale_one(size, smooth, sourceImage, targetImage)
                sink.send(result)
            except Image.Error as err:
                Qtrac.report(str(err), True)
        elif receiver is not None:
            receiver.send((sourceImage, targetImage, who))
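
scaler() above is a generator-based coroutine, so it has to be "primed" before send() can be used, and the snippets in the source rely on a pipeline that chains one scaler per unit of concurrency. A sketch of such a primer decorator and of the wiring it implies; whether this matches Qtrac.coroutine and the original create_pipeline() exactly is an assumption.

import functools

def coroutine(function):
    # Prime a generator-based coroutine so it is ready to receive values via send().
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        generator = function(*args, **kwargs)
        next(generator)
        return generator
    return wrapper

def create_pipeline(size, smooth, concurrency, sink):
    # Chain `concurrency` scaler coroutines: each handles the jobs tagged with
    # its own index (`who == me`) and forwards everything else down the chain.
    # Assumes scaler() is wrapped with a primer like the one above.
    pipeline = None
    for who in range(concurrency):
        pipeline = scaler(pipeline, sink, size, smooth, who)
    return pipeline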
Example No. 15
def worker(limit, jobs, results):
    while True:
        try:
            feed = jobs.get()  # block until a Feed is put on this queue; see add_jobs()
            ok, result = Feed.read(feed, limit)  # open feed.url and return the news title + body; ok: the URL opened OK
            if not ok:
                Qtrac.report(result, True)  # result here is the URL title + error info
            elif result is not None:
                Qtrac.report("read {}".format(result[0][4:-6]))  # ignore the <ul> tag
                results.put(result)
        finally:
            jobs.task_done()  # each jobs.get() ends with jobs.task_done()
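
Example No. 15's worker drains a jobs queue that add_jobs() is expected to fill. A hypothetical sketch, assuming Feed.iter(filename) yields Feed(title, url) tuples as the later futures-based examples indicate.

def add_jobs(filename, jobs):
    # Queue one Feed per entry of whatsnew.dat and return how many were queued.
    todo = 0
    for todo, feed in enumerate(Feed.iter(filename), start=1):
        jobs.put(feed)
    return todo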
Example No. 16
def worker(limit, jobs, results):
    while True:  # run by daemon threads, which exit automatically when the main thread finishes
        try:
            feed = jobs.get()
            ok, result = Feed.read(feed, limit)
            if not ok:
                Qtrac.report(result, True)
            elif result is not None:
                Qtrac.report("read {}".format(result[0][4:-6]))
                results.put(result)
        finally:
            jobs.task_done()
Example No. 17
def worker(limit, jobs, results):
    while True:
        try:
            feed = jobs.get()
            ok, result = Feed.read(feed, limit)
            if not ok:
                Qtrac.report(result, True)
            elif result is not None:
                Qtrac.report("read {}".format(result[0][4:-6]))
                results.put(result)
        finally:
            jobs.task_done()
Example No. 18
def worker(size, smooth, jobs, results):
    while True:
        try:
            sourceImage, targetImage = jobs.get()
            try:
                result = scale_one(size, smooth, sourceImage, targetImage)
                Qtrac.report("{} {}".format("copied" if result.copied else
                        "scaled", os.path.basename(result.name)))
                results.put(result)
            except Image.Error as err:
                Qtrac.report(str(err), True)
        finally:
            jobs.task_done()  # the consumer calls this to signal that the item returned by get() has been processed; calls must pair one-to-one with get(), and a ValueError is raised if it is called more times than there were items on the queue
Example No. 20
def worker(limit, jobs, results):
    while True:
        try:
            feed = jobs.get()
            # Feed is a namedtuple of (name, rss_url)
            ok, result = Feed.read(feed, limit)
            if not ok:
                Qtrac.report(result, True)
            elif result is not None:
                Qtrac.report("read {}".format(result[0][4:-6]))
                results.put(result)
        finally:
            jobs.task_done()
Example No. 21
def reader(receiver, sink, limit, me):
    while True:
        feed, who = (yield)
        if who == me:
            ok, result = Feed.read(feed, limit)
            if not ok:
                Qtrac.report(result, True)
                result = None
            else:
                Qtrac.report("read {} at {}".format(feed.title, feed.url))
            sink.send(result)
        elif receiver is not None:
            receiver.send((feed, who))
Example No. 23
def worker(size, smooth, jobs, results):
    while True:
        try:
            scr_image, dest_image = jobs.get()
            try:
                result = scale_one(size, smooth, scr_image, dest_image)
                Qtrac.report("{} {}".format(
                    "copied" if result.copied else "scaled",
                    os.path.basename(result.name)))
                results.put(result)
            except Image.Error as err:
                Qtrac.report(str(err), True)
        finally:
            jobs.task_done()
Example No. 24
def worker(size, smooth, jobs, results):
    while True:
        try:
            sourceImage, targetImage = jobs.get()
            try:
                result = scale_one(size, smooth, sourceImage, targetImage)
                Qtrac.report("{} {}".format("[%s]"%result.copied if result.copied else
                        "scaled", os.path.basename(result.name)))
                results.put(result)
            except Exception as err:
                Qtrac.report(str(err), True)
        finally:
            jobs.task_done()
Example No. 25
def main():
    t1 = time.time()
    print(multiprocessing.cpu_count())
    size, smooth, source, target, concurrency = (100, 1, ".", ".", 4)  # handle_commandline()
    Qtrac.report("starting...")
    summary = scale(size, smooth, source, target, concurrency)
    v = time.time() - t1
    print("==" * 20)
    print("time: {}".format(v))
Example No. 26
def summarize(summary, concurrency):
    """汇总所有的处理结果
    
    Arguments:
        summary {[type]} -- [description]
        concurrency {[type]} -- [description]
    """
    message = "copied {} scaled {} ".format(summary.copied, summary.scaled)
    difference = summary.todo - (summary.copied + summary.scaled)
    if difference:
        message += "skipped {} ".format(difference)
    message += "using {} processes".format(concurrency)
    if summary.canceled:
        message += " [canceled]"
    Qtrac.report(message)
    print()
Example No. 27
def process(todo, jobs, results, concurrency):
    canceled = False
    try:
        jobs.join() # Wait for all the work to be done
    except KeyboardInterrupt: # May not work on Windows
        Qtrac.report("canceling...")
        canceled = True
    if canceled:
        done = results.qsize()
    else:
        done, filename = output(results)
    Qtrac.report("read {}/{} feeds using {} threads{}".format(done, todo,
            concurrency, " [canceled]" if canceled else ""))
    print()
    if not canceled:
        webbrowser.open(filename)
Example No. 28
def process(todo, jobs, results, concurrency):
    canceled = False
    try:
        jobs.join()  # Wait for all the work to be done
    except KeyboardInterrupt:  # May not work on Windows
        Qtrac.report("canceling...")
        canceled = True
    if canceled:
        done = results.qsize()
    else:
        done, filename = output(results)
    Qtrac.report("read {}/{} feeds using {} threads{}".format(
        done, todo, concurrency, " [canceled]" if canceled else ""))
    print()
    if not canceled:
        webbrowser.open(filename)
Example No. 29
def main():
    limit = handle_commandline()
    filename = os.path.join(tempfile.gettempdir(), "whatsnew.html")
    canceled = False
    todo = done = 0
    with open(filename, "wt", encoding="utf-8") as file:
        file.write("<!doctype html>\n")
        file.write("<html><head><title>What's New</title></head>\n")
        file.write("<body><h1>What's New</h1>\n")
        todo, done, canceled = write_body(file, limit)
        file.write("</body></html>\n")
    Qtrac.report("read {}/{} feeds{}".format(
        done, todo, " [canceled]" if canceled else ""))
    print()
    if not canceled:
        webbrowser.open(filename)
Example No. 30
def scale(size, smooth, source, target, concurrency):
    canceled = False
    jobs = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    create_processes(size, smooth, jobs, results, concurrency)
    todo = add_jobs(source, target, jobs)
    try:
        jobs.join()
    except KeyboardInterrupt:  # May not work on Windows
        Qtrac.report("canceling...")
        canceled = True
    copied = scaled = 0
    while not results.empty():  # Safe because all jobs have finished
        result = results.get_nowait()
        copied += result.copied
        scaled += result.scaled
    return Summary(todo, copied, scaled, canceled)
Example No. 31
def process(todo, jobs, results, concurrency):
    canceled = False
    try:
        jobs.join()  # Wait for all the work to be done
    except KeyboardInterrupt: # May not work on Windows
        Qtrac.report("canceling...")
        canceled = True
    if canceled:
        done = results.qsize()  # the queue's size, i.e. the number of jobs completed so far
        filename = None  # never opened when canceled (webbrowser.open(None) would raise an error)
    else:
        done, filename = output(results)
    Qtrac.report("read {}/{} feeds using {} threads{}".format(done, todo,
            concurrency, " [canceled]" if canceled else ""))
    print()  # just a blank line
    if not canceled:
        webbrowser.open(filename)
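
process() defers the actual writing to an output() helper that is not shown in this listing. A sketch of what it might do, assuming each queued result is a list of HTML fragments and that the page goes to a temporary file as in Example No. 29; the details are assumptions, not the original helper.

import os
import tempfile

def output(results):
    # Drain the results queue into a temporary HTML page and return
    # (number of feeds written, path to the page).
    done = 0
    filename = os.path.join(tempfile.gettempdir(), "whatsnew.html")
    with open(filename, "wt", encoding="utf-8") as file:
        file.write("<!doctype html>\n")
        file.write("<html><head><title>What's New</title></head>\n")
        file.write("<body><h1>What's New</h1>\n")
        while not results.empty():
            result = results.get_nowait()
            done += 1
            for item in result:
                file.write(item)
        file.write("</body></html>\n")
    return done, filename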
Example No. 32
def wait_for(futures, env):
    canceled = False
    data = {}
    for future in concurrent.futures.as_completed(futures):
        err = future.exception()
        if err is None:
            result = future.result()
            #col_idx_arr += [result.col_idx]
            #ata += result.col_data
            data[result[0]] = result[1]
            #print(result)

        elif isinstance(err, Exception):  # report anticipated errors; narrow this to the specific error type expected
            Qtrac.report(str(err), True)
        else:
            raise err  # Unanticipated
    return data
Example No. 33
def scale(size, smooth, source, target, concurrency):
    canceled = False
    jobs = multiprocessing.JoinableQueue()  # job queue
    results = multiprocessing.Queue()       # results queue
    create_processes(size, smooth, jobs, results, concurrency)   # create the worker processes; they block until jobs are put on the job queue
    todo = add_jobs(source, target, jobs)  # put the jobs on the job queue
    try:
        jobs.join()  # the producer blocks here until every queued item has been processed, i.e. until task_done() has been called for each item
    except KeyboardInterrupt: # May not work on Windows
        Qtrac.report("canceling...")
        canceled = True
    copied = scaled = 0
    while not results.empty(): # Safe because all jobs have finished
        result = results.get_nowait()
        copied += result.copied
        scaled += result.scaled
    return Summary(todo, copied, scaled, canceled)
Example No. 34
def worker(size, smooth, jobs, results):
    while True:  # the infinite loop ends with the main process (daemon=True)
        try:
            sourceImage, targetImage = jobs.get()  # get images from the queue; blocks if there is nothing to get
            try:
                result = scale_one(size, smooth, sourceImage, targetImage)  # returns a Result
                Qtrac.report("{} {}".format(
                    "copied" if result.copied else "scaled",
                    os.path.basename(result.name)))
                results.put(result)  # put the Result on the results queue
            except Image.Error as err:
                Qtrac.report(str(err), True)  # True: it is an error
        finally:
            jobs.task_done()  # one job/task is done
Example No. 35
def scale(size, smooth, src_dir, dest_dir, num_procs):
    canceled = False
    jobs = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    create_processes(size, smooth, jobs, results, num_procs)
    todo = add_jobs(src_dir, dest_dir, jobs)
    try:
        jobs.join()
    except KeyboardInterrupt:  # catch Ctrl-C (may not work on Windows)
        Qtrac.report("canceling...")
        canceled = True
    copied = scaled = 0
    while not results.empty():  # Safe because all jobs have finished
        result = results.get_nowait()
        copied += result.copied
        scaled += result.scaled
    return Summary(todo, copied, scaled, canceled)
Example No. 36
def main():
    limit, concurrency = handle_commandline()
    Qtrac.report("starting...")
    datafile = os.path.join(os.path.dirname(__file__), "whatsnew.dat")
    filename = os.path.join(tempfile.gettempdir(), "whatsnew.html")
    canceled = False
    with open(filename, "wt", encoding="utf-8") as file:
        write_header(file)
        pipeline = create_pipeline(limit, concurrency, file)
        try:
            for i, feed in enumerate(Feed.iter(datafile)):
                pipeline.send((feed, i % concurrency))
        except KeyboardInterrupt:
            Qtrac.report("canceling...")
            canceled = True
        write_footer(file, results.ok, results.todo, canceled, concurrency)
    if not canceled:
        webbrowser.open(filename)
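
Example No. 36 reads results.ok and results.todo, so the coroutine-based variant must keep its counts on a shared accumulator that the sink end of the pipeline updates. A hedged sketch of one way that sink could look, keeping the counters as attributes of the coroutine function itself; the name and exact bookkeeping are assumptions, and the primer decorator is the one sketched after Example No. 13.

@Qtrac.coroutine
def results(file):
    # Sink coroutine: write each non-None result to the HTML page and maintain
    # the counters that write_footer() reads as attributes on this function.
    while True:
        result = (yield)
        results.todo += 1
        if result is not None:
            results.ok += 1
            for item in result:
                file.write(item)

results.todo = results.ok = 0  # initialize the accumulator before the pipeline runs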
Example No. 37
def main():
    limit, concurrency = handle_commandline()
    Qtrac.report("starting...")
    filename = os.path.join(os.path.dirname(__file__), "whatsnew.dat")
    futures = set()
    with concurrent.futures.ProcessPoolExecutor(
            max_workers=concurrency) as executor:
        for feed in Feed.iter(filename):
            future = executor.submit(Feed.read, feed, limit)
            futures.add(future)
        done, filename, canceled = process(futures)
        if canceled:
            executor.shutdown()
    Qtrac.report("read {}/{} feeds using {} processes{}".format(
        done, len(futures), concurrency, " [canceled]" if canceled else ""))
    print()
    if not canceled:
        webbrowser.open(filename)
Example No. 38
def main():
    limit, concurrency = handle_commandline()
    Qtrac.report("starting...")
    filename = os.path.join(os.path.dirname(__file__), "whatsnew.dat")
    futures = set()  # the set of future instances
    with concurrent.futures.ThreadPoolExecutor(
            max_workers=concurrency) as executor:
        for feed in Feed.iter(filename):  # a generator of Feed(title, url)
            future = executor.submit(Feed.read, feed, limit)  # schedule Feed.read(feed, limit) on the pool
            futures.add(future)  # add the future instance to the futures set
        done, filename, canceled = process(futures)
        if canceled:
            executor.shutdown()
    Qtrac.report("read {}/{} feeds using {} threads{}".format(
        done, len(futures), concurrency, " [canceled]" if canceled else ""))
    print()
    if not canceled:
        webbrowser.open(filename)
Example No. 39
def wait_for(futures):
    canceled = False
    copied = scaled = 0
    try:
        for future in concurrent.futures.as_completed(futures):
            # as_completed(futures, timeout=None) yields each future as it finishes,
            # blocking the main process until all of them are done (or the timeout expires)
            err = future.exception()
            if err is None:
                result = future.result()
                copied += result.copied
                scaled += result.scaled
                Qtrac.report("{} {}".format(
                    "copied" if result.copied else "scaled",
                    os.path.basename(result.name)))
            elif isinstance(err, Image.Error):  # image file error
                Qtrac.report(str(err), True)
            else:
                raise err  # Unanticipated
    except KeyboardInterrupt:  # Ctrl+C aborts processing while the script is running
        Qtrac.report("canceling...")
        canceled = True
        for future in futures:
            future.cancel()  # cancel the remaining futures so the worker processes never run them
    return Summary(len(futures), copied, scaled, canceled)
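
wait_for() only consumes futures; something has to create them. A sketch of the futures-based front end it implies, assuming a driver that submits one scale_one() call per image to a process pool; get_jobs is the generator used in Example No. 42, and the shutdown behaviour here is simplified.

import concurrent.futures

def scale(size, smooth, source, target, concurrency):
    # Submit one future per (sourceImage, targetImage) pair, then collect the
    # totals with wait_for(); the pool is shut down when the `with` block exits.
    futures = set()
    with concurrent.futures.ProcessPoolExecutor(max_workers=concurrency) as executor:
        for sourceImage, targetImage in get_jobs(source, target):
            futures.add(executor.submit(scale_one, size, smooth,
                                        sourceImage, targetImage))
        return wait_for(futures)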
Example No. 40
def scale_one(size, smooth, sourceImage, targetImage):
    try:
        oldImage = Image.from_file(sourceImage)
        if oldImage.width <= size and oldImage.height <= size:
            oldImage.save(targetImage)
            return Result(1, 0, targetImage)
        else:
            if smooth:
                scale = min(size / oldImage.width, size / oldImage.height)
                newImage = oldImage.scale(scale)
            else:
                stride = int(
                    math.ceil(
                        max(oldImage.width / size, oldImage.height / size)))
                newImage = oldImage.subsample(stride)
            newImage.save(targetImage)
            return Result(0, 1, targetImage)
    except Exception as e:
        Qtrac.report("Problem scaling " + os.path.basename(sourceImage),
                     error=True)
        Qtrac.report(str(e), error=True)
        return Result(0, 0, sourceImage)
Example No. 41
def scale(size, smooth, source, target, concurrency):
    canceled = False
    jobs = multiprocessing.JoinableQueue()  # job queue: like Queue() but with join() and task_done() added
    results = multiprocessing.Queue()  # results queue: filled in worker()
    create_processes(size, smooth, jobs, results, concurrency)  # Process in for -> daemon -> start()
    todo = add_jobs(source, target, jobs)  # fill the jobs queue with (source, target) pairs and return the number of source images
    try:  # queue.put() -> queue.task_done() in for -> queue.join()
        jobs.join()  # block the main process until the jobs queue is empty
    except KeyboardInterrupt:  # May not work on Windows
        Qtrac.report("canceling...")
        canceled = True
    copied = scaled = 0
    while not results.empty():  # the results queue is filled by each worker() process
        result = results.get_nowait()  # remove and return an item from the queue without blocking
        copied += result.copied
        scaled += result.scaled
    return Summary(todo, copied, scaled, canceled)  # copied: the total number of copied images
Example No. 42
def scale(size, smooth, source, target):
    canceled = False
    todo = copied = scaled = 0
    for sourceImage, targetImage in get_jobs(source, target):
        try:
            todo += 1
            result = scale_one(size, smooth, sourceImage, targetImage)
            copied += result.copied
            scaled += result.scaled
            Qtrac.report("{} {}".format(
                "copied" if result.copied else "scaled",
                os.path.basename(targetImage)))
        except Image.Error as err:
            Qtrac.report(str(err), True)
        except KeyboardInterrupt:
            Qtrac.report("canceling...")
            canceled = True
            break
    return Summary(todo, copied, scaled, canceled)
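
Example No. 42 iterates get_jobs(source, target), which is not shown in this listing. A minimal sketch of such a generator, assuming one target image per file in the source directory; this is an inference from the call site, not the original helper.

import os

def get_jobs(source, target):
    # Yield one (sourceImage, targetImage) pair per file in the source directory.
    for name in os.listdir(source):
        yield os.path.join(source, name), os.path.join(target, name)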