Exemple #1
1
def main():
    """Command-line entry point: scale images from src to dest using processes."""
    size, smooth, src_dir, dest_dir, num_procs = handle_commandline()
    Qtrac.report("starting...")
    # Run the parallel scale and hand its Summary straight to the reporter.
    summarize(scale(size, smooth, src_dir, dest_dir, num_procs), num_procs)
Exemple #2
0
def scale(size, smooth, source, target, concurrency):
    """Fan image-scaling jobs out to worker processes and gather a Summary.

    Returns Summary(todo, copied, scaled, canceled).
    """
    jobs = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    create_processes(size, smooth, jobs, results, concurrency)
    todo = add_jobs(source, target, jobs)
    canceled = False
    try:
        jobs.join()
    except KeyboardInterrupt:  # may not work on Windows
        Qtrac.report("canceling...")
        canceled = True
    copied = scaled = 0
    # Draining with get_nowait is safe: every job has already finished.
    while not results.empty():
        outcome = results.get_nowait()
        copied += outcome.copied
        scaled += outcome.scaled
    return Summary(todo, copied, scaled, canceled)
def results():
    """Coroutine sink that accumulates per-image tallies.

    Running totals are kept as attributes (todo/copied/scaled) on this
    function object — presumably initialized elsewhere before the first
    send; TODO confirm against the caller.
    """
    while True:
        result = (yield)
        results.todo += result.todo
        results.copied += result.copied
        results.scaled += result.scaled
        Qtrac.report("{} {}".format("copied" if result.copied else "scaled",
                os.path.basename(result.name)))
Exemple #4
0
def results():
    """Coroutine sink: fold each incoming tally into totals kept as
    attributes on this function object, reporting each image as it arrives."""
    while True:
        outcome = (yield)
        results.todo += outcome.todo
        results.copied += outcome.copied
        results.scaled += outcome.scaled
        action = "copied" if outcome.copied else "scaled"
        Qtrac.report("{} {}".format(action, os.path.basename(outcome.name)))
Exemple #5
0
def main():
    """Read feeds with worker threads and present the results."""
    limit, concurrency = handle_commandline()
    Qtrac.report("starting...")
    # whatsnew.dat sits next to this script.
    datafile = os.path.join(os.path.dirname(__file__), "whatsnew.dat")
    jobs, results = queue.Queue(), queue.Queue()
    create_threads(limit, jobs, results, concurrency)
    process(add_jobs(datafile, jobs), jobs, results, concurrency)
Exemple #6
0
def main():
    """Read feeds with worker processes and present the results."""
    limit, concurrency = handle_commandline()
    Qtrac.report("starting...")
    # whatsnew.dat sits next to this script.
    path = os.path.join(os.path.dirname(__file__), "whatsnew.dat")
    jobs = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    create_processes(limit, jobs, results, concurrency)
    job_count = add_jobs(path, jobs)
    process(job_count, jobs, results, concurrency)
Exemple #7
0
def main():
    """Fetch feeds with a configurable number of worker threads."""
    limit, how_many_threads = handle_commandline()
    Qtrac.report("starting...")
    datafile = os.path.join(os.path.dirname(__file__), "whatsnew.dat")

    jobs, results = queue.Queue(), queue.Queue()
    create_threads(limit, jobs, results, how_many_threads)
    todo = add_jobs(datafile, jobs)
    process(todo, jobs, results, how_many_threads)
Exemple #8
0
def summarize(summary):
    """Report totals for a single-threaded run."""
    parts = ["copied {} scaled {} ".format(summary.copied, summary.scaled)]
    skipped = summary.todo - (summary.copied + summary.scaled)
    if skipped:
        parts.append("skipped {} ".format(skipped))
    parts.append("single-threaded")
    if summary.canceled:
        parts.append(" [canceled]")
    Qtrac.report("".join(parts))
    print()
def summarize(summary, concurrency):
    """Report totals for a multi-process run."""
    skipped = summary.todo - (summary.copied + summary.scaled)
    msg = "copied {} scaled {} ".format(summary.copied, summary.scaled)
    if skipped:
        msg += "skipped {} ".format(skipped)
    msg += "using {} processes".format(concurrency)
    if summary.canceled:
        msg += " [canceled]"
    Qtrac.report(msg)
    print()
Exemple #10
0
def main():
    """Scale images via the coroutine pipeline; Ctrl+C cancels cleanly."""
    size, smooth, source, target, concurrency = handle_commandline()
    Qtrac.report("starting...")
    was_canceled = False
    try:
        scale(size, smooth, source, target, concurrency)
    except KeyboardInterrupt:
        Qtrac.report("canceling...")
        was_canceled = True
    summarize(concurrency, was_canceled)
Exemple #11
0
def summarize(concurrency, canceled):
    """Report the totals accumulated on the module-level results coroutine."""
    skipped = results.todo - (results.copied + results.scaled)
    message = "copied {} scaled {} ".format(results.copied, results.scaled)
    if skipped:
        message += "skipped {} ".format(skipped)
    message += "using {} coroutines".format(concurrency)
    if canceled:
        message += " [canceled]"
    Qtrac.report(message)
    print()
def main():
    """Drive the coroutine-based image scaler from the command line."""
    size, smooth, source, target, concurrency = handle_commandline()
    Qtrac.report("starting...")
    interrupted = False
    try:
        scale(size, smooth, source, target, concurrency)
    except KeyboardInterrupt:
        Qtrac.report("canceling...")
        interrupted = True
    summarize(concurrency, interrupted)
Exemple #13
0
def summarize(summary):
    """Print a one-line summary for a sequential run."""
    message = "copied {} scaled {} ".format(summary.copied, summary.scaled)
    skipped = summary.todo - summary.copied - summary.scaled
    if skipped:
        message += "skipped {} ".format(skipped)
    message += "single-threaded"
    message += " [canceled]" if summary.canceled else ""
    Qtrac.report(message)
    print()
def summarize(concurrency, canceled):
    """Report totals accumulated on the global results coroutine."""
    pieces = ["copied {} scaled {} ".format(results.copied, results.scaled)]
    difference = results.todo - (results.copied + results.scaled)
    if difference:
        pieces.append("skipped {} ".format(difference))
    pieces.append("using {} coroutines".format(concurrency))
    if canceled:
        pieces.append(" [canceled]")
    Qtrac.report("".join(pieces))
    print()
Exemple #15
0
def main():
    """Read feeds using worker threads, then process the collected results."""
    limit, concurrency = handle_commandline()
    Qtrac.report("starting...")
    filename = os.path.join(os.path.dirname(__file__), "whatsnew.dat")  # whatsnew.dat lives beside this script
    # The .dat file appears to hold title/URL pairs (UTF-8) — TODO confirm format.
    jobs = queue.Queue()
    results = queue.Queue()
    create_threads(limit, jobs, results, concurrency)
    todo = add_jobs(filename, jobs)  # presumably the number of jobs queued — verify add_jobs
    process(todo, jobs, results, concurrency)
Exemple #16
0
def scaler(receiver, sink, size, smooth, me):
    """Coroutine: scale jobs addressed to *me*; forward the rest downstream."""
    while True:
        source_image, target_image, who = (yield)
        if who != me:
            # Not ours: pass the job along the chain, if there is one.
            if receiver is not None:
                receiver.send((source_image, target_image, who))
            continue
        try:
            sink.send(scale_one(size, smooth, source_image, target_image))
        except Image.Error as err:
            Qtrac.report(str(err), True)
def scaler(receiver, sink, size, smooth, me):
    """Coroutine: scale jobs addressed to *me*; forward others to *receiver*."""
    while True:
        sourceImage, targetImage, who = (yield)
        if who == me:
            try:
                result = scale_one(size, smooth, sourceImage, targetImage)
                sink.send(result)
            except Image.Error as err:
                Qtrac.report(str(err), True)
        elif receiver is not None:
            # Not ours: pass the job along the coroutine chain.
            receiver.send((sourceImage, targetImage, who))
Exemple #18
0
def worker(limit, jobs, results):
    """Daemon-thread loop: read one queued feed per iteration."""
    while True:
        try:
            feed = jobs.get()  # blocks until add_jobs() puts a Feed on the queue
            ok, result = Feed.read(feed, limit)  # ok is False when the URL could not be read
            if not ok:
                Qtrac.report(result, True)  # result carries the error text here
            elif result is not None:
                Qtrac.report("read {}".format(result[0][4:-6]))  # strip the surrounding markup tag
                results.put(result)
        finally:
            jobs.task_done()  # every jobs.get() is paired with jobs.task_done()
Exemple #19
0
def worker(limit, jobs, results):
    """Read feeds from the jobs queue until the process exits."""
    while True:  # runs in a daemon thread, so it ends automatically when the main thread finishes
        try:
            feed = jobs.get()
            ok, result = Feed.read(feed, limit)
            if not ok:
                Qtrac.report(result, True)
            elif result is not None:
                Qtrac.report("read {}".format(result[0][4:-6]))
                results.put(result)
        finally:
            jobs.task_done()
Exemple #20
0
def worker(limit, jobs, results):
    """Daemon worker: read feeds off the jobs queue, publish good results."""
    while True:
        try:
            ok, result = Feed.read(jobs.get(), limit)
            if not ok:
                Qtrac.report(result, True)
            elif result is not None:
                Qtrac.report("read {}".format(result[0][4:-6]))
                results.put(result)
        finally:
            jobs.task_done()
def worker(size, smooth, jobs, results):
    """Daemon worker: scale queued image pairs and record each Result."""
    while True:
        try:
            source_image, target_image = jobs.get()
            try:
                result = scale_one(size, smooth, source_image, target_image)
                action = "copied" if result.copied else "scaled"
                Qtrac.report("{} {}".format(action,
                                            os.path.basename(result.name)))
                results.put(result)
            except Image.Error as err:
                Qtrac.report(str(err), True)
        finally:
            jobs.task_done()
def worker(size, smooth, jobs, results):
    """Scale queued image pairs; report and collect each Result."""
    while True:
        try:
            sourceImage, targetImage = jobs.get()
            try:
                result = scale_one(size, smooth, sourceImage, targetImage)
                Qtrac.report("{} {}".format("copied" if result.copied else
                        "scaled", os.path.basename(result.name)))
                results.put(result)
            except Image.Error as err:
                Qtrac.report(str(err), True)
        finally:
            jobs.task_done()  # signals that the item returned by get() was processed; must pair one-to-one with get() calls
Exemple #23
0
def worker(limit, jobs, results):
    """Worker loop: consume Feed items and publish readable results."""
    while True:
        try:
            feed = jobs.get()  # Feed is a (name, rss_url) named tuple
            ok, result = Feed.read(feed, limit)
            if not ok:
                Qtrac.report(result, True)
            elif result is not None:
                # result[0] is wrapped in markup; slice off the enclosing tag
                Qtrac.report("read {}".format(result[0][4:-6]))
                results.put(result)
        finally:
            jobs.task_done()
Exemple #24
0
def reader(receiver, sink, limit, me):
    """Coroutine: read feeds addressed to *me*; pass others along the chain."""
    while True:
        feed, who = (yield)
        if who != me:
            if receiver is not None:
                receiver.send((feed, who))
            continue
        ok, result = Feed.read(feed, limit)
        if ok:
            Qtrac.report("read {} at {}".format(feed.title, feed.url))
        else:
            Qtrac.report(result, True)
            result = None  # downstream sink receives None for failed reads
        sink.send(result)
def reader(receiver, sink, limit, me):
    """Coroutine: read feeds addressed to *me*; forward others to *receiver*."""
    while True:
        feed, who = (yield)
        if who == me:
            ok, result = Feed.read(feed, limit)
            if not ok:
                Qtrac.report(result, True)
                result = None  # sink still receives a value, but None on failure
            else:
                Qtrac.report("read {} at {}".format(feed.title, feed.url))
            sink.send(result)
        elif receiver is not None:
            receiver.send((feed, who))
Exemple #26
0
def worker(size, smooth, jobs, results):
    """Thread/process worker: scale one queued image pair per iteration."""
    while True:
        try:
            pair = jobs.get()
            try:
                result = scale_one(size, smooth, *pair)
                verb = "copied" if result.copied else "scaled"
                Qtrac.report("{} {}".format(verb,
                                            os.path.basename(result.name)))
                results.put(result)
            except Image.Error as err:
                Qtrac.report(str(err), True)
        finally:
            jobs.task_done()
Exemple #27
0
def main():
    """Benchmark driver: scale images with fixed arguments and print timing.

    Fix: the final line was a Python 2 print statement
    (`print "time:%s" %(v)`), which is a SyntaxError in Python 3.
    """
    t1 = time.time()
    print(multiprocessing.cpu_count())
    # Hard-coded arguments stand in for handle_commandline() in this benchmark.
    size, smooth, source, target, concurrency = (100, 1, ".", ".", 4)
    Qtrac.report("starting...")
    scale(size, smooth, source, target, concurrency)  # Summary return value is unused here
    v = time.time() - t1
    print("==" * 20)
    print("time:%s" % (v))
Exemple #28
0
def worker(size, smooth, jobs, results):
    """Scale queued image pairs; report and collect each Result.

    NOTE(review): the report shows "[<copied>]" instead of "copied" for
    copied images — looks like leftover debugging output; confirm intent.
    """
    while True:
        
        try:
            sourceImage, targetImage = jobs.get()
            try:
                result = scale_one(size, smooth, sourceImage, targetImage)
                Qtrac.report("{} {}".format("[%s]"%result.copied if result.copied else
                        "scaled", os.path.basename(result.name)))
                results.put(result)
            except Exception as err:  # broad catch: any failure is reported, not raised
                Qtrac.report(str(err), True)
        finally:
            jobs.task_done()
def summarize(summary, concurrency):
    """Summarize all processing results in a single report line.

    Arguments:
        summary -- object with todo/copied/scaled/canceled fields
        concurrency -- number of worker processes used
    """
    message = "copied {} scaled {} ".format(summary.copied, summary.scaled)
    difference = summary.todo - (summary.copied + summary.scaled)
    if difference:
        message += "skipped {} ".format(difference)
    message += "using {} processes".format(concurrency)
    if summary.canceled:
        message += " [canceled]"
    Qtrac.report(message)
    print()
Exemple #30
0
def process(todo, jobs, results, concurrency):
    """Wait for all jobs, then report totals and open the generated page."""
    canceled = False
    try:
        jobs.join()  # block until every queued job is marked done
    except KeyboardInterrupt:  # may not work on Windows
        Qtrac.report("canceling...")
        canceled = True
    if not canceled:
        done, filename = output(results)
    else:
        done = results.qsize()
    Qtrac.report("read {}/{} feeds using {} threads{}".format(done, todo,
            concurrency, " [canceled]" if canceled else ""))
    print()
    if not canceled:
        webbrowser.open(filename)
Exemple #31
0
def process(todo, jobs, results, concurrency):
    """Wait for all jobs to finish, report progress, and open the output page."""
    canceled = False
    try:
        jobs.join()  # Wait for all the work to be done
    except KeyboardInterrupt:  # May not work on Windows
        Qtrac.report("canceling...")
        canceled = True
    if canceled:
        done = results.qsize()  # count of completed jobs still queued
    else:
        done, filename = output(results)
    Qtrac.report("read {}/{} feeds using {} threads{}".format(
        done, todo, concurrency, " [canceled]" if canceled else ""))
    print()
    if not canceled:
        webbrowser.open(filename)
Exemple #32
0
def main():
    """Sequentially build the What's New page and open it unless canceled."""
    limit = handle_commandline()
    filename = os.path.join(tempfile.gettempdir(), "whatsnew.html")
    todo = done = 0
    canceled = False
    with open(filename, "wt", encoding="utf-8") as file:
        for line in ("<!doctype html>\n",
                     "<html><head><title>What's New</title></head>\n",
                     "<body><h1>What's New</h1>\n"):
            file.write(line)
        todo, done, canceled = write_body(file, limit)
        file.write("</body></html>\n")
    Qtrac.report("read {}/{} feeds{}".format(
        done, todo, " [canceled]" if canceled else ""))
    print()
    if not canceled:
        webbrowser.open(filename)
def main():
    """Single-threaded page builder: write header, body, footer, then show it."""
    limit = handle_commandline()
    page = os.path.join(tempfile.gettempdir(), "whatsnew.html")
    canceled = False
    todo = done = 0
    with open(page, "wt", encoding="utf-8") as file:
        file.write("<!doctype html>\n")
        file.write("<html><head><title>What's New</title></head>\n")
        file.write("<body><h1>What's New</h1>\n")
        todo, done, canceled = write_body(file, limit)
        file.write("</body></html>\n")
    suffix = " [canceled]" if canceled else ""
    Qtrac.report("read {}/{} feeds{}".format(done, todo, suffix))
    print()
    if not canceled:
        webbrowser.open(page)
def wait_for(futures, env):
    """Collect (key, value) results from completed futures into a dict.

    Each successful future must return a 2-item sequence; item 0 becomes
    the dict key, item 1 the value. *env* is currently unused; it is kept
    for interface compatibility with callers.

    Fix: the original wrote `elif isinstance(err):`, which raises
    TypeError because isinstance() requires a classinfo argument; the
    unused `canceled` local is also removed.
    """
    data = {}
    for future in concurrent.futures.as_completed(futures):
        err = future.exception()
        if err is None:
            result = future.result()
            data[result[0]] = result[1]
        elif isinstance(err, Exception):
            Qtrac.report(str(err), True)
        else:
            raise err  # Unanticipated (e.g. a BaseException subclass)
    return data
Exemple #35
0
def main():
    """Benchmark creating many Point objects; -P runs a tiny regression size.

    Fix: time.clock() was removed in Python 3.8; time.perf_counter() is
    the documented replacement for elapsed-time measurement.
    """
    regression = False
    size = int(1e6)
    if len(sys.argv) > 1 and sys.argv[1] == "-P":
        regression = True
        size = 20
    Qtrac.remove_if_exists(os.path.join(tempfile.gettempdir(), "point.db"))
    start = time.perf_counter()
    points = []
    for i in range(size):
        points.append(Point(i, i**2, i // 2))
    end = time.perf_counter() - start
    assert points[size - 1].x == size - 1
    print(len(points))
    if not regression:  # wait until we can see how much memory is used
        print("took {} secs to create {:,} points".format(end, size))
        input("press Enter to finish")
def scale(size, smooth, source, target, concurrency):
    """Scale images with worker processes; returns a Summary of the run."""
    canceled = False
    jobs = multiprocessing.JoinableQueue()  # job queue
    results = multiprocessing.Queue()       # result queue
    create_processes(size, smooth, jobs, results, concurrency)   # workers start now and block until jobs arrive
    todo = add_jobs(source, target, jobs)  # put the jobs on the queue
    try:
        jobs.join()  # producer blocks until every queued item has had task_done() called
    except KeyboardInterrupt: # May not work on Windows
        Qtrac.report("canceling...")
        canceled = True
    copied = scaled = 0
    while not results.empty(): # Safe because all jobs have finished
        result = results.get_nowait()
        copied += result.copied
        scaled += result.scaled
    return Summary(todo, copied, scaled, canceled)
Exemple #37
0
def worker(size, smooth, jobs, results):
    """Daemon worker: scale one queued image pair per iteration."""
    while True:  # loops forever; exits with the main process (daemon=True presumably — confirm create_processes)
        try:
            sourceImage, targetImage = jobs.get(
            )  # blocks until an image pair is available
            try:
                result = scale_one(size, smooth, sourceImage,
                                   targetImage)  # returns a Result
                Qtrac.report("{} {}".format(
                    "copied" if result.copied else "scaled",
                    os.path.basename(result.name)))
                results.put(
                    result)  # put the Result on the results queue
            except Image.Error as err:
                Qtrac.report(str(err), True)  # True marks this report as an error
        finally:
            jobs.task_done()  # mark this job as done
Exemple #38
0
def process(todo, jobs, results, concurrency):
    """Wait for all jobs, then report totals and open the generated page."""
    canceled = False
    try:
        jobs.join()  # Wait for all the work to be done
    except KeyboardInterrupt: # May not work on Windows
        Qtrac.report("canceling...")
        canceled = True
    if canceled:
        done = results.qsize()  # queue size = number of jobs completed so far
        filename = None  # placeholder only; webbrowser.open is skipped when canceled
    else:
        done, filename = output(results)
    Qtrac.report("read {}/{} feeds using {} threads{}".format(done, todo,
            concurrency, " [canceled]" if canceled else ""))
    print()  # just a blank line
    if not canceled:
        webbrowser.open(filename)
Exemple #39
0
def scale(size, smooth, src_dir, dest_dir, num_procs):
    """Scale every image under src_dir into dest_dir using num_procs processes."""
    jobs = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    create_processes(size, smooth, jobs, results, num_procs)
    todo = add_jobs(src_dir, dest_dir, jobs)
    canceled = False
    try:
        jobs.join()
    except KeyboardInterrupt:  # catch Ctrl-C (may not work on Windows)
        Qtrac.report("canceling...")
        canceled = True
    copied = scaled = 0
    while not results.empty():  # safe: every job has already finished
        outcome = results.get_nowait()
        copied += outcome.copied
        scaled += outcome.scaled
    return Summary(todo, copied, scaled, canceled)
Exemple #40
0
def scale(size, smooth, src_dir, dest_dir, num_procs):
    """Scale images from src_dir into dest_dir across num_procs processes."""
    canceled = False
    jobs = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    create_processes(size, smooth, jobs, results, num_procs)
    todo = add_jobs(src_dir, dest_dir, jobs)
    try:
        jobs.join()
    except KeyboardInterrupt: # catch Ctrl-C (may not work on Windows)
        Qtrac.report("canceling...")
        canceled = True
    copied = scaled = 0
    while not results.empty():  # Safe because all jobs have finished
        result = results.get_nowait()
        copied += result.copied
        scaled += result.scaled
    return Summary(todo, copied, scaled, canceled)
Exemple #41
0
def main():
    """Benchmark creating many Point objects; -P runs a tiny regression size.

    Fix: time.clock() was removed in Python 3.8; time.perf_counter() is
    the documented replacement for elapsed-time measurement.
    """
    regression = False
    size = int(1e6)
    if len(sys.argv) > 1 and sys.argv[1] == "-P":
        regression = True
        size = 20
    Qtrac.remove_if_exists(os.path.join(tempfile.gettempdir(), "point.db"))
    start = time.perf_counter()
    points = []
    for i in range(size):
        points.append(Point(i, i ** 2, i // 2))
    end = time.perf_counter() - start
    assert points[size - 1].x == size - 1
    print(len(points))
    if not regression:  # wait until we can see how much memory is used
        print("took {} secs to create {:,} points".format(end, size))
        input("press Enter to finish")
Exemple #42
0
def main():
    """Read feeds concurrently with a process pool and show the results."""
    limit, concurrency = handle_commandline()
    Qtrac.report("starting...")
    filename = os.path.join(os.path.dirname(__file__), "whatsnew.dat")
    with concurrent.futures.ProcessPoolExecutor(
            max_workers=concurrency) as executor:
        # One Feed.read task per feed in the data file.
        futures = {executor.submit(Feed.read, feed, limit)
                   for feed in Feed.iter(filename)}
        done, filename, canceled = process(futures)
        if canceled:
            executor.shutdown()
    Qtrac.report("read {}/{} feeds using {} processes{}".format(
        done, len(futures), concurrency, " [canceled]" if canceled else ""))
    print()
    if not canceled:
        webbrowser.open(filename)
def main():
    """Thread-pool variant: submit one Feed.read task per feed."""
    limit, concurrency = handle_commandline()
    Qtrac.report("starting...")
    filename = os.path.join(os.path.dirname(__file__), "whatsnew.dat")
    futures = set()
    with concurrent.futures.ThreadPoolExecutor(
            max_workers=concurrency) as executor:
        for feed in Feed.iter(filename):
            futures.add(executor.submit(Feed.read, feed, limit))
        done, filename, canceled = process(futures)
        if canceled:
            executor.shutdown()
    tail = " [canceled]" if canceled else ""
    Qtrac.report("read {}/{} feeds using {} threads{}".format(
        done, len(futures), concurrency, tail))
    print()
    if not canceled:
        webbrowser.open(filename)
Exemple #44
0
def main():
    """Build the What's New page by pushing feeds through a coroutine pipeline."""
    limit, concurrency = handle_commandline()
    Qtrac.report("starting...")
    datafile = os.path.join(os.path.dirname(__file__), "whatsnew.dat")
    filename = os.path.join(tempfile.gettempdir(), "whatsnew.html")
    canceled = False
    with open(filename, "wt", encoding="utf-8") as file:
        write_header(file)
        pipeline = create_pipeline(limit, concurrency, file)
        try:
            for index, feed in enumerate(Feed.iter(datafile)):
                # Round-robin feeds across the pipeline's coroutines.
                pipeline.send((feed, index % concurrency))
        except KeyboardInterrupt:
            Qtrac.report("canceling...")
            canceled = True
        write_footer(file, results.ok, results.todo, canceled, concurrency)
    if not canceled:
        webbrowser.open(filename)
def main():
    """Build the What's New page through a coroutine pipeline; open on success."""
    limit, concurrency = handle_commandline()
    Qtrac.report("starting...")
    datafile = os.path.join(os.path.dirname(__file__), "whatsnew.dat")
    filename = os.path.join(tempfile.gettempdir(), "whatsnew.html")
    canceled = False
    with open(filename, "wt", encoding="utf-8") as file:
        write_header(file)
        pipeline = create_pipeline(limit, concurrency, file)
        try:
            for i, feed in enumerate(Feed.iter(datafile)):
                # distribute feeds round-robin over the pipeline coroutines
                pipeline.send((feed, i % concurrency))
        except KeyboardInterrupt:
            Qtrac.report("canceling...")
            canceled = True
        write_footer(file, results.ok, results.todo, canceled,
                concurrency)
    if not canceled:
        webbrowser.open(filename)
Exemple #46
0
def main():
    """Read feeds with a thread pool; report totals and open the results page."""
    limit, concurrency = handle_commandline()
    Qtrac.report("starting...")
    filename = os.path.join(os.path.dirname(__file__), "whatsnew.dat")
    futures = set()  # futures for the submitted read tasks
    with concurrent.futures.ThreadPoolExecutor(
            max_workers=concurrency) as executor:
        for feed in Feed.iter(filename):  # yields Feed(title, url) items
            future = executor.submit(
                Feed.read, feed,
                limit)  # schedule Feed.read(feed, limit) on the pool
            futures.add(future)  # remember the future so process() can track it
        done, filename, canceled = process(futures)
        if canceled:
            executor.shutdown()
    Qtrac.report("read {}/{} feeds using {} threads{}".format(
        done, len(futures), concurrency, " [canceled]" if canceled else ""))
    print()
    if not canceled:
        webbrowser.open(filename)
Exemple #47
0
def scale_one(size, smooth, sourceImage, targetImage):
    """Copy or scale a single image and return a Result tally.

    Returns Result(copied, scaled, name); on any failure the image is
    skipped and Result(0, 0, sourceImage) is returned instead.
    """
    try:
        oldImage = Image.from_file(sourceImage)
        if oldImage.width <= size and oldImage.height <= size:
            # Already small enough: copy it unchanged.
            oldImage.save(targetImage)
            return Result(1, 0, targetImage)
        if smooth:
            ratio = min(size / oldImage.width, size / oldImage.height)
            newImage = oldImage.scale(ratio)
        else:
            stride = int(math.ceil(max(oldImage.width / size,
                                       oldImage.height / size)))
            newImage = oldImage.subsample(stride)
        newImage.save(targetImage)
        return Result(0, 1, targetImage)
    except Exception as e:
        Qtrac.report("Problem scaling " +
                     os.path.basename(sourceImage), error=True)
        Qtrac.report(str(e), error=True)
        return Result(0, 0, sourceImage)
Exemple #48
0
def wait_for(futures):
    """Tally Results from futures as they complete; Ctrl+C cancels the rest."""
    canceled = False
    copied = scaled = 0
    try:
        for future in concurrent.futures.as_completed(
                futures
        ):  # blocks, yielding each future as it finishes
            err = future.exception()
            if err is None:
                result = future.result()
                copied += result.copied
                scaled += result.scaled
                Qtrac.report("{} {}".format(
                    "copied" if result.copied else "scaled",
                    os.path.basename(result.name)))
            elif isinstance(err, Image.Error):  # image file error
                Qtrac.report(str(err), True)
            else:
                raise err  # Unanticipated
    except KeyboardInterrupt:  # Ctrl+C aborts the remaining work
        Qtrac.report("canceling...")
        canceled = True
        for future in futures:
            future.cancel(
            )  # futures that have not started yet will be skipped
    return Summary(len(futures), copied, scaled, canceled)
Exemple #49
0
def scale(size, smooth, source, target):
    """Scale images one at a time in the current thread; Ctrl+C stops the run."""
    canceled = False
    todo = copied = scaled = 0
    for source_image, target_image in get_jobs(source, target):
        try:
            todo += 1
            outcome = scale_one(size, smooth, source_image, target_image)
            copied += outcome.copied
            scaled += outcome.scaled
            verb = "copied" if outcome.copied else "scaled"
            Qtrac.report("{} {}".format(verb, os.path.basename(target_image)))
        except Image.Error as err:
            Qtrac.report(str(err), True)
        except KeyboardInterrupt:
            Qtrac.report("canceling...")
            canceled = True
            break
    return Summary(todo, copied, scaled, canceled)
def write_body(file, limit):
    """Read every feed sequentially, writing items to *file*.

    Returns (todo, done, canceled) counts for the summary line.
    """
    canceled = False
    todo = done = 0
    datafile = os.path.join(os.path.dirname(__file__), "whatsnew.dat")
    for feed in Feed.iter(datafile):
        todo += 1
        try:
            ok, result = Feed.read(feed, limit)
            if not ok:
                Qtrac.report(result, True)
            elif result is not None:
                Qtrac.report("read {} at {}".format(feed.title, feed.url))
                file.writelines(result)
                done += 1
        except KeyboardInterrupt:
            Qtrac.report("canceling...")
            canceled = True
            break
    return todo, done, canceled
def wait_for(futures):
    """Gather (ok, result) pairs as futures complete; Ctrl+C cancels the rest."""
    canceled = False
    results = []
    try:
        for future in concurrent.futures.as_completed(futures):
            err = future.exception()
            if err is not None:
                raise err  # Unanticipated failure inside the worker
            ok, result = future.result()
            if not ok:
                Qtrac.report(result, True)
            elif result is not None:
                Qtrac.report("read {}".format(result[0][4:-6]))
            results.append((ok, result))
    except KeyboardInterrupt:
        Qtrac.report("canceling...")
        canceled = True
        for future in futures:
            future.cancel()
    return canceled, results
Exemple #52
0
def wait_for(futures):
    """Tally copied/scaled counts from futures as they finish.

    Returns Summary(len(futures), copied, scaled, canceled).
    """
    canceled = False
    copied = scaled = 0
    try:
        for future in concurrent.futures.as_completed(futures):
            err = future.exception()
            if err is None:
                outcome = future.result()
                copied += outcome.copied
                scaled += outcome.scaled
                Qtrac.report("{} {}".format(
                    "copied" if outcome.copied else "scaled",
                    os.path.basename(outcome.name)))
            elif isinstance(err, Image.Error):
                Qtrac.report(str(err), True)
            else:
                raise err  # Unanticipated
    except KeyboardInterrupt:
        Qtrac.report("canceling...")
        canceled = True
        for future in futures:
            future.cancel()
    return Summary(len(futures), copied, scaled, canceled)
Exemple #53
0
def main():
    """Entry point: parse arguments, scale the images, report the summary."""
    size, smooth, source, target, concurrency = handle_commandline()
    Qtrac.report("starting...")
    summarize(scale(size, smooth, source, target, concurrency), concurrency)
def write_footer(file, ok, todo, canceled, concurrency):
    """Close the HTML page and report how many feeds were read."""
    file.write("</body></html>\n")
    suffix = " [canceled]" if canceled else ""
    Qtrac.report("read {}/{} feeds using {} coroutines{}".format(
        ok, todo, concurrency, suffix))
    print()