Code example #1
File: network.py Project: Vitalii8086/batch-scoring
class Network(object):

    def __init__(self, concurrency, timeout):
        self._executor = ThreadPoolExecutor(concurrency)
        self._timeout = timeout

    def _request(self, request):
        try:
            session = requests.Session()
            prepared = session.prepare_request(request)
            response = session.send(prepared, timeout=self._timeout)
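            # on success, requests dispatches the request's own 'response'
            # hooks when sending; the except branch below invokes the hook
            # manually with a FakeResponse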
        except Exception as exc:
            logger.warning('Exception {}: {}'.format(type(exc), exc))
            callback = request.kwargs['hooks']['response']
            response = FakeResponse(400, 'No Response')
            callback(response)

    def perform_requests(self, requests):
        return self._executor.map(self._request, requests)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._executor.shutdown(wait=False)
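For context, a hedged usage sketch of the class above; the request object and response hook here are illustrative (the project attaches hooks and its own request type elsewhere, as the request.kwargs['hooks'] lookup suggests):

import requests

def on_response(response, **kwargs):  # hypothetical hook
    print(response.status_code)

reqs = [requests.Request('GET', 'http://example.com',
                         hooks={'response': on_response})]
with Network(concurrency=4, timeout=30) as net:
    list(net.perform_requests(reqs))  # drain the map() generator before shutdown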
Code example #2
File: main.py Project: dvogel/usabenford
def main():
    timewarp = ThreadPoolExecutor(2)
    for fiscal_year in settings.FISCAL_YEARS:
        results = timewarp.map(lambda combs: apply(download_and_analyze, combs),
                               usaspending.file_param_combs(fiscal_year))
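        # Python 2's apply(f, args) is equivalent to f(*args)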
        for result in results:
            success = result[0]
            if success:
                analyses = result[1]
                if analyses:
                    for dt1, field_analyses in analyses.items():
                        for field_name, analysis in field_analyses.items():
                            print "Analysis completed for {fy}, {m}/{y}, {a}, {st}, {fld}".format(
                                fy=analysis['fiscal_year'],
                                m=analysis['month'],
                                y=analysis['year'],
                                a=analysis['agency'],
                                st=analysis['spending_type'],
                                fld=analysis['field_name'])
            else:
                error = result[1]
                if isinstance(error, DownloadFileFailure):
                    print >>sys.stderr, "Failed to download %s because %s" % (error.filename, error.cause)
                else:
                    print >>sys.stderr, str(error)
Code example #3
File: network.py Project: datarobot/batch-scoring
class Network(object):

    def __init__(self, concurrency, timeout, ui=None):
        self._executor = ThreadPoolExecutor(concurrency)
        self._timeout = timeout
        self.session = requests.Session()
        self._ui = ui or logger
        self.futures = []
        self.concurrency = concurrency

    def _request(self, request):
        prepared = self.session.prepare_request(request)
        try:
            self.session.send(prepared, timeout=self._timeout)
        except requests.exceptions.ReadTimeout:
            self._ui.warning(textwrap.dedent("""\
                The server did not send any data in the allotted amount of time.
                You might want to decrease the "--n_concurrent" parameter
                or increase the "--timeout" parameter."""))

        except Exception as exc:
            self._ui.debug('Exception {}: {}'.format(type(exc), exc))
            try:
                callback = request.kwargs['hooks']['response']
            except AttributeError:
                callback = request.hooks['response'][0]
            response = FakeResponse(400, 'No Response')
            callback(response)

    def perform_requests(self, requests):
        for r in requests:
            while True:
                self.futures = [i for i in self.futures if not i.done()]
                if len(self.futures) < self.concurrency:
                    self.futures.append(self._executor.submit(self._request,
                                                              r))
                    break
                else:
                    sleep(0.1)
            yield
        #  wait for all batches to finish before returning
        while self.futures:
            f_len = len(self.futures)
            self.futures = [i for i in self.futures if not i.done()]
            if f_len != len(self.futures):
                self._ui.debug('Waiting for final requests to finish. '
                               'remaining requests: {}'
                               ''.format(len(self.futures)))
            sleep(0.1)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._executor.shutdown(wait=False)
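The throttling idea in perform_requests above — trim completed futures, submit only while fewer than `concurrency` are in flight, then drain — generalizes beyond this class. A minimal standalone sketch of the same pattern (bounded_map, task, and work_items are illustrative names, not from the project):

from concurrent.futures import ThreadPoolExecutor
from time import sleep

def bounded_map(task, work_items, concurrency=4, poll=0.1):
    # keep at most `concurrency` futures in flight at any moment
    in_flight, all_futures = [], []
    with ThreadPoolExecutor(concurrency) as executor:
        for item in work_items:
            while True:
                in_flight = [f for f in in_flight if not f.done()]
                if len(in_flight) < concurrency:
                    future = executor.submit(task, item)
                    in_flight.append(future)
                    all_futures.append(future)
                    break
                sleep(poll)
    # the with-block waits for outstanding work before returning
    return [f.result() for f in all_futures]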
Code example #4
    def __init__(self, maxWorkers, queueSize):
        self.maxWorkers = maxWorkers
        self._pool = ThreadPoolExecutor(max_workers=maxWorkers)
        self._pool._work_queue.maxsize = queueSize
        #self._pool = ProcessPoolExecutor(max_workers=20)
        #self._pool._work_ids.maxsize = 2

        self.processed = 0
        self.debugger = self.__class__.debugger(self)
        self.debugger.start()
Code example #5
    def get_current_prices(self, instance_type='c4.large'):
        thread_pool = TPE(4)
        filename = 'spotprices_{0}.pkl'.format(instance_type)
        if self._price_data_is_old(filename):
            workers = thread_pool.map(lambda x, instance_type=instance_type: self.get_price_for_region(region=x, instance_type=instance_type), self._get_regions())
            results = list(zip(self._get_regions(),[result for result in workers]))
            result_file = open(filename,'wb')
            pickle.dump(results,result_file)
            result_file.close()
        else:
            result_file = open(filename,'rb')
            results = pickle.load(result_file)

        return results
Code example #6
    def run(self, force=False):
        """
        Process items in this queue

        :param force: Force queue processing (currently not implemented)
        """

        if self.amActive:
            return

        with self.lock:
            self.amActive = True

            # if there's something in the queue then run it in a thread and take it out of the queue
            while not self.empty():
                if self.queue[0][0] < self.min_priority:
                    return

                # execute item in queue
                with ThreadPoolExecutor(1) as executor:
                    if self.stop.isSet():
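                        # clearing these executor/thread internals is a
                        # CPython-specific hack so shutdown() can return
                        # without joining the worker threads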
                        executor._threads.clear()
                        thread._threads_queues.clear()
                        executor.shutdown()
                        return

                    executor.submit(self.callback)

            self.amActive = False
Code example #7
File: arbitrer.py Project: szs8/bitcoin-arbitrage
 def __init__(self):
     self.markets = []
     self.observers = []
     self.updated_markets = {}
     self.init_markets(config.markets)
     self.init_observers(config.observers)
     self.threadpool = ThreadPoolExecutor(max_workers=10)
Code example #8
File: network.py Project: datarobot/batch-scoring
 def __init__(self, concurrency, timeout, ui=None):
     self._executor = ThreadPoolExecutor(concurrency)
     self._timeout = timeout
     self.session = requests.Session()
     self._ui = ui or logger
     self.futures = []
     self.concurrency = concurrency
Code example #9
File: consumer.py Project: oencoding/ListenTV
def consume_from_balancer(balancer, playlists, destination, encrypt=False):
    '''
    Consume all active playlist resources from ``balancer`` and
    report status to it.

    '''
    def consume_resource(playlist_resource):
        m3u8_uri = "{server}:{port}{path}".format(
            server=playlist_resource.server.server,
            port=playlist_resource.server.port,
            path=playlists['streams'][playlist_resource.key]['input-path'])

        try:
            segments_modified = consume(m3u8_uri, destination, encrypt)
        except (httplib.HTTPException, urllib2.HTTPError, IOError, OSError) as err:
            logging.warning(u'Notifying error for resource %s: %s' % (m3u8_uri, err))
            balancer.notify_error()
        else:
            if segments_modified:
                logging.info('Notifying content modified: %s' % m3u8_uri)
                balancer.notify_modified()
                m3u8_path = os.path.join(build_full_path(destination, m3u8_uri), os.path.basename(m3u8_uri))
                transcode_playlist(playlists, playlist_resource.key, segments_modified, m3u8_path)
            else:
                logging.debug('Content not modified: %s' % m3u8_uri)
    try:
        with ThreadPoolExecutor(max_workers=NUM_THREAD_WORKERS) as executor:
            list(executor.map(consume_resource, balancer.actives, timeout=CONSUME_TIMEOUT))
    except TimeoutError:
        balancer.notify_error()
Code example #10
File: hbasengram.py Project: d2207197/linggle-aan
    def query(self, query, limit=None):
        '''
        query(query, limit=None) -> list of Row()

        e.g.
         bnchb = HBaseNgram('hadoop.nlpweb.org', 'bnc-all-cnt-ngram')
         result = bnchb.query('play * ?* role', limit = 10)
         for row in result:
              print row
        '''

        parser = queryparser()
        query += ' STOPHERE'
        querys = parser.parseString(query)[0]
        LOGGER.debug('querys: {}'.format(querys))
        from itertools import imap
        from operator import attrgetter
        if any(imap(len, imap(attrgetter('filters'), querys))):
            limit_timse = 15
        else:
            limit_timse = 1

        limited_scan = partial(self._scan, limit=limit * limit_timse)
        from futures import ThreadPoolExecutor
        with ThreadPoolExecutor(max_workers=20) as e:
            results = e.map(limited_scan, querys)
            # results =  map (limited_scan, querys)
            # LOGGER.debug('results: {}'.format(results))
            return list(islice(self._merge(results), limit))
Code example #11
class MyWidget(DisplayWidget):
    def __init__(self, parent=None):
        super(MyWidget, self).__init__(parent)
        self._executor = ThreadPoolExecutor(max_workers=4)

    def _button_clicked(self):
        future = self._executor.submit(download_data)
        future.add_done_callback(self._populate_textarea)
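        # note: done-callbacks run on the worker thread that executed the
        # future, so _populate_textarea touches the widget off the GUI
        # thread; a real app should marshal this back to the main thread
        # (compare the IOLoop.add_callback pattern in code example #22)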

    def _populate_textarea(self, future):
        self._textarea.setPlainText(future.result())
Code example #12
File: __init__.py Project: legroucha/SiCKRAGE
    def action(self, query, *args):
        """
        Execute single query

        :rtype: query results
        :param query: Query string
        """

        sickrage.srCore.srLogger.db("{}: {} with args {}".format(self.filename, query, args))

        with ThreadPoolExecutor(1) as executor, self.transaction() as tx:
            return executor.submit(tx.query, [query, list(*args)]).result()
Code example #13
File: __init__.py Project: legroucha/SiCKRAGE
    def upsert(self, tableName, valueDict, keyDict):
        """
        Update values, or if no updates done, insert values
        TODO: Make this return true/false on success/error

        :param tableName: table to update/insert
        :param valueDict: values in table to update/insert
        :param keyDict:  columns in table to update
        """

        with ThreadPoolExecutor(1) as executor, self.transaction() as tx:
            return executor.submit(tx.upsert, tableName, valueDict, keyDict).result()
Code example #14
File: frequests.py Project: hexiyou/frequests
def imap(requests, stream=True, size=2, **kwargs):
    """Concurrently converts a generator object of Requests to
    a generator of Responses.

    :param requests: a generator of Request objects.
    :param stream: If False, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time. default is 2
    """
    def stream_flags():
        # yields the `stream` flag once per request; naming this inner
        # generator `stream` (as the original did) shadows the parameter
        # and makes it yield the function object itself
        while True:
            yield stream

    with ThreadPoolExecutor(max_workers=size) as executor:
        for response in executor.map(send, requests, stream_flags(), **kwargs):
            yield response
Code example #15
File: frequests.py Project: hexiyou/frequests
def map(requests, stream=True, size=1, **kwargs):
    """Concurrently converts a list of Requests to Responses.

    :param requests: a collection of Request objects.
    :param stream: If False, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time. If 1, no throttling occurs.
    """

    requests = list(requests)

    with ThreadPoolExecutor(max_workers=size) as executor:
        responses = list(
            executor.map(send, requests, [stream] * len(requests), **kwargs))

    return responses
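A hedged usage sketch for the two helpers above, assuming frequests mirrors grequests' request-builder style (the `get` builder and the `send` worker referenced above are part of the module but not shown in this excerpt):

import frequests

urls = ['http://example.com/', 'http://example.org/']
reqs = [frequests.get(u) for u in urls]  # assumed grequests-style builder
for response in frequests.imap(reqs, size=2):
    print(response.status_code)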
Code example #16
File: consumer.py Project: oencoding/ListenTV
def download_segments(playlist, destination_path, new_key):
    uris = [segment.absolute_uri for segment in playlist.segments]

    def download(uri):
        try:
            return download_to_file(uri, destination_path, playlist.key, new_key)
        except urllib2.HTTPError as err:
            if err.code == 404:
                logging.warning(u'Got 404 trying to download %s' % (uri,))
                return None
            raise

    with ThreadPoolExecutor(max_workers=NUM_THREAD_WORKERS) as executor:
        downloads = executor.map(download, uris, timeout=CONSUME_TIMEOUT)
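        # the timeout is enforced while iterating results: list() below raises
        # concurrent.futures.TimeoutError if any download outlives the deadline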
        return list(downloads)
Code example #17
File: __init__.py Project: legroucha/SiCKRAGE
    def mass_action(self, queries):
        """
        Execute multiple queries

        :param queries: list of queries
        :return: list of results
        """

        sqlResults = []

        q = Queue()
        map(q.put, queries)
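        # note: this relies on Python 2's eager map(); in Python 3 map() is
        # lazy, so nothing would be enqueued without wrapping it in list()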
        while not q.empty():
            with ThreadPoolExecutor(1) as executor, self.transaction() as tx:
                sqlResults += [executor.submit(tx.query, q.get()).result()]

        sickrage.srCore.srLogger.db("{} Transactions executed".format(len(sqlResults)))
        return sqlResults
Code example #18
File: __init__.py Project: legroucha/SiCKRAGE
    def mass_upsert(self, upserts):
        """
        Execute multiple upserts

        :param upserts: list of upserts
        :return: list of results
        """

        sqlResults = []

        q = Queue()
        map(q.put, upserts)
        while not q.empty():
            with ThreadPoolExecutor(1) as executor, self.transaction() as tx:
                sqlResults += [executor.submit(tx.upsert, *q.get()).result()]

        sickrage.srCore.srLogger.db("{} Upserts executed".format(len(sqlResults)))

        return sqlResults
Code example #19
File: network.py Project: jaydenwhyte/batch-scoring
    def run(self, dry_run=False):
        if dry_run:
            i = 0
            for _ in self.perform_requests(True):
                i += 1

            return i

        self._executor = ThreadPoolExecutor(self.concurrency)
        self.session = requests.Session()
        adapter = requests.adapters.HTTPAdapter(
            pool_connections=self.concurrency, pool_maxsize=self.concurrency)
        self.session.mount('http://', adapter)
        self.session.mount('https://', adapter)

        t0 = time()
        last_report = time()
        i = 0
        r = None
        for r in self.perform_requests():
            if r is not True:
                i += 1
                self.ui.info('{} responses sent | time elapsed {}s'.format(
                    i,
                    time() - t0))

                if time() - last_report > REPORT_INTERVAL:
                    self.progress_queue.put(
                        (ProgressQueueMsg.NETWORK_PROGRESS, {
                            "processed": self.n_requests,
                            "retried": self.n_retried,
                            "consumed": self.n_consumed,
                            "rusage": get_rusage(),
                        }))
                    last_report = time()

        self.progress_queue.put((ProgressQueueMsg.NETWORK_DONE, {
            "ret": r,
            "processed": self.n_requests,
            "retried": self.n_retried,
            "consumed": self.n_consumed,
            "rusage": get_rusage(),
        }))
Code example #20
class PlayApi(tornado.web.RequestHandler):
    executor = ThreadPoolExecutor(max_workers=4)

    def initialize(self):
        self.set_header('Content-Type', 'application/json')

    @gen.coroutine
    def get(self):
        self.play()
        self.write(json.dumps({'success': 0}))
        self.flush()

    @run_on_executor
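    # run_on_executor submits play() to the class-level `executor` attribute
    # defined above (tornado looks for `self.executor` by default)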
    def play(self):
        """Publish line in a background task."""
        for i, c in enumerate(cs):
            if i == 0:
                line = {
                    'type': 'path',
                    'lat1': c['lat'],
                    'lon1': c['lon'],
                    'lat2': c['lat'],
                    'lon2': c['lon'],
                }
            else:
                line = {
                    'type': 'path',
                    'lat1': cs[i - 1]['lat'],
                    'lon1': cs[i - 1]['lon'],
                    'lat2': c['lat'],
                    'lon2': c['lon'],
                }

            kwargs = {'message': json.dumps(line)}
            self.application.pc.publish_message(**kwargs)
            print " [x] Sent:", kwargs['message']
            sleep(1)
Code example #21
File: tasking.py Project: SWPFlow/rest-python
#
from functools import wraps, partial
from tornado import gen, concurrent
from tornado import ioloop

# Suppressed known DeprecationWarning for the futures backport
import warnings, exceptions
warnings.filterwarnings("ignore", "The futures package has been deprecated.*",
                        exceptions.DeprecationWarning, "futures")
import futures

import logging
import sys
from futures import ThreadPoolExecutor

EXECUTOR = ThreadPoolExecutor(100)

_LINE = '%' * 40


def safe_return_future(func):
    '''
        Identical to tornado.gen.return_future plus
        thread safety.  Executes the callback in 
        the ioloop thread
    '''
    @wraps(func)
    def exec_func(*args, **kwargs):

        future = concurrent.TracebackFuture()
Code example #22
# -*- coding: UTF-8 -*-
__author__ = "rody800"

from futures import ThreadPoolExecutor
from functools import partial, wraps
import time
import tornado.ioloop
import tornado.web
''' This example uses an HTTP GET request to read a file on the server
    asynchronously, without blocking other HTTP requests.
'''

tpexe = ThreadPoolExecutor(max_workers=2)


class IndexHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("This is index page")


class FileHandler(tornado.web.RequestHandler):
    @tornado.web.asynchronous
    def get(self, filename):
        tpexe.submit(partial(self.readfile, filename)).add_done_callback(
            lambda future: tornado.ioloop.IOLoop.instance().add_callback(
                partial(self.callback_func, future)))
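        # the done-callback fires on the worker thread; IOLoop.add_callback is
        # the thread-safe way to hop back onto the IO loop before touching the
        # RequestHandler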

    def callback_func(self, future):
        self.write(future.result())
        self.finish()
Code example #23
 def __init__(self, parent=None):
     super(MyWidget, self).__init__(parent)
     self._executor = ThreadPoolExecutor(max_workers=4)
Code example #24
File: network.py Project: Vitalii8086/batch-scoring
 def __init__(self, concurrency, timeout):
     self._executor = ThreadPoolExecutor(concurrency)
     self._timeout = timeout
Code example #25
File: find_all_instances.py Project: rtpg/junkcode
 def find_all_instances(self):
     thread_pool = TPE(4)
     workers = thread_pool.map(lambda x: self.find_instances(region=x), self._get_regions())
     results = list(zip(self._get_regions(),[result for result in workers]))
     return results
Code example #26
class Pool(object):
    __metaclass__ = ABCMeta
    class debugger(threading.Thread):
        def __init__(self, pool, interval = 5):
            self.pool = pool
            self.interval = interval
            threading.Thread.__init__(self)

        def start(self):
            self._running = True
            self.startTime = time.time()
            self.lastTime = time.time()
            self.lastNumber = 0
            self.numberAtStart = self.pool.processed
            threading.Thread.start(self)

        def stop(self):
            self._running = False

        def debug(self):
            meanSpeed = (self.pool.processed - self.numberAtStart) / (time.time() - self.startTime)
            instantSpeed = (self.pool.processed - self.lastNumber) / (time.time() - self.lastTime)
            print "%s Threads: %s Remaining: %s Speed: %s / %s Done: %s" % (
                ("["+self.pool.name+"]").ljust(15),
                str(self.pool.maxWorkers).ljust(4),
                str(self.pool.getQueueSize()).ljust(3),
                ("%.2f" % instantSpeed).ljust(9),
                ("%.2f" % meanSpeed).ljust(9),
                str(self.pool.processed)
            )
            self.lastTime = time.time()
            self.lastNumber = self.pool.processed

        def run(self):
            while(self._running):
                self.debug()
                time.sleep(self.interval)

    def __init__(self, maxWorkers, queueSize):
        self.maxWorkers = maxWorkers
        self._pool = ThreadPoolExecutor(max_workers=maxWorkers)
        self._pool._work_queue.maxsize = queueSize
        #self._pool = ProcessPoolExecutor(max_workers=20)
        #self._pool._work_ids.maxsize = 2

        self.processed = 0
        self.debugger = self.__class__.debugger(self)
        self.debugger.start()

    def getQueueSize(self):
        return self._pool._work_queue.qsize()
        #return self._pool._work_ids.qsize()*self.maxWorkers


    @property
    def name(self):
        return self.__class__.__name__

    def submit(self, task, *args, **kwargs):
        def handleSubmit():
            try:
                result = task(*args, **kwargs)
            except Exception as e:
                self.handleError(task, e)
            else:
                self.agregate(task, result)
            self.processed += 1

        self._pool.submit(handleSubmit)

    def waitAndShutdown(self):
        self._pool.shutdown(wait = True)
        self.debugger.stop()

    @abstractmethod
    def handleError(self, task, e):
        pass

    @abstractmethod
    def agregate(self, task, result):
        pass
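A minimal hypothetical subclass, to show how the abstract hooks above are meant to be filled in (UrlPool, fetch, and urls are illustrative names, not from the source):

class UrlPool(Pool):
    def handleError(self, task, e):
        print('error in %s: %s' % (task.__name__, e))

    def agregate(self, task, result):
        print('fetched %d bytes' % len(result))

pool = UrlPool(maxWorkers=4, queueSize=100)
for url in urls:             # hypothetical iterable of URLs
    pool.submit(fetch, url)  # hypothetical fetch(url) -> bytes
pool.waitAndShutdown()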
Code example #27
File: arbitrer.py Project: szs8/bitcoin-arbitrage
class Arbitrer(object):
    def __init__(self):
        self.markets = []
        self.observers = []
        self.updated_markets = {}
        self.init_markets(config.markets)
        self.init_observers(config.observers)
        self.threadpool = ThreadPoolExecutor(max_workers=10)

    def init_markets(self, markets):
        self.market_names = markets
        for market_name in markets:
            try:
#                importlib.import_module('public_markets.' + market_name.lower())
                exec('from public_markets import ' + market_name.lower())
                market = eval( 'public_markets.' + market_name.lower() + '.' +
                              market_name + '()')
                self.markets.append(market)
            except (ImportError, AttributeError) as e:
                print e
                print("%s market name is invalid: Ignored (you should check your config file)" % (market_name))

    def init_observers(self, _observers):
        self.observer_names = _observers
        for observer_name in _observers:
            try:
                exec('import observers.' + observer_name.lower())
                observer = eval('observers.' + observer_name.lower() + '.' +
                                observer_name + '()')
                self.observers.append(observer)
            except (ImportError, AttributeError) as e:
                print("%s observer name is invalid: Ignored (you should check your config file)" % (observer_name))

    def get_profit_for(self, mi, mj, kask, kbid):
        buy_market = self.updated_markets[kask]
        sell_market = self.updated_markets[kbid]

        if buy_market.ask(mi) >= sell_market.bid(mj):
            return 0, 0, 0, 0, 0

        max_amount_buy = buy_market.cum_asize(mi)
        max_amount_sell = sell_market.cum_bsize(mj)
        max_amount = min(max_amount_buy, max_amount_sell, config.max_tx_volume)

        w_buyprice, buy_total = buy_market.wavg_ask(max_amount)
        w_sellprice, sell_total = sell_market.wavg_bid(max_amount)

        profit = sell_total * w_sellprice - buy_total * w_buyprice
        comm = (sell_total * w_sellprice + buy_total * w_buyprice) * (0.2 / 100)
        profit -= comm
        return profit, comm, sell_total, w_buyprice, w_sellprice
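    # Worked example of the formula above: with w_buyprice=100, buy_total=1,
    # w_sellprice=105 and sell_total=1, gross profit is 105 - 100 = 5; the
    # 0.2% commission on both legs is (105 + 100) * 0.002 = 0.41, leaving a
    # net profit of 4.59.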

    def get_max_depth(self, kask, kbid, max_depth_levels=5):
        """

        :param kask: Market name where we can supposed buy  (ask is lower than nbbo bid)
        :param kbid: Market name where we can supposed sell (bid is higher than nbbo ask)
        :return: (i, j) where i = number of levels of kask market lower than nbbo bid
                  j = number of levels of kbid market lower than nbbo ask
        """
        buy_market = self.updated_markets[kask]    # Buy at this market's ask
        sell_market = self.updated_markets[kbid]   # Sell at this market's bid

        # Find all prices that we can buy at (< ref_price)
        ref_price = sell_market.bid()
        for i, ask in enumerate(buy_market.iter_asks()):
            if ref_price < ask or i >= max_depth_levels:
                break

        # Find all the prices we can sell at (> ref_price)
        ref_price = buy_market.ask()
        for j, bid in enumerate(sell_market.iter_bids()):
            if ref_price > bid or j >= max_depth_levels:
                break

        return i, j


    def arbitrage_depth_opportunity(self, kask, kbid):
        """

        :param kask: Market name to buy at
        :param kbid: Market name to sell at
        :return:
        """
        maxi, maxj = self.get_max_depth(kask, kbid)

        buy_market = self.updated_markets[kask]  # Buy at this market's ask
        sell_market = self.updated_markets[kbid]  # Sell at this market's bid

        max_trade_size = min(buy_market.cum_asize(maxi), sell_market.cum_bsize(maxj),
                             config.max_tx_volume)

        w_buyprice, buy_total = buy_market.wavg_ask(max_trade_size)
        w_sellprice, sell_total = sell_market.wavg_bid(max_trade_size)

        profit = sell_total * w_sellprice - buy_total * w_buyprice
        comm = (sell_total * w_sellprice + buy_total * w_buyprice) * (0.2 / 100)
        profit -= comm

        return profit, comm, max_trade_size, \
               self.updated_markets[kask].ask(), \
               self.updated_markets[kbid].bid(), \
               w_buyprice, w_sellprice


    def arbitrage_opportunity(self, kask, ask, kbid, bid):
        """

        :param kask: Market name to buy at
        :param ask:  buy price
        :param kbid: Market name to sell at
        :param bid: sell price
        :return:
        """
        profit, comm, volume, buyprice, sellprice, weighted_buyprice,\
            weighted_sellprice = self.arbitrage_depth_opportunity(kask, kbid)

        if profit < 0:
            return

        if volume == 0 or buyprice == 0:
            return
        perc2 = (1 - (volume - (profit / buyprice)) / volume) * 100
        for observer in self.observers:
            observer.opportunity(
                profit, comm, volume, buyprice, kask, sellprice, kbid,
                perc2, weighted_buyprice, weighted_sellprice)

    def __get_market_depth(self, market, depths):
        _ = market.get_depth()
        depths[market.name] = market

    def update_depths(self):
        depths = {}
        futures = []
        for market in self.markets:
            futures.append(self.threadpool.submit(self.__get_market_depth,
                                                  market, depths))
        wait(futures, timeout=20)
        return depths

    def tickers(self):
        for market in self.markets:
            logging.verbose("ticker: " + market.name + " - " + str(
                market.get_ticker()))

    def replay_history(self, directory):
        import os
        import json
        import pprint
        files = os.listdir(directory)
        files.sort()
        for f in files:
            depths = json.load(open(directory + '/' + f, 'r'))
            self.updated_markets = {}
            for market in self.market_names:
                if market in depths:
                    self.updated_markets[market] = depths[market]
            self.tick()

    def tick(self):
        for observer in self.observers:
            observer.begin_opportunity_finder(self.updated_markets)

        for kmarket1 in self.updated_markets:
            for kmarket2 in self.updated_markets:
                if kmarket1 == kmarket2:  # same market
                    continue
                market1 = self.updated_markets[kmarket1]
                market2 = self.updated_markets[kmarket2]

                # is market1.ask < market2.bid ?
                if market1.is_valid() and market2.is_valid():
                    if market2.bid() > market1.ask():
                        self.arbitrage_opportunity(kmarket1, market1.ask(),
                                                   kmarket2, market2.bid())

        for observer in self.observers:
            observer.end_opportunity_finder()


    def loop(self):
        while True:
            self.updated_markets = self.update_depths()
            self.tickers()
            self.tick()
            time.sleep(config.refresh_rate)
Code example #28
    utc = utc[:-6]  # ignore timezone
    soup.decompose()
    return [url, utc, loc]


#def process_user(username, fullname):
#    print u'creating thread for user; username={}; full name={}'.format(username, fullname)
#    result = get_user(username, fullname)
#    f = open('github/{}.json'.format(username), 'w')
#    f.write(json.dumps(result, indent=4))
#    f.close()

if __name__ == '__main__':
    from guppy import hpy
    h = hpy()
    executor = ThreadPoolExecutor(max_workers=THREADS)
    thread = None
    for subdirs, dirs, files in os.walk('stackoverflow/'):
        i = 0
        for filename in files:
            username = filename[:-5]
            github_filename = 'github/{}.csv'.format(username)
            if os.path.isfile('{}.tmp'.format(github_filename)):
                os.remove('{}.tmp'.format(github_filename))
            if os.path.isfile(github_filename):
                print u"skip {}".format(username)
                continue
            f = codecs.open('stackoverflow/{}'.format(filename), 'r', 'utf-8')
            data = json.load(f)
            f.close()
            fullname = data['answerer']['name']
Code example #29
    except easywebdav.client.OperationFailed:
        print('Root folder already exists. Continuing.')

    # Check whether a folder with the same name already exists
    try:
        webdav.mkdir(mainfolder+'/'+name)
    except easywebdav.client.OperationFailed:
        print('A folder with this name already exists. Choose a different file name.')
        sys.exit()

    # Upload script
    # Create the top-level folders

    print('Creating top-level folders.')

    with ThreadPoolExecutor(max_workers = maxconnect) as poolglav:
        results = [poolglav.submit(mkdirglav, nomerpapok) for nomerpapok in range(1,glav+1)]

    # Create the inner folders (a workaround; reimplement via the pool later)

    print('Creating inner folders.')

    if size > maxname:
        for a in range(0,vspom-1):
            for dlinaimeni in range (0,i):
                list.append(outsymbol(byte_file(proidennoe+dlinaimeni,1,file)[0]))
            s = "".join([str(list[uli]) for uli in range(len(list))])
            #poolvspom.submit(mkdirvspom,s)
            #poolvspom.shutdown
            pot=threading.Thread(target=mkdirvspom,args=[s])
            threads.append(pot)