Example #1
    def test_pause_stop(self):
        """
        Verify that the pause method actually works. In this case, working
        means that the process doesn't send any more HTTP requests after we
        pause, and that stop works when paused.
        """
        core_start = Process(target=self.w3afcore.start, name="TestRunner")
        core_start.daemon = True
        core_start.start()

        # Let the core start, and the count plugin send some requests.
        time.sleep(5)
        count_before_pause = self.count_plugin.count
        self.assertGreater(self.count_plugin.count, 0)

        # Pause and measure
        self.w3afcore.pause(True)
        count_after_pause = self.count_plugin.count

        time.sleep(2)
        count_after_sleep = self.count_plugin.count

        all_equal = count_before_pause == count_after_pause == count_after_sleep

        self.assertTrue(all_equal)

        # Stop the core while it is still paused
        self.w3afcore.stop()
        core_start.join()

        # No more requests sent after pause
        self.assertEqual(self.count_plugin.count, count_after_sleep)
Example #2
def haunt(tMin,tMax):
    configure()
    global hauntMode
    global duration
    running = 1
    timer_start = time.time()
    while running:
        schedule = random.randint(tMin,tMax)
        if duration > 0:
            if time.time() - timer_start + schedule >= duration:
                schedule = duration - (time.time() - timer_start)
                running = 0
        time.sleep(schedule)
        if hauntMode == 0:
            randomNoise()
        if hauntMode == 1:
            coinflip = random.randint(1,2)
            if coinflip == 1:
                randomNoise()
            if coinflip == 2:
                relayTrigger()
        if hauntMode == 2:
            doSound = Process(target = randomNoise)
            doRelay = Process(target = relayTrigger)
            doSound.start()
            doRelay.start()
            doSound.join()
            doRelay.join()
        if duration > 0:
            if time.time() - timer_start >= duration:
                running = 0
Example #3
def process_updates():
    """
    Decides which type the update is, routes it to the appropriate route_update
    method, and launches a thread for the run_extensions method.
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    plugin_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
                                      ca_certs=certifi.where())
    plugin_http.timeout = urllib3.Timeout(connect=1.0)
    plugin_http.retries = 3
    update_router = RouteMessage(PLUGINS, plugin_http, GET_ME, CONFIG)
    while RUNNING.value:
        try:
            update = MESSAGE_QUEUE.get_nowait()
        except queue.Empty:
            time.sleep(SLEEP_TIME)
            continue
        extension_thread = ThreadProcess(target=run_extensions,
                                         args=(update, ))
        extension_thread.start()
        if 'message' in update:
            update_router.route_update(update['message'])
        elif 'edited_message' in update:
            update_router.route_update(update['edited_message'])
        elif 'callback_query' in update:
            route_callback_query(PLUGINS, GET_ME, CONFIG, plugin_http,
                                 update['callback_query'])
        elif 'inline_query' in update:
            route_inline_query(PLUGINS, GET_ME, CONFIG, plugin_http,
                               update['inline_query'])
        extension_thread.join()
Example #4
    def test_stop(self):
        """
        Verify that the stop method actually works. In this case, working
        means that the process doesn't send any more HTTP requests after we
        stop().

        This test seems to be failing at CircleCI because of a test dependency
        issue. If run alone on your workstation it will PASS, but if run at
        CircleCI the count plugin doesn't seem to start.
        """
        core_start = Process(target=self.w3afcore.start, name='TestRunner')
        core_start.daemon = True
        core_start.start()

        # Let the core start, and the count plugin send some requests.
        time.sleep(5)
        count_before_stop = self.count_plugin.count
        self.assertGreater(count_before_stop, 0)

        # Stop now,
        self.w3afcore.stop()
        core_start.join()

        count_after_stop = self.count_plugin.count

        self.assertEqual(count_after_stop, count_before_stop)
Example #5
    def test_pause_unpause(self):
        """
        Verify that the pause method actually works. In this case, working
        means that the process doesn't send any more HTTP requests, a fact
        that is verified with the "fake" count plugin.
        """
        core_start = Process(target=self.w3afcore.start, name='TestRunner')
        core_start.daemon = True
        core_start.start()

        # Let the core start, and the count plugin send some requests.
        time.sleep(5)
        count_before_pause = self.count_plugin.count
        self.assertGreater(self.count_plugin.count, 0)

        # Pause and measure
        self.w3afcore.pause(True)
        count_after_pause = self.count_plugin.count

        time.sleep(2)
        count_after_sleep = self.count_plugin.count

        all_equal = count_before_pause == count_after_pause == count_after_sleep

        self.assertTrue(all_equal)

        # Unpause and verify that all requests were sent
        self.w3afcore.pause(False)
        core_start.join()

        self.assertEqual(self.count_plugin.count, self.count_plugin.loops)
Example #6
def main():
    """
    Creates processes for the functions above and occasionally checks for
    crashed worker processes, relaunching them as needed.
    """
    worker_process = list()
    get_update_process = Process(target=get_updates)
    get_update_process.start()
    for i in range(0, int(CONFIG['BOT_CONFIG']['workers'])):
        worker_process.append(Process(target=process_updates))
        worker_process[i].start()
    time_worker = ThreadProcess(target=check_time_args)
    time_worker.start()
    while RUNNING.value:
        time.sleep(30)
        for index, worker in enumerate(worker_process):
            if not worker.is_alive():
                del worker_process[index]
                worker_process.append(Process(target=process_updates))
                worker_process[-1].start()
        if not time_worker.is_alive():
            time_worker = ThreadProcess(target=check_time_args)
            time_worker.start()
        if not get_update_process.is_alive():
            get_update_process = Process(target=get_updates)
            get_update_process.start()
    get_update_process.join()
    time_worker.join()
    for worker in worker_process:
        worker.join()
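
One caveat with the relaunch loop above: deleting an entry from worker_process while enumerating it shifts the remaining items left, so a dead worker that immediately follows another dead worker can be skipped until the next 30-second pass. A minimal sketch of an alternative that rebuilds the list instead (the helper name respawn_dead is hypothetical; behaviour is otherwise the same):

from multiprocessing import Process

def respawn_dead(workers, target):
    # Rebuild the list instead of deleting entries while enumerating it.
    alive = []
    for worker in workers:
        if worker.is_alive():
            alive.append(worker)
        else:
            replacement = Process(target=target)
            replacement.start()
            alive.append(replacement)
    return alive

# inside the supervision loop:
#     worker_process = respawn_dead(worker_process, process_updates)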
Example #7
class Ticker(object):
    def __init__(self, api, interval=1):
        self.api = api
        self.db = MongoClient().poloniex['ticker']
        self.interval = interval

    def updateTicker(self):
        tick = self.api.returnTicker()
        for market in tick:
            self.db.update_one({'_id': market}, {'$set': tick[market]},
                               upsert=True)
        logger.info('Ticker updated')

    def __call__(self):
        return list(self.db.find())

    def run(self):
        self._running = True
        while self._running:
            self.updateTicker()
            sleep(self.interval)

    def start(self):
        self._thread = Thread(target=self.run)
        self._thread.daemon = True
        self._thread.start()
        logger.info('Ticker started')

    def stop(self):
        self._running = False
        self._thread.join()
        logger.info('Ticker stopped')
Example #8
def send_emails(modeladmin, request, queryset):
    messages = Queue()
    for user in queryset:
        process = Process(target=send_email, args=(user, messages))
        process.start()
        messages.get().send()
        process.join()
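
In the loop above each Process is started, read from, and joined before the next one begins, so the per-user work effectively runs one at a time. A minimal sketch of a variant that starts every worker first so the work overlaps (send_email here is a hypothetical stand-in for the example's function; the real code would call .send() on each message):

from multiprocessing import Process, Queue

def send_email(user, messages):
    # Hypothetical stand-in: build a message for the user and hand it back.
    messages.put('message for %s' % user)

def send_emails_concurrently(users):
    messages = Queue()
    workers = [Process(target=send_email, args=(user, messages)) for user in users]
    for worker in workers:
        worker.start()
    for worker in workers:
        print(messages.get())   # one prepared message per worker
        worker.join()

if __name__ == '__main__':
    send_emails_concurrently(['alice', 'bob', 'carol'])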
Example #9
    def test_pause_stop(self):
        '''
        Verify that the pause method actually works. In this case, working
        means that the process doesn't send any more HTTP requests after we
        pause, and that stop works when paused.
        '''
        core_start = Process(target=self.w3afcore.start, name='TestRunner')
        core_start.daemon = True
        core_start.start()
        
        # Let the core start, and the count plugin send some requests.
        time.sleep(5)
        count_before_pause = self.count_plugin.count
        self.assertGreater(self.count_plugin.count, 0)
        
        # Pause and measure
        self.w3afcore.pause(True)
        count_after_pause = self.count_plugin.count
        
        time.sleep(2)
        count_after_sleep = self.count_plugin.count
        
        all_equal = count_before_pause == count_after_pause == count_after_sleep
        
        self.assertTrue(all_equal)

        # Stop the core while it is still paused
        self.w3afcore.stop()
        core_start.join()
        
        # No more requests sent after pause
        self.assertEqual(self.count_plugin.count, count_after_sleep)
Example #10
    def test_pause_unpause(self):
        output = Queue.Queue()
        self.uri_opener.pause(True)

        def send(uri_opener, output):
            url = URL(get_moth_http())
            try:
                http_response = uri_opener.GET(url)
                output.put(http_response)
            except:
                output.put(None)

        th = Process(target=send, args=(self.uri_opener, output))
        th.daemon = True
        th.start()

        self.assertRaises(Queue.Empty, output.get, True, 2)

        self.uri_opener.pause(False)

        http_response = output.get()
        self.assertNotIsInstance(http_response, types.NoneType,
                                 'Error in send thread.')
        
        th.join()
        
        self.assertEqual(http_response.get_code(), 200)
        self.assertIn(self.MOTH_MESSAGE, http_response.body)
Example #11
def process_updates():
    """
    Decides which type the update is, routes it to the appropriate route_update
    method, and launches a thread for the run_extensions method.
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    plugin_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    plugin_http.timeout = urllib3.Timeout(connect=1.0)
    plugin_http.retries = 3
    update_router = RouteMessage(PLUGINS, plugin_http, GET_ME, CONFIG)
    while RUNNING.value:
        try:
            update = MESSAGE_QUEUE.get_nowait()
        except queue.Empty:
            time.sleep(SLEEP_TIME)
            continue
        extension_thread = ThreadProcess(target=run_extensions, args=(update, ))
        extension_thread.start()
        if 'message' in update:
            update_router.route_update(update['message'])
        elif 'edited_message' in update:
            update_router.route_update(update['edited_message'])
        elif 'callback_query' in update:
            route_callback_query(PLUGINS, GET_ME, CONFIG, plugin_http, update['callback_query'])
        elif 'inline_query' in update:
            route_inline_query(PLUGINS, GET_ME, CONFIG, plugin_http, update['inline_query'])
        extension_thread.join()
Example #14
    def test_stop(self):
        """
        Verify that the stop method actually works. In this case, working
        means that the process doesn't send any more HTTP requests after we
        stop().

        This test seems to be failing at CircleCI because of a test dependency
        issue. If run alone on your workstation it will PASS, but if run at
        CircleCI the count plugin doesn't seem to start.
        """
        core_start = Process(target=self.w3afcore.start, name='TestRunner')
        core_start.daemon = True
        core_start.start()
        
        # Let the core start, and the count plugin send some requests.
        time.sleep(5)
        count_before_stop = self.count_plugin.count
        self.assertGreater(count_before_stop, 0)
        
        # Stop now,
        self.w3afcore.stop()
        core_start.join()

        count_after_stop = self.count_plugin.count
        
        self.assertEqual(count_after_stop, count_before_stop)
Example #15
    def test_pause_unpause(self):
        """
        Verify that the pause method actually works. In this case, working
        means that the process doesn't send any more HTTP requests, a fact
        that is verified with the "fake" count plugin.
        """        
        core_start = Process(target=self.w3afcore.start, name='TestRunner')
        core_start.daemon = True
        core_start.start()
        
        # Let the core start, and the count plugin send some requests.
        time.sleep(5)
        count_before_pause = self.count_plugin.count
        self.assertGreater(self.count_plugin.count, 0)
        
        # Pause and measure
        self.w3afcore.pause(True)
        count_after_pause = self.count_plugin.count
        
        time.sleep(2)
        count_after_sleep = self.count_plugin.count
        
        all_equal = count_before_pause == count_after_pause == count_after_sleep
        
        self.assertTrue(all_equal)

        # Unpause and verify that all requests were sent
        self.w3afcore.pause(False)
        core_start.join()
        
        self.assertEqual(self.count_plugin.count, self.count_plugin.loops)
Example #16
    def run(self):
        jobs = []
        for i in range(int(ceil(self.count_vacancy / 25))):
            p = Process(target=self.get_graph_maker())
            jobs.append(p)
            p.start()
            p.join()
Example #17
class Ticker(object):

    def __init__(self, api, interval=1):
        self.api = api
        self.db = MongoClient().poloniex['ticker']
        self.interval = interval

    def updateTicker(self):
        tick = self.api.returnTicker()
        for market in tick:
            self.db.update_one({'_id': market},
                               {'$set': tick[market]},
                               upsert=True)
        logger.info('Ticker updated')

    def __call__(self):
        return list(self.db.find())

    def run(self):
        self._running = True
        while self._running:
            self.updateTicker()
            sleep(self.interval)

    def start(self):
        self._thread = Thread(target=self.run)
        self._thread.daemon = True
        self._thread.start()
        logger.info('Ticker started')

    def stop(self):
        self._running = False
        self._thread.join()
        logger.info('Ticker stopped')
Example #18
def report(pk, num):
    # hacky way to enforce db commit from outside the atomic transaction
    # spawning a separate celery task won't work as celery will queue
    # spawns after the commit of the underlying transaction.
    from multiprocessing.dummy import Process
    p = Process(target=update_status, args=(pk, num))
    p.start()
    p.join()
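
For reference, multiprocessing.dummy.Process is a thin wrapper around threading.Thread, so the snippet above runs update_status in a separate thread of the same process rather than in a child process. A minimal self-contained sketch of the same pattern, with a hypothetical update_status stand-in:

from multiprocessing.dummy import Process   # thread-backed Process API

def update_status(pk, num):
    # Hypothetical stand-in for the real update_status.
    print('updating %s -> %s' % (pk, num))

p = Process(target=update_status, args=(1, 42))
p.start()
p.join()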
Example #19
    def manager(self):
        try:
            putter_process = Process(target=self.put_queue)
            getter_process = Process(target=self.get_queue)
            putter_process.start()
            getter_process.start()
            putter_process.join()
        except Exception as e:
            raise Exception(e.args[0])
Example #20
class Ticker(object):
    def __init__(self):
        self.ticker = poloniex.Poloniex().returnTicker()
        self._appRunner = ApplicationRunner(u"wss://api.poloniex.com:443",
                                            u"realm1")
        self._appProcess, self._tickThread = None, None
        self._running = False

    def __call__(self):
        return self.ticker

    def tickCatcher(self):
        print("Catching...")
        while self._running:
            try:
                tick = queue.get(timeout=1)
            except:
                continue
            else:
                self.ticker[tick[0]] = {
                    'last': tick[1],
                    'lowestAsk': tick[2],
                    'highestBid': tick[3],
                    'percentChange': tick[4],
                    'baseVolume': tick[5],
                    'quoteVolume': tick[6],
                    'isFrozen': tick[7],
                    'high24hr': tick[8],
                    'low24hr': tick[9],
                    'id': self.ticker[tick[0]]['id']
                }
        print("Done catching...")

    def start(self):
        """ Start the ticker """
        print("Starting ticker")
        self._appProcess = Process(target=self._appRunner.run,
                                   args=(TickPitcher, ))
        self._appProcess.daemon = True
        self._appProcess.start()
        self._running = True
        print('TICKER: tickPitcher process started')
        self._tickThread = Thread(target=self.tickCatcher)
        self._tickThread.daemon = True
        self._tickThread.start()
        print('TICKER: tickCatcher thread started')

    def stop(self):
        """ Stop the ticker """
        print("Stopping ticker")
        self._appProcess.terminate()
        print("Joining Process")
        self._appProcess.join()
        print("Joining thread")
        self._running = False
        self._tickThread.join()
        print("Ticker stopped.")
Example #21
def train(epoch):

    print('Epoch: %d' % epoch)

    def backward():

        time.sleep(2)

        batch_idx = 0
        train_loss = 0
        correct = 0
        total = 0
        global epoch_loss

        while True:
            optimizer.zero_grad()
            try:
                outputs, targets = output_queue.get(block=True,
                                                    timeout=args.wait)
            except Empty as e:
                print("done.....")
                epoch_loss = (train_loss / (batch_idx + 1))
                break
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            progress_bar(
                batch_idx, len(trainloader),
                'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                (train_loss /
                 (batch_idx + 1), 100. * correct / total, correct, total))
            batch_idx += 1

    net.train()

    start_flag = True
    first_count = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.cuda(0), targets.to(1)
        outputs = net(inputs)
        if first_count < args.count:
            first_count += 1
            continue
        output_queue.put([outputs, targets])
        if start_flag and output_queue.qsize() > args.wait:  #2
            start_flag = False
            back_process = Process(target=backward)
            back_process.start()

    back_process.join()
Example #22
def multiprocessing():
    # Start a child process and wait for it to finish
    print('Parent process %s.' % os.getpid())
    # When creating the child process, just pass in the target function and its arguments
    p = Process(target=run_child_process, args=('child process', ))
    print('Process start')
    p.start()
    # join() waits for the child process to finish before continuing; it is commonly used to synchronize processes
    p.join()
    print('Process end.')
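
A runnable companion sketch of the same start/join pattern, with a hypothetical run_child_process defined so the snippet is self-contained:

import os
from multiprocessing import Process

def run_child_process(name):
    # Hypothetical child: report its name and pid.
    print('Run child process %s (%s)...' % (name, os.getpid()))

if __name__ == '__main__':
    p = Process(target=run_child_process, args=('child process',))
    p.start()
    p.join()          # wait for the child before continuing
    print('Process end.')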
Example #23
def scoreDuplicates(record_pairs: RecordPairs,
                    data_model,
                    classifier,
                    num_cores: int = 1):
    if num_cores < 2:
        from multiprocessing.dummy import Process, Queue
        SimpleQueue = Queue
    else:
        from .backport import Process, SimpleQueue, Queue  # type: ignore

    first, record_pairs = peek(record_pairs)
    if first is None:
        raise BlockingError("No records have been blocked together. "
                            "Is the data you are trying to match like "
                            "the data you trained on?")

    record_pairs_queue: _Queue = Queue(2)
    score_queue: _SimpleQueue = SimpleQueue()
    result_queue: _SimpleQueue = SimpleQueue()

    n_map_processes = max(num_cores, 1)
    score_records = ScoreDupes(data_model, classifier, record_pairs_queue,
                               score_queue)
    map_processes = [
        Process(target=score_records) for _ in range(n_map_processes)
    ]

    for process in map_processes:
        process.start()

    reduce_process = Process(target=mergeScores,
                             args=(score_queue, result_queue, n_map_processes))
    reduce_process.start()

    fillQueue(record_pairs_queue, record_pairs, n_map_processes)

    result = result_queue.get()
    if isinstance(result, Exception):
        raise ChildProcessError

    if result:
        scored_pairs_file, dtype, size = result
        scored_pairs = numpy.memmap(scored_pairs_file,
                                    dtype=dtype,
                                    shape=(size, ))
    else:
        dtype = numpy.dtype([('pairs', object, 2), ('score', 'f4', 1)])
        scored_pairs = numpy.array([], dtype=dtype)

    reduce_process.join()

    for process in map_processes:
        process.join()

    return scored_pairs
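
A minimal, self-contained sketch of the same fan-out/fan-in shape used above, with hypothetical square_worker/merge functions standing in for ScoreDupes and mergeScores, and one None sentinel per worker instead of the queue-filling helpers:

from multiprocessing import Process, Queue

def square_worker(in_queue, out_queue):
    # Consume items until the None sentinel arrives, then forward the sentinel.
    while True:
        item = in_queue.get()
        if item is None:
            out_queue.put(None)
            break
        out_queue.put(item * item)

def merge(out_queue, result_queue, n_workers):
    # Collect results until every worker has sent its sentinel.
    results, finished = [], 0
    while finished < n_workers:
        item = out_queue.get()
        if item is None:
            finished += 1
        else:
            results.append(item)
    result_queue.put(sorted(results))

if __name__ == '__main__':
    n_workers = 2
    in_queue, out_queue, result_queue = Queue(), Queue(), Queue()
    workers = [Process(target=square_worker, args=(in_queue, out_queue))
               for _ in range(n_workers)]
    for worker in workers:
        worker.start()
    reducer = Process(target=merge, args=(out_queue, result_queue, n_workers))
    reducer.start()
    for i in range(10):
        in_queue.put(i)
    for _ in workers:
        in_queue.put(None)          # one sentinel per worker
    print(result_queue.get())       # fan-in result
    reducer.join()
    for worker in workers:
        worker.join()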
Example #24
def run_both():
    run = 20
    for i in range(run):
        receiver = Process(target=run_receiver)
        sender = Process(target=run_sender)
        receiver.start()
        sender.start()
        receiver.join()
        sender.join()

        time.sleep(1)
Example #25
def scoreDuplicates(records, data_model, classifier, num_cores=1, threshold=0):
    if num_cores < 2:
        from multiprocessing.dummy import Process, Queue
        SimpleQueue = Queue
    else:
        from .backport import Process, SimpleQueue, Queue

    first, records = peek(records)
    if first is None:
        raise BlockingError("No records have been blocked together. "
                            "Is the data you are trying to match like "
                            "the data you trained on?")

    record_pairs_queue = Queue(2)
    score_queue = SimpleQueue()
    result_queue = SimpleQueue()

    n_map_processes = max(num_cores, 1)
    score_records = ScoreDupes(data_model, classifier, threshold)
    map_processes = [Process(target=score_records,
                             args=(record_pairs_queue,
                                   score_queue))
                     for _ in range(n_map_processes)]
    [process.start() for process in map_processes]

    reduce_process = Process(target=mergeScores,
                             args=(score_queue,
                                   result_queue,
                                   n_map_processes))
    reduce_process.start()

    fillQueue(record_pairs_queue, records, n_map_processes)

    result = result_queue.get()
    if isinstance(result, Exception):
        raise ChildProcessError

    if result:
        scored_pairs_file, dtype, size = result
        scored_pairs = numpy.memmap(scored_pairs_file,
                                    dtype=dtype,
                                    shape=(size,))
    else:
        dtype = numpy.dtype([('pairs', object, 2),
                             ('score', 'f4', 1)])
        scored_pairs = numpy.array([], dtype=dtype)

    reduce_process.join()
    [process.join() for process in map_processes]

    return scored_pairs
Example #26
def _add_doi(metadata, identifier, citekey):
    """Add an entry from a DOI."""
    info_messages = []
    with StatusMessage('Querying DOI metadata...') as message:
        if metadata.doi_exists(identifier):
            raise ZoiaAddException(f'DOI {identifier} already exists.')

        # Query Semantic Scholar to get the corresponding arxiv ID (if there is
        # one) in a separate thread.
        arxiv_queue = ThreadQueue()
        arxiv_process = ThreadProcess(
            target=lambda q, x: q.put(requests.get(x)),
            args=(
                arxiv_queue,
                f'https://api.semanticscholar.org/v1/paper/{identifier}',
            ),
        )
        arxiv_process.start()

        doi_metadata = _get_doi_metadata(identifier)

        metadatum = zoia.backend.metadata.Metadatum.from_dict(doi_metadata)

        if citekey is None:
            citekey = zoia.parse.citekey.create_citekey(metadata, metadatum)

        paper_dir = os.path.join(metadata.config.library_root, citekey)
        os.mkdir(paper_dir)

        message.update(
            'Querying Semantic Scholar for corresponding arXiv ID...')
        arxiv_metadata_response = arxiv_queue.get()
        arxiv_process.join()

        arxiv_metadata = json.loads(arxiv_metadata_response.text)

        if (arxiv_id := arxiv_metadata.get('arxivId')) is not None:
            doi_metadata['arxiv_id'] = arxiv_id
            message.update('Downloading PDF from arXiv...')
            pdf_response = requests.get(
                f'https://arxiv.org/pdf/{arxiv_id}.pdf')

            if pdf_response.status_code == 200:
                with open(os.path.join(paper_dir, 'document.pdf'), 'wb') as fp:
                    fp.write(pdf_response.content)
                doi_metadata['pdf_md5'] = hashlib.md5(
                    pdf_response.content).hexdigest()
            else:
                info_messages.append('Was unable to fetch a PDF')

        metadata[citekey] = doi_metadata
Example #27
def main(q=None):
    """
    Try to detect relics on the screen and show if they can be detected in a window.

    For some weird reason, Qt and tesseract cannot run in the same thread.
    Doing so leads to a crash and I'm unable to figure out the cause.
    We work around this by creating a thread for Qt and putting all OCR results into a queue.
    :param q: A queue for communication with the Qt thread
    """
    if q is not None:
        app = QApplication(sys.argv)
        widget = Widget(q)
        sys.exit(app.exec_())

    q = Queue(1)
    p = Process(target=main, args=(q, ))
    p.start()

    tessdata_dir = 'tessdata/'
    with TesserocrPool(tessdata_dir,
                       'Roboto',
                       psm=PSM.SINGLE_BLOCK,
                       oem=OEM.LSTM_ONLY) as pool, mss.mss() as sct:
        s = Screenshots(sct)
        while p.is_alive():
            begin = time.time()
            image_input = next(s)
            end = time.time()
            delta = end - begin
            print(f'screenshot took {delta}s')

            try:
                ocr_data = do_ocr(pool, image_input)
            except:
                ocr_data = None

            if ocr_data is None:
                ocr_data = itertools.repeat(('ocrerror', ) * 4, 20)

            try:
                q.put(tuple(ocr_data), block=True, timeout=0.5)
            except QueueFullException:
                if not p.is_alive():
                    break
            except ValueError as e:
                pass
            except AssertionError as e:
                pass

    p.join()
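
The q.put(..., timeout=0.5) guard above only helps if QueueFullException maps to the exception the queue actually raises when full; for a multiprocessing.Queue that is queue.Full. A minimal sketch of that producer-side guard, with a hypothetical consumer:

import queue
from multiprocessing import Process, Queue

def consumer(q):
    # Take a few items and exit, leaving the producer to notice.
    for _ in range(3):
        print(q.get())

if __name__ == '__main__':
    q = Queue(1)                      # bounded, like Queue(1) in the example
    p = Process(target=consumer, args=(q,))
    p.start()
    for item in range(10):
        try:
            q.put(item, block=True, timeout=0.5)
        except queue.Full:
            if not p.is_alive():      # consumer is gone: stop producing
                break
    p.join()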
Example #28
class Ticker(object):
    """ Ticker object for controlling the ticker thread and subprocess
		Holds poloniex ticker dict under self.markets"""
    def __init__(self):
        self._tickerP, self._tickerT = [None, None]
        self.markets = poloniex.Poloniex().marketTicker()

    def startTicker(self):
        """ Starts the 'tickcatcher' subprocess and 'tickCatcher' thread"""
        self._tickerP = Popen(["python", "tickcatcher.py"],
                              stdout=PIPE,
                              bufsize=1)
        print('TICKER: tickcatcher subprocess started')

        self._tickerT = Thread(target=self.tickCatcher)
        self._tickerT.daemon = True
        self._tickerT.start()
        print('TICKER: tickCatcher thread started')

    def stopTicker(self):
        """ Stops the ticker subprocess"""
        self._tickerP.terminate()
        self._tickerP.kill()
        print('TICKER: Ticker subprocess stopped')
        self._tickerT.join()
        print('TICKER: Ticker thread joined')

    def tickCatcher(self):
        with self._tickerP.stdout:
            for line in iter(self._tickerP.stdout.readline, b''):
                try:
                    tick = json.loads(
                        line[25:]
                    )  # shave off twisted timestamp (probably a better way to remove the timestamp...)
                    self.markets[tick[0]] = {
                        'last': tick[1],
                        'lowestAsk': tick[2],
                        'highestBid': tick[3],
                        'percentChange': tick[4],
                        'baseVolume': tick[5],
                        'quoteVolume': tick[6],
                        'isFrozen': tick[7],
                        'high24hr': tick[8],
                        'low24hr': tick[9],
                        'id': self.markets[tick[0]]['id']
                    }
                except Exception as e:
                    print(e)

        self._tickerP.wait()
Example #29
def scoreDuplicates(records, data_model, classifier, num_cores=1, threshold=0) :
    if num_cores < 2 :
        from multiprocessing.dummy import Process, Queue
        SimpleQueue = Queue
    else :
        from .backport import Process, SimpleQueue, Queue

    first, records = peek(records)
    if first is None:
        raise ValueError("No records have been blocked together. "
                         "Is the data you are trying to match like "
                         "the data you trained on?")

    record_pairs_queue = Queue(2)
    score_queue =  SimpleQueue()
    result_queue = SimpleQueue()

    n_map_processes = max(num_cores-1, 1)
    score_records = ScoreRecords(data_model, classifier, threshold) 
    map_processes = [Process(target=score_records,
                             args=(record_pairs_queue,
                                   score_queue))
                     for _ in range(n_map_processes)]
    [process.start() for process in map_processes]

    reduce_process = Process(target=mergeScores,
                             args=(score_queue,
                                   result_queue,
                                   n_map_processes))
    reduce_process.start()

    fillQueue(record_pairs_queue, records, n_map_processes)

    result = result_queue.get()
    if isinstance(result, Exception) :
        raise ChildProcessError

    if result :
        scored_pairs_file, dtype, size = result
        scored_pairs = numpy.memmap(scored_pairs_file,
                                    mode='r',
                                    dtype=dtype,
                                    shape=(size,))
    else:
        scored_pairs = []

    reduce_process.join()
    [process.join() for process in map_processes]

    return scored_pairs
Example #30
def get_stats():
	print 'Fetching NBA player stats...'
	stats_outfile = RUNDAY+'_nba_stats.csv'
	csvout = open(stats_outfile, 'wb')

	NUM_THREADS = 8

	in_queue = Queue()
	out_queue = Queue()
	queue_players(in_queue)

	while not in_queue.empty():	
		jobs = []

		for i in range(NUM_THREADS):
			if not in_queue.empty():
				thread = Process(target=get_stats_helper, args=(in_queue, out_queue))
				jobs.append(thread)
				thread.start()
		for thread in jobs:
			thread.join()	

		while not out_queue.empty():
			player = out_queue.get()
			del player['SUCCESS']
			try: 
				name = player['NAME']
			except KeyError as e:
				continue
			player['TIME'] = RUNDAY
			fieldnames = [
				'TIME',
				'NAME', 
				'JERSEY',
				'SPORT',
				'TEAM',
				'POSITION',
				'PTS',
				'REB',
				'AST',
				'URL'
			]
		
			csvwriter = csv.DictWriter(csvout, delimiter='|', fieldnames=fieldnames)
			csvwriter.writerow(player)
	csvout.close()

	print 'Finished fetching NBA player stats.'
	print 'Output saved in %s' % stats_outfile
Example #31
    def __init__(self):

        pool = Pool(processes=2)
        self.graph = getGraph()

        files = findFiles(opts)

        self.progressQueue = Queue()
        reporter = Process(target=ProgressReport,
                           args=(self.progressQueue, len(files)))
        reporter.start()
        result = pool.map(self.cacheFile, enumerate(files), chunksize=5)
        self.progressQueue.put('END')
        log.info("finished, %s results", len(result))
        reporter.join()
Example #33
    def serve(self):
        logger = logging.getLogger()
        processes = []

        try:
            while True:
                self.logger.debug('Serve Forever')

                tasks = []
                task = []
                while not self.queue.empty():
                    job = self.queue.get_nowait()
                    task.append(job)
                    if len(task) == self.jobsPerWorker or self.queue.empty():
                        tasks.append(task)
                        task = []

                self.logger.debug('Loaded Tasks')
                while len(tasks) > 0:
                    while len(processes) == self.numWorker:
                        newProcesses = []
                        for p in processes:
                            if not p.is_alive():
                                p.join()
                            else:
                                newProcesses.append(p)
                        processes = newProcesses
                        sleep(1)
                    self.logger.debug('{} Free Worker'.format(self.numWorker -
                                                              len(processes)))

                    for i in range(
                            min(self.numWorker - len(processes), len(tasks))):
                        task = tasks.pop(0)
                        p = Process(target=self.worker, args=(task, ))
                        p.start()
                        self.logger.debug('Start Fresh Worker')
                        processes.append(p)

                sleep(5)
        except (Exception, SystemExit, KeyboardInterrupt) as e:
            logger.exception(e)

        finally:
            for p in processes:
                p.terminate()
                p.join()
            logger.fatal('Spider does not serve anymore')
Example #34
def train(epoch):

    print('Epoch: %d' % epoch)

    def backward():

        time.sleep(2)

        batch_idx = 0
        train_loss = 0
        correct = 0
        total = 0

        while not output_queue.empty():
            optimizer.zero_grad()
            outputs, targets = output_queue.get(block=False)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            progress_bar(
                batch_idx, len(trainloader),
                'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                (train_loss /
                 (batch_idx + 1), 100. * correct / total, correct, total))
            batch_idx += 1

    net.train()

    start_flag = True

    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        outputs = net(inputs)
        output_queue.put([outputs, targets])
        if start_flag and output_queue.qsize() > 2:
            start_flag = False
            back_process = Process(target=backward)
            back_process.start()

    back_process.join()
Example #35
def _add_arxiv_id(metadata, identifier, citekey=None):
    info_messages = []
    with StatusMessage('Querying arXiv...') as message:
        if metadata.arxiv_id_exists(identifier):
            raise ZoiaAddException(f'arXiv paper {identifier} already exists.')

        # Downloading the PDF can take a while, so start it early in a separate
        # thread.
        pdf_queue = ThreadQueue()
        pdf_process = ThreadProcess(
            target=lambda q, x: q.put(requests.get(x)),
            args=(pdf_queue, f'https://arxiv.org/pdf/{identifier}.pdf'),
        )
        pdf_process.start()

        arxiv_metadata = _get_arxiv_metadata(identifier)

        if 'doi' in arxiv_metadata:
            message.update('Querying DOI information...')
            arxiv_metadata.update(_get_doi_metadata(arxiv_metadata['doi']))

        if citekey is None:
            metadatum = zoia.backend.metadata.Metadatum.from_dict(
                arxiv_metadata)
            citekey = zoia.parse.citekey.create_citekey(metadata, metadatum)
        paper_dir = os.path.join(metadata.config.library_root, citekey)
        os.mkdir(paper_dir)

        message.update(text='Downloading PDF...')
        pdf = pdf_queue.get()
        pdf_process.join()

        if pdf.status_code == 200:
            with open(os.path.join(paper_dir, 'document.pdf'), 'wb') as fp:
                fp.write(pdf.content)
            md5_hash = hashlib.md5(pdf.content).hexdigest()
            arxiv_metadata['pdf_md5'] = md5_hash
            if metadata.pdf_md5_hash_exists(md5_hash):
                raise ZoiaAddException(
                    f'arXiv paper {identifier} already exists.')
        else:
            info_messages.append('Was unable to fetch a PDF')

        metadata[citekey] = arxiv_metadata

    return citekey, metadatum, info_messages
Example #36
def send_message(request):
    form = SendEmailForm(request.POST)
    if form.is_valid():
        subject = form.cleaned_data.get('subject')
        text = form.cleaned_data.get('message')
        date = datetime.now()
        users = form.cleaned_data['users']
        data = (subject, text, date)
        # messages.success(request, f'Message sent!')
        for item in users:
            messages.put(item)
            user = item.user
        p = Process(target=read_query, args=(request, data))
        p.start()
        p.join()

        return redirect('/admin/user/profile/')
Example #37
def main(test_cases):
    procs = []
    queue = Queue()
    case_number = 0
    for test_case in test_cases:
        proc = Process(target=handle_test_case,
                       args=(test_case, case_number, queue))
        procs.append(proc)
        case_number += 1
    for proc in procs:
        proc.start()
    for proc in procs:
        proc.join()

    results = [queue.get() for proc in procs]
    results.sort()
    for r in results:
        print(r[1])
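
For comparison, a sketch of the same per-case fan-out using multiprocessing.Pool, whose map() already returns results in input order, so no result queue or sort is needed (handle_test_case here is a hypothetical stand-in):

from multiprocessing import Pool

def handle_test_case(args):
    case_number, test_case = args
    # Hypothetical work: return something keyed by the case number.
    return case_number, 'Case #%d: %r' % (case_number + 1, test_case)

if __name__ == '__main__':
    test_cases = ['a', 'b', 'c']
    with Pool() as pool:
        results = pool.map(handle_test_case, enumerate(test_cases))
    for _, line in results:
        print(line)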
Example #38
    def run(self):
        """Run and get datas."""
        print(self.fname, len(self.rooms))

        p_re = Process(target=self.record)
        pool = Process(target=self.pool_join)
        pool.start()
        p_re.start()

        for (game, room) in self.rooms:
            p = Process(target=self._run, args=(room, game))
            p.start()
            self.pool_queue.put(p)

        self.pool_queue.put(None)
        pool.join()
        self.msg_queue.put(None)
        return p_re, self.fname
Example #39
class Ticker(object):
	""" Ticker object for controlling the ticker thread and subprocess
		Holds poloniex ticker dict under self.markets"""
	def __init__(self):
		self._tickerP, self._tickerT = [None, None]
		self.markets = poloniex.Poloniex().marketTicker()
		
	def startTicker(self):
		""" Starts the 'tickcatcher' subprocess and 'tickCatcher' thread"""
		self._tickerP = Popen(["python", "tickcatcher.py"], stdout=PIPE, bufsize=1)
		print('TICKER: tickcatcher subprocess started')
		
		self._tickerT = Thread(target=self.tickCatcher);self._tickerT.daemon = True
		self._tickerT.start()
		print('TICKER: tickCatcher thread started')
	
	def stopTicker(self):
		""" Stops the ticker subprocess"""
		self._tickerP.terminate();self._tickerP.kill()
		print('TICKER: Ticker subprocess stopped')
		self._tickerT.join()
		print('TICKER: Ticker thread joined')
	
	def tickCatcher(self):
		with self._tickerP.stdout:
			for line in iter(self._tickerP.stdout.readline, b''):
				try:
					tick = json.loads(line[25:]) # shave off twisted timestamp (probably a better way to remove the timestamp...)
					self.markets[tick[0]] = {
							'last':tick[1], 
							'lowestAsk':tick[2], 
							'highestBid':tick[3], 
							'percentChange':tick[4], 
							'baseVolume':tick[5], 
							'quoteVolume':tick[6], 
							'isFrozen':tick[7], 
							'high24hr':tick[8], 
							'low24hr':tick[9],
							'id':self.markets[tick[0]]['id']
							}
				except Exception as e:
					print(e)
				
		self._tickerP.wait()
Example #40
def parallel_parsing(directory):
    trees = list()
    input_files = [java_file for java_file in get_java_files(directory)]
    # pool = Pool(processes=8)
    # x = pool.apply_async(create_project_parse_tree, args=(input_files[0], trees))

    # for java_file in get_java_files(directory):
    #     res = pool.apply_async(create_project_parse_tree, (java_file,))  # runs in *only* one process
    #     print(type(res.get(timeout=1)))  #

    # num = Value('d', 0.0)
    # arr = Array('i', range(500))
    with Manager() as manager:
        d = manager.dict()
        q = manager.Queue(10)
        p = Process(target=create_project_parse_tree, args=(input_files[0], q))
        p.start()
        p.join()
        print(q)
Example #41
def scoreDuplicates(records, data_model, classifier, num_cores=1, threshold=0) :
    if num_cores < 2 :
        from multiprocessing.dummy import Process, Pool, Queue
        SimpleQueue = Queue
    else :
        from .backport import Process, Pool, SimpleQueue

    record_pairs_queue = SimpleQueue()
    score_queue =  SimpleQueue()
    result_queue = SimpleQueue()

    n_map_processes = max(num_cores-1, 1)
    score_records = ScoreRecords(data_model, classifier, threshold) 
    map_processes = [Process(target=score_records,
                             args=(record_pairs_queue,
                                   score_queue))
                     for _ in range(n_map_processes)]
    [process.start() for process in map_processes]

    reduce_process = Process(target=mergeScores,
                             args=(score_queue,
                                   result_queue,
                                   n_map_processes))
    reduce_process.start()

    fillQueue(record_pairs_queue, records, n_map_processes)

    result = result_queue.get()
    if isinstance(result, Exception) :
        raise ChildProcessError

    if result :
        scored_pairs_file, dtype = result
        scored_pairs = numpy.memmap(scored_pairs_file,
                                    dtype=dtype)
    else :
        scored_pairs = result

    reduce_process.join()
    [process.join() for process in map_processes]

    return scored_pairs
Example #42
def stat_files():
	all_files = []
	for root, dirs, files in os.walk('/home/gzguoyubo/mf/tw2/res/entities/custom_type'):
		ignore = False
		for ig_path in ignore_paths:
			if ig_path in root:
				ignore = True
		if ignore:
			continue
		for fname in files:
			if not fname.endswith('.py'):
				continue
			abs_file_path = join(root, fname)
			all_files.append(abs_file_path)
	
	file_sections = []
	file_total_nums = len(all_files)
	for i in xrange(P_NUM):
		start = i * file_total_nums / P_NUM
		stop = start + file_total_nums / P_NUM
		if i == P_NUM - 1:
			stop = -1
		file_sections.append(all_files[start : stop])

	res_queue = Queue()
	processes = []
	for section in file_sections:
		p = Process(target=stat_file, args=(section, res_queue))
		p.start()
		processes.append(p)
	
	for p in processes:
		p.join()
	
	total_stats = defaultdict(int)
	while not res_queue.empty():
		stat = res_queue.get()
		for author, cnt in stat.iteritems():
			total_stats[author] += cnt
	
	print total_stats
Example #43
    def test_stop(self):
        """
        Verify that the stop method actually works. In this case, working
        means that the process doesn't send any more HTTP requests after we
        stop().
        """
        core_start = Process(target=self.w3afcore.start, name="TestRunner")
        core_start.daemon = True
        core_start.start()

        # Let the core start, and the count plugin send some requests.
        time.sleep(5)
        count_before_stop = self.count_plugin.count
        self.assertGreater(count_before_stop, 0)

        # Stop now,
        self.w3afcore.stop()
        core_start.join()

        count_after_stop = self.count_plugin.count

        self.assertEqual(count_after_stop, count_before_stop)
Example #44
    def test_pause_unpause(self):
        output = Queue.Queue()
        self.uri_opener.pause(True)

        def send(uri_opener, output):
            url = URL('http://moth/')
            http_response = uri_opener.GET(url)
            output.put(http_response)

        th = Process(target=send, args=(self.uri_opener, output))
        th.daemon = True
        th.start()

        self.assertRaises(Queue.Empty, output.get, True, 2)

        self.uri_opener.pause(False)

        http_response = output.get()
        th.join()
        
        self.assertEqual(http_response.get_code(), 200)
        self.assertIn(self.MOTH_MESSAGE, http_response.body)
Example #45
    def test_pause_stop(self):
        """
        Verify that the pause method actually works. In this case, working
        means that the process doesn't send any more HTTP requests after we
        pause, and that stop works when paused.

        This test seems to be failing at CircleCI because of a test dependency
        issue. If run alone on your workstation it will PASS, but if run at
        CircleCI the count plugin doesn't seem to start.
        """
        core_start = Process(target=self.w3afcore.start, name='TestRunner')
        core_start.daemon = True
        core_start.start()
        
        # Let the core start, and the count plugin send some requests.
        time.sleep(5)
        count_before_pause = self.count_plugin.count
        self.assertGreater(self.count_plugin.count, 0)
        
        # Pause and measure
        self.w3afcore.pause(True)
        count_after_pause = self.count_plugin.count
        
        time.sleep(2)
        count_after_sleep = self.count_plugin.count
        
        all_equal = count_before_pause == count_after_pause == count_after_sleep
        
        self.assertTrue(all_equal)

        # Stop the core while it is still paused
        self.w3afcore.stop()
        core_start.join()
        
        # No more requests sent after pause
        self.assertEqual(self.count_plugin.count, count_after_sleep)
Example #46
class Loaner(object):
	""" Object for control of threaded Loaner loop"""
	def __init__(self, Key, Secret, interval=60*2, ageLimit=60*5, offset=2):
		"""
		- <Key> - Polo Api key
		- <Secret> - Polo Api secret
		- Loaner.INTERVAL = time in sec to wait between updates [default= 2min]
		- Loaner.AGELIMIT = max age (in sec) for an open loan offer [default= 5min]
		- Loaner.OFFSET = offset from the top loan offer rate (offset*0.000001) [default= 2]
		- Loaner.CHECKINT = number of times to check Loaner.RUNNING between intervals (could be hard on cpu if set too high and INTERVAL set too low!) [default= 20]
		- Loaner.MINAMOUNT = Minimum amount for creating loan offers [default= 0.01]
		"""
		self.POLO = Poloniex(Key, Secret)
		self.INTERVAL, self.AGELIMIT, self.OFFSET, self.CHECKINT, self.MINAMOUNT, self._running, self._thread = [interval, ageLimit, offset, 20, 0.01, False, None]
	
	def _run(self):
		""" Main loop that is threaded (set Loaner.RUNNING to 'False' to stop loop)"""
		while self._running:
			try:
				self.cancelOldLoans(self.POLO.myOpenLoanOrders(), self.AGELIMIT)
				self.createLoans(self.POLO.myAvailBalances(), self.OFFSET)
				for i in range(self.CHECKINT):
					if not self._running: break
					time.sleep(self.INTERVAL/self.CHECKINT)
			except Exception as e:
				logging.info(e);time.sleep(self.INTERVAL/self.CHECKINT)
	
	def start(self):
		""" Start Loaner.thread"""
		self._thread = Thread(target=self._run);self._thread.daemon = True
		self._running = True;self._thread.start()
		logging.info('LOANER: started')
	
	def stop(self):
		""" Stop Loaner.thread"""
		self._running = False;self._thread.join()
		logging.info('LOANER: stopped')
	
	def cancelOldLoans(self, orderList, ageLimit):
		""" Cancel loans in <orderList> that are older than <ageLimit>
			- orderList = JSON object received from poloniex (open loan orders)
			- ageLimit = max age to allow an order to sit still before canceling (in seconds)""" 
		logging.info('LOANER: Checking for stale offers')
		for market in orderList:
			for order in orderList[market]:
				logging.info('LOANER: %s order %s has been open %f2 mins' % (market, str(order['id']), round((time.time()-self.POLO.UTCstr2epoch(order['date']))/60, 2)))
				if time.time()-self.POLO.UTCstr2epoch(order['date']) > ageLimit:
					result = self.POLO.cancelLoanOrder(order['id'])
					if not 'error' in result: logging.info('LOANER: %s %s [%s]' % (market, result["message"].lower(), str(order['id'])))
					else: logging.info('LOANER: %s' % result['error'])
	
	def createLoans(self, balances, offset):
		""" Create loans for all markets in <balances> at the <offset> from the top rate
			- balances = JSON object received from poloniex (available balances)
			- offset = number of 'loanToshis' to offset from the top loan order (offset*0.000001)""" 
		if 'lending' in balances:
			logging.info('LOANER: Checking for coins in lending account')
			for market in balances['lending']:
				if float(balances['lending'][market]) > self.MINAMOUNT:
					result = self.POLO.createLoanOrder(market, balances['lending'][market], float(self.POLO.marketLoans(market)['offers'][0]['rate'])+(offset*0.000001))
					if not 'error' in result: logging.info('LOANER: %s %s %s' % (balances['lending'][market], market, result["message"].lower()))
					else: logging.info('LOANER: %s' % result['error'])
Example #47
            pref_add_db.set_index(['username'], inplace=True)


    HOST, PORT = "", 9999
    server = Server((HOST, PORT), RequestHandler)
    ip, port = server.server_address
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()

    try:
        signal.pause()
    except:
        server.shutdown()
        server.server_close()
        worker_hand_p1.terminate()
        worker_face_p1.terminate()

        worker_hand_p1.join()
        worker_face_p1.join()

        dummycontinue = False
        worker_handres_mailman.join()
        worker_faceres_mailman.join()

        worker_pref_writeman.join()
        worker_svm_trainer.join()
        with open('./profiles/profiles_add.pkl', 'wb') as pref_add_fd:
            pkl.dump(pref_add_db, pref_add_fd)

    def test_glance_user_storage_quota_bypass_1_2(self, glance_remote, suffix,
                                                  env, os_conn):
        """If deleting images in 'saving' status, storage quota is overcome by
        the user, because images in the deleted state are not counted against
        the quota. These image files should be deleted after the upload of the
        files is completed.

        Scenario:
            1. Set 'file' storage on glance-api.conf
            2. Set 'user_storage_quota' to 604979776 in glance-api.conf
            (a little more than the size of the image) and restart glance-api
            service
            3. Run 5-min cycle which creates image, wait 2 sec and then
            deletes it in "saving" status (and in any other status if any) on
            every iteration
            4. After the end of cycle wait until the upload and deleting images
            is completed
            5. Check that images statuses are "deleted" in mysql database

        Duration 5m
        """
        user_storage_quota = 604979776

        images_size_before = 0
        for img in os_conn.nova.images.list():
            images_size_before += img.to_dict()['OS-EXT-IMG-SIZE:size']
        err_msg_quota = "Glance user storage quota is exceeded"
        assert images_size_before < user_storage_quota, err_msg_quota
        img_from_dir = self.get_images_number_from_dir()
        images_before = len(os_conn.nova.images.list())
        name = "Test_{0}".format(suffix[:6])
        image_url = ("http://releases.ubuntu.com/14.04/"
                     "ubuntu-14.04.4-server-i386.iso")
        file_path = file_cache.get_file_path(image_url)
        start_time = datetime.datetime.now()
        duration = datetime.timedelta(seconds=300)
        stop_time = start_time + duration
        images_id = []

        while 1:
            image = self.os_conn.glance.images.create(name=name,
                                                      disk_format='qcow2',
                                                      container_format='bare')
            p = Process(target=self.os_conn.glance.images.upload,
                        args=(image.id, open(file_path), ))
            p.start()
            time.sleep(2)
            image = self.os_conn.glance.images.get(image.id)
            if image.status == 'saving':
                logger.info("Image status = {0}".format(image.status))
                self.os_conn.glance.images.delete(image.id)
                logger.info("Image {0} is deleted in saving state"
                            .format(image.id))
            else:
                self.os_conn.glance.images.delete(image.id)
            images_id.append(image.id)
            p.join()
            if datetime.datetime.now() >= stop_time:
                break

        controllers = self.env.get_nodes_by_role('controller')
        for controller in controllers:
            with controller.ssh() as remote:
                wait(lambda: len(remote.check_call(
                    'ls /var/lib/glance/images')['stdout']) == img_from_dir[
                    controller.data['fqdn']],
                    timeout_seconds=60,
                    waiting_for='used space to be cleared')

        images_values = self.get_images_values_from_mysql_db(images_id)
        for image_id in images_values:
            image_values = images_values[image_id]
            err_msg = 'Status of image {0} is not deleted'.format(image_id)
            assert "deleted" in image_values, err_msg

        images_size_after = 0
        for img in os_conn.nova.images.list():
            images_size_after += img.to_dict()['OS-EXT-IMG-SIZE:size']
        err_msg = "Glance user storage quota is exceeded"
        assert images_size_after < user_storage_quota, err_msg
        assert images_before == len(os_conn.nova.images.list())
class wsTicker(object):

    def __init__(self, api=None):
        self.api = api
        if not self.api:
            self.api = Poloniex(jsonNums=float)
        self.db = MongoClient().poloniex['ticker']
        self.db.drop()
        self.ws = websocket.WebSocketApp("wss://api2.poloniex.com/",
                                         on_message=self.on_message,
                                         on_error=self.on_error,
                                         on_close=self.on_close)
        self.ws.on_open = self.on_open

    def __call__(self, market=None):
        """ returns ticker from mongodb """
        if market:
            return self.db.find_one({'_id': market})
        return list(self.db.find())

    def on_message(self, ws, message):
        message = json.loads(message)
        if 'error' in message:
            print(message['error'])
            return

        if message[0] == 1002:
            if message[1] == 1:
                print('Subscribed to ticker')
                return

            if message[1] == 0:
                print('Unsubscribed from ticker')
                return

            data = message[2]

            self.db.update_one(
                {"id": float(data[0])},
                {"$set": {'last': data[1],
                          'lowestAsk': data[2],
                          'highestBid': data[3],
                          'percentChange': data[4],
                          'baseVolume': data[5],
                          'quoteVolume': data[6],
                          'isFrozen': data[7],
                          'high24hr': data[8],
                          'low24hr': data[9]
                          }},
                upsert=True)

    def on_error(self, ws, error):
        print(error)

    def on_close(self, ws):
        print("Websocket closed!")

    def on_open(self, ws):
        tick = self.api.returnTicker()
        for market in tick:
            self.db.update_one(
                {'_id': market},
                {'$set': tick[market]},
                upsert=True)
        print('Populated markets database with ticker data')
        self.ws.send(json.dumps({'command': 'subscribe',
                                 'channel': 1002}))

    def start(self):
        self.t = Thread(target=self.ws.run_forever)
        self.t.daemon = True
        self.t.start()
        print('Thread started')

    def stop(self):
        self.ws.close()
        self.t.join()
        print('Thread joined')
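# Usage sketch (not part of the original snippet): a minimal, hedged
# illustration of driving the wsTicker class above. It assumes the
# module-level imports the class relies on (websocket-client, pymongo,
# the Poloniex wrapper, json, time, Thread) and a MongoDB instance
# running locally.
if __name__ == '__main__':
    ticker = wsTicker()
    ticker.start()             # run the websocket client in a daemon thread
    time.sleep(10)             # let a few ticker updates arrive
    print(ticker('USDT_BTC'))  # read one market back from MongoDB
    ticker.stop()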
class Ticker(object):

    def __init__(self):
        self.ticker = poloniex.Poloniex().returnTicker()
        self._appRunner = ApplicationRunner(
            u"wss://api.poloniex.com:443", u"realm1"
        )
        self._appProcess, self._tickThread = None, None
        self._running = False

    def __call__(self):
        return self.ticker

    def tickCatcher(self):
        print("Catching...")
        while self._running:
            try:
                # 'queue' is a module-level Queue that the TickPitcher WAMP
                # component (running in the child process) fills with ticks
                tick = queue.get(timeout=1)
            except Exception:  # most likely queue.Empty after the timeout
                continue
            else:
                self.ticker[tick[0]] = {
                    'last': tick[1],
                    'lowestAsk': tick[2],
                    'highestBid': tick[3],
                    'percentChange': tick[4],
                    'baseVolume': tick[5],
                    'quoteVolume': tick[6],
                    'isFrozen': tick[7],
                    'high24hr': tick[8],
                    'low24hr': tick[9],
                    'id': self.ticker[tick[0]]['id']
                }
        print("Done catching...")

    def start(self):
        """ Start the ticker """
        print("Starting ticker")
        self._appProcess = Process(
            target=self._appRunner.run,
            args=(TickPitcher,)
        )
        self._appProcess.daemon = True
        self._appProcess.start()
        self._running = True
        print('TICKER: tickPitcher process started')
        self._tickThread = Thread(target=self.tickCatcher)
        self._tickThread.daemon = True
        self._tickThread.start()
        print('TICKER: tickCatcher thread started')

    def stop(self):
        """ Stop the ticker """
        print("Stopping ticker")
        self._appProcess.terminate()
        print("Joining Process")
        self._appProcess.join()
        print("Joining thread")
        self._running = False
        self._tickThread.join()
        print("Ticker stopped.")
Example #51
0
class Loaner(object):
    """ Object for control of threaded Loaner loop"""
    def __init__(self, config):
        if os.path.isfile(config):
            with open(config) as f:
                config = json.load(f)
        self.polo = poloniex.Poloniex(config['key'], config['secret'], extend=True)
        self.coins = config['coins']
        self.interval = config['interval']
        self._running, self._thread = False, None
        self.openLoanOffers = None
        self.availBalance = None

    def _run(self):
        """
        Main loop that is threaded (set Loaner._running to 'False' to stop loop)
        """
        while self._running:
            try:
                self.openLoanOffers = self.polo.myOpenLoanOrders()
                for coin in self.coins:
                    # Check for old offers
                    self.cancelOldOffers(coin)
                self.availBalance = self.polo.myAvailBalances()
                for coin in self.coins:
                    # ALL the coins??
                    if self.coins[coin]['allBal']:
                        self.moveAll2Lending(coin)
                self.availBalance = self.polo.myAvailBalances()
                for coin in self.coins:
                    # Create a new offer
                    self.createLoanOffer(coin)
                # wait the interval (or shutdown)
                for i in range(self.interval*2):
                    if not self._running:
                        break
                    time.sleep(0.5)
            except Exception as e:
                logging.exception(e)
                time.sleep(10)

    def start(self):
        """ Start Loaner.thread"""
        self._thread = Thread(target=self._run)
        self._thread.daemon = True
        self._running = True
        self._thread.start()
        logging.info(P('LOANER:')+C(' started'))

    def stop(self):
        """ Stop Loaner.thread"""
        self._running = False
        self._thread.join()
        logging.info(P('LOANER:')+R(' stopped'))
    
    def moveAll2Lending(self, coin):
        if 'exchange' in self.availBalance:
            if coin in self.availBalance['exchange']:
                result = self.polo.transferBalance(
                    coin,
                    self.availBalance['exchange'][coin],
                    'exchange',
                    'lending'
                    )
                if 'error' in result:
                    raise RuntimeError(P('LOANER:')+' %s' % R(result['error']))
                else:
                    logging.info(P('LOANER:')+' %s' % result['message'])
        if 'margin' in self.availBalance:
            if coin in self.availBalance['margin']:
                result = self.polo.transferBalance(
                    coin, self.availBalance['margin'][coin], 'margin', 'lending'
                    )
                if 'error' in result:
                    raise RuntimeError(P('LOANER:')+' %s' % R(result['error']))
                else:
                    logging.info(P('LOANER:')+' %s' % result['message'])

    def getLoanOfferAge(self, coin, order):
        # epoch of loan order 
        opnTime = poloniex.UTCstr2epoch(order['date'])
        # current epoch
        curTime = time.time()
        # age of open order = now-timeopened
        orderAge = (curTime-opnTime)
        logging.info(P('LOANER:')+' %s order %s has been open %s mins' % (
                C(coin), G(str(order['id'])), C(str(orderAge/60))
                ))
        return orderAge

    def cancelOldOffers(self, coin):
        if coin in self.openLoanOffers:
            for offer in self.openLoanOffers[coin]:
                age = self.getLoanOfferAge(coin, offer)
                # check if it is beyond max age
                if age > self.coins[coin]['maxAge']:
                    result = self.polo.cancelLoanOrder(offer['id'])
                    if 'error' in result:
                        raise RuntimeError(P('LOANER:')+' %s' % R(result['error']))
                    else:
                        logging.info(P('LOANER:')+' %s [ID: %s]' % (
                            C(result['message']), G(str(offer['id']))
                            ))

    def createLoanOffer(self, coin):
        if 'lending' in self.availBalance:
            if coin in self.availBalance['lending']:
                # and amount is more than min
                if float(self.availBalance['lending'][coin]) > self.coins[coin]['minAmount']:
                    # get the lowest offered rate
                    topRate = float(
                            self.polo.marketLoans(coin)['offers'][0]['rate']
                            )
                    # create loan
                    result = self.polo.createLoanOrder(
                            coin,
                            self.availBalance['lending'][coin],
                            topRate+(self.coins[coin]['offset']*0.000001),
                            autoRenew=1
                            )
                    if 'error' in result:
                        raise RuntimeError(P('LOANER:')+' %s' % R(result['error']))
                    else:
                        logging.info(P('LOANER:')+' %s %s [Amount: %s Rate: %s]' % (
                                C(coin),
                                result['message'].lower(),
                                O(str(self.availBalance['lending'][coin])),
                                O(str(100*(topRate+(self.coins[coin]['offset']*0.000001)))+'%')
                                ))
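# Usage sketch (not part of the original snippet): a hedged illustration of
# the config shape the Loaner class above appears to expect and of how it
# might be started; every key and value below is an assumption inferred from
# the code, not taken from the original project.
#
# loaner.json (hypothetical):
# {
#     "key": "YOUR-API-KEY",
#     "secret": "YOUR-API-SECRET",
#     "interval": 60,
#     "coins": {
#         "BTC": {"minAmount": 0.01, "maxAge": 3600, "offset": 2, "allBal": true}
#     }
# }
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    loaner = Loaner('loaner.json')  # path to the hypothetical config above
    loaner.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        loaner.stop()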