Example #1
File: data.py Project: dooglus/p2pool
 def get_shares(self):
     if self.known is not None:
         raise AssertionError()
     known = {}
     filenames, next = self.get_filenames_and_next()
     for filename in filenames:
         share_hashes, verified_hashes = known.setdefault(filename, (set(), set()))
         with open(filename, 'rb') as f:
             for line in f:
                 try:
                     type_id_str, data_hex = line.strip().split(' ')
                     type_id = int(type_id_str)
                     if type_id == 0:
                         pass
                     elif type_id == 1:
                         pass
                     elif type_id == 2:
                         verified_hash = int(data_hex, 16)
                         yield 'verified_hash', verified_hash
                         verified_hashes.add(verified_hash)
                     elif type_id == 5:
                         share = Share.from_share(share_type.unpack(data_hex.decode('hex')), self.net)
                         yield 'share', share
                         share_hashes.add(share.hash)
                     else:
                         raise NotImplementedError("share type %i" % (type_id,))
                 except Exception:
                     log.err(None, "Error while reading saved shares, continuing where left off:")
     self.known = known
     self.known_desired = dict((k, (set(a), set(b))) for k, (a, b) in known.iteritems())
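All of these examples revolve around Twisted's twisted.python.log.err. As a quick reference, here is a minimal, self-contained sketch (not taken from any of the projects above) of the calling patterns that recur below: a bare log.err() inside an except block, log.err(None, 'prefix:') to attach a message to the current exception, and log.err(failure_or_string) to log an explicit Failure or a plain string.

# Minimal sketch of the log.err calling patterns used throughout these
# examples. log.err(_stuff=None, _why=None): when _stuff is None, the
# active exception is captured as a Failure, so a bare log.err() is only
# meaningful inside an except block.
import sys
from twisted.python import log, failure

log.startLogging(sys.stdout)  # route log output somewhere visible

try:
    1 / 0
except Exception:
    log.err()                         # log the current exception
    log.err(None, 'while dividing:')  # same, with a prefix message

log.err(failure.Failure(ValueError('explicit')), 'an explicit Failure')
log.err('a plain string is logged as an error event too')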
Example #2
    def get_title(self, url):
        """
        Get the title of the specified url.  If there are any redirects, they
        will first be followed before pulling the title.  Image and pdf links
        will be ignored.

        @param url  - url to pull title for.
        @return     - title if found.
        """
        while True:
            try:
                html = requests.get(url, verify=False)
                html.raise_for_status()
            except requests.exceptions.RequestException, e:
                log.err(str(e))
                return

            if html.headers['content-type'].startswith('image'):
                return
            elif html.headers['content-type'].startswith('application/pdf'):
                return
            else:
                parsed = soup(html.text, 'html.parser')
                if parsed.title is None:
                    redirect = self._meta_redirect(parsed)
                    if not redirect:
                        log.err("Couldn't parse content from %s" % (url,))
                        return
                    else:
                        url = redirect
                else:
                    break
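The _meta_redirect helper used above is project-internal and not shown. A hypothetical sketch of what such a helper might look like, assuming soup is BeautifulSoup: pull the target URL out of a meta refresh tag.

# Hypothetical sketch of a _meta_redirect helper (an assumption, not the
# project's actual code): extract the url from a tag like
# <meta http-equiv="refresh" content="0; url=http://example.com/">.
import re

def _meta_redirect(parsed):
    meta = parsed.find(
        'meta', attrs={'http-equiv': re.compile(r'^refresh$', re.I)})
    if meta is None:
        return None
    match = re.search(r'url=(\S+)', meta.get('content', ''), re.I)
    return match.group(1) if match else None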
Example #3
def handleError():
    from twisted.python import failure
    global exitStatus
    exitStatus = 2
    reactor.callLater(0.01, _stopReactor)
    log.err(failure.Failure())
    raise
Example #4
 def handleRequest(self, notebook_id, msg):
     """
     """
     try:
         backend, access_id = self.notebook_map[notebook_id]
     except KeyError:
         backend, access_id = self.addNotebook(notebook_id)
     log.msg('notebooks backend: %s' % backend)
     result = yield backend.send(access_id, msg)
     status = result['status']
     if status == 'OK':
         defer.returnValue(result['response'])
     if status == 'ERR':
         # check error
         log.err('Backend error %s' % str(result['response']))
         err = result['response']
         if err == 'InvalidAccessId':
             #self.reset_access_id(self, notebook_id)
             nb = notebook_models.Notebook.objects.get(guid=notebook_id)
             engine_type = str(nb.backend.all()[0].engine_type.name)
             new_access_id = yield backend.newAccessId(engine_type)
             nb.backend.all()[0].access_id = new_access_id
             nb.save()
             self.notebook_map[notebook_id] = (backend, new_access_id,)
             result_retry = yield backend.send(new_access_id, msg)
             status = result_retry['status']
             # TODO: Better handling. return no matter what for now
             defer.returnValue(result_retry['response'])
Example #5
 def get_user_details(self, username):
     contents = re.split('([+/])', username)
     assert len(contents) % 2 == 1
     
     user, contents2 = contents[0], contents[1:]
     
     desired_pseudoshare_target = None
     desired_share_target = None
     for symbol, parameter in zip(contents2[::2], contents2[1::2]):
         if symbol == '+':
             try:
                 desired_pseudoshare_target = bitcoin_data.difficulty_to_target(float(parameter))
                 
             except:
                 if p2pool.DEBUG:
                     log.err()
         elif symbol == '/':
             try:
                 desired_share_target = bitcoin_data.difficulty_to_target(float(parameter))
                
             except:
                 if p2pool.DEBUG:
                     log.err()
     
     if random.uniform(0, 100) < self.worker_fee:
         pubkey_hash = self.my_pubkey_hash
     else:
         try:
             pubkey_hash = bitcoin_data.address_to_pubkey_hash(user, self.node.net.PARENT)
         except: # XXX blah
             pubkey_hash = self.my_pubkey_hash
     
     return user, pubkey_hash, desired_share_target, desired_pseudoshare_target
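For reference, a worked example (not from the source) of the username mini-syntax parsed above, where '+' sets the pseudoshare difficulty and '/' sets the share difficulty:

# Worked example of the username mini-syntax: 'addr+10/5' requests a
# pseudoshare difficulty of 10 and a share difficulty of 5.
import re

contents = re.split('([+/])', 'addr+10/5')
# -> ['addr', '+', '10', '/', '5']
user, contents2 = contents[0], contents[1:]
pairs = zip(contents2[::2], contents2[1::2])
# -> [('+', '10'), ('/', '5')]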
Example #6
 def run_cleanups():
     try:
         while cleanups:
             fn = cleanups.pop()
             fn()
     except:
         log.err(failure.Failure(), "while running %r" % (run_cleanups,))
Example #7
 def download_shares():
     while True:
         desired = yield self.node.desired_var.get_when_satisfies(lambda val: len(val) != 0)
         peer_addr, share_hash = random.choice(desired)
         
         if len(self.peers) == 0:
             yield deferral.sleep(1)
             continue
         peer = random.choice(self.peers.values())
         
         print 'Requesting parent share %s from %s' % (p2pool_data.format_hash(share_hash), '%s:%i' % peer.addr)
         try:
             shares = yield peer.get_shares(
                 hashes=[share_hash],
                 parents=random.randrange(500), # randomize parents so that we eventually get past a too large block of shares
                 stops=list(set(self.node.tracker.heads) | set(
                     self.node.tracker.get_nth_parent_hash(head, min(max(0, self.node.tracker.get_height_and_last(head)[0] - 1), 10)) for head in self.node.tracker.heads
                 ))[:100],
             )
         except defer.TimeoutError:
             print 'Share request timed out!'
             continue
         except:
             log.err(None, 'in download_shares:')
             continue
         
         if not shares:
             yield deferral.sleep(1) # sleep so we don't keep rerequesting the same share nobody has
             continue
         self.handle_shares([(share, []) for share in shares], peer)
Example #8
File: netstring.py Project: Rus-L/txjason
 def _lostProtocol(self, reason):
     log.err(reason, '%r disconnected' % (self,))
     deferreds, self._notifyOnDisconnect = self._notifyOnDisconnect, []
     for d in deferreds:
         d.errback(reason)
     self._proto = None
     self.client.cancelRequests()
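The registration side of the _notifyOnDisconnect list drained above is not shown; a hypothetical counterpart (an assumption, not txjason's actual code) might look like:

# Hypothetical counterpart: hand out Deferreds that are errbacked with the
# disconnect reason by _lostProtocol above.
from twisted.internet import defer

def notifyOnDisconnect(self):
    d = defer.Deferred()
    self._notifyOnDisconnect.append(d)
    return d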
Example #9
 def _not_attached(self, why, slave):
     # already log.err'ed by SlaveBuilder._attachFailure
     # TODO: remove from self.slaves (except that detached() should get
     #       run first, right?)
     log.err(why, 'slave failed to attach')
     self.builder_status.addPointEvent(['failed', 'connect',
                                        slave.slavename])
Example #10
    def start_instance(self, build):
        """
        I start a new instance of a VM.

        If a base_image is specified, I will make a clone of it; otherwise I
        will use the image directly.

        If I'm not given a libvirt domain definition XML, I will look for my
        name in the list of defined virtual machines and start that.
        """
        if self.domain is not None:
            log.msg("Cannot start_instance '%s' as already active" % self.workername)
            defer.returnValue(False)

        yield self._prepare_base_image()

        try:
            if self.xml:
                self.domain = yield self.connection.create(self.xml)
            else:
                self.domain = yield self.connection.lookupByName(self.workername)
                yield self.domain.create()
        except Exception:
            log.err(failure.Failure(),
                    "Cannot start a VM (%s), failing gracefully and triggering "
                    "a new build check" % self.workername)
            self.domain = None
            defer.returnValue(False)

        defer.returnValue(True)
Example #11
File: proxy.py Project: boyska/infamitm
    def process(self):
        log.msg("PROCESS: %s" % id(self))
        log.msg("URI:%s PATH %s" % (self.uri, self.path + str(self.args)))
        log.msg(
            "Request:\n\t%s"
            % "\n\t".join(("%s\t%s" % (x[0], ";".join(x[1])) for x in self.requestHeaders.getAllRawHeaders()))
        )
        session = Session(self)

        session.preRequest()
        host = self.getHeader("host")
        if not host:
            log.err("No host header given")
            self.setResponseCode(400)
            self.finish()
            return

        port = 80
        if ":" in host:
            host, port = host.split(":")
            port = int(port)
        self.setHost(host, port)

        log.msg("URI:%s PATH %s" % (self.uri, self.path + str(self.args)))
        log.msg(
            "Request:\n\t%s"
            % "\n\t".join(("%s\t%s" % (x[0], ";".join(x[1])) for x in self.requestHeaders.getAllRawHeaders()))
        )

        self.content.seek(0, 0)
        postData = self.content.read()
        factory = ProxyClientFactory(self.method, self.uri, postData, self.requestHeaders.getAllRawHeaders(), session)
        self.reactor.connectTCP(host, port, factory)
Example #12
 def appengineIncomming(self, payload):
     if payload["p"] == "http":
         self.sendCallbackViaHTTP(payload)
     elif payload["p"] == "xmpp":
         self.sendCallbackViaXMPP(payload)
     else:
         log.err("Unknown callback protocol: %s" % payload["p"])
Example #13
File: ai.py Project: bithub/bit.bot.aiml
    def wake(self, verbose=False):
        log.err('bit.bot.aiml: BitAI.wake')
        self.bot.verbose(verbose)
        self.bot.setBotPredicate('name', self.name)
        self.bot.setBotPredicate('age', '~180')
        self.bot.setBotPredicate('location', 'Trinity')
        self.bot.setBotPredicate('gender', 'male')
        self.bot.setBotPredicate('party', 'libertarian socialist')
        return
        self.bot.setPredicate('secure', "yes")

        aiml_dir = os.path.join(os.path.dirname(__file__), 'aiml')
        var_dir = os.path.join(os.getcwd(), 'var')

        #self.bot.learn(os.path.join(aiml_dir,'trinity.aiml'))
        #self.bot.setPredicate('secure', "no")
        #return

        brain = os.path.join(var_dir, "%s.brn" % self.name)
        if os.path.isfile(brain):
            self.bot.bootstrap(brainFile=brain)
        else:
            for ai in os.listdir(aiml_dir):
                if ai.endswith('aiml'):
                    self.bot.learn(os.path.join(aiml_dir, ai))
        #self.bot.saveBrain(brain)
        self.bot.setPredicate('secure', "no")
Example #14
 def _fail(self, reason, request):
     """
     """
     log.err('EngineSessionAdapter fail %s' % reason)
     result = json.dumps({'status':'ERR', 'response':str(reason)})
     request.write(result)
     request.finish()
Example #15
File: guard.py Project: fxia22/ASM_xf
 def portalLogout(self, port):
     p = self.portals.get(port)
     if p:
         r, l = p
         try: l()
         except: log.err()
         del self.portals[port]
Example #16
 def _maybe_retry(e):
     log.err()
     if attempt < self.max_attempts:
         reactor.callLater(attempt * self.retry_multiplier,
                           self._retrying_fetch, u, data, event, attempt + 1)
     else:
         return e
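A hypothetical wiring for this errback (the fetch call is an assumption, not the project's code): returning the failure from _maybe_retry propagates it down the errback chain once max_attempts is exhausted, while returning nothing swallows it for the scheduled retry.

# Hypothetical wiring: attach _maybe_retry as the errback of the fetch
# Deferred. self._fetch is an assumed method returning a Deferred.
d = self._fetch(u, data, event)
d.addErrback(_maybe_retry)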
Example #17
    def join_stage(self, stage_ID):
        """Connects the client to a stage automatically leaving the last one
        (if any, which is currently barely possible)
        requires stage ID in factory.stages.
        Returns self.stage, which will be None if no join has succeeded."""
        newstage = self.factory.stages.get(stage_ID, None)
        if newstage is not None:
            if self.stage is not None:
                self.stage.drop_socket(self)

            newstage.add_socket(self)
            self.stage = newstage
            # Send full stage name to the client
            self.send("STAGE_NAME", stageName=newstage.name, stageID=newstage.ID)

            if self.player is self.factory.audience:
                self.modes = self.audience_modes
            else:
                self.modes = self.player_modes
            """
            
            if newstage.isPlayerAudience(self.player) is True:
                self.modes = self.audience_modes
            else:
                self.modes = self.player_modes
            """

        else:  # stage doesn't exist
            log.err("tried to join missing stage ('%s'). can't do that." % stage_ID)
        return self.stage
Example #18
        def old_way():
            d = None
            for b in itervalues(self.buildslave.slavebuilders):
                if b.remote:
                    d = b.mind.callRemote("shutdown")
                    break

            if d:
                name = self.buildslave.slavename
                log.msg("Shutting down (old) slave: %s" % name)
                # The remote shutdown call will not complete successfully since
                # the buildbot process exits almost immediately after getting
                # the shutdown request.
                # Here we look at the reason why the remote call failed, and if
                # it's because the connection was lost, that means the slave
                # shutdown as expected.

                @d.addErrback
                def _errback(why):
                    if why.check(pb.PBConnectionLost):
                        log.msg("Lost connection to %s" % name)
                    else:
                        log.err("Unexpected error when trying to shutdown %s"
                                % name)
                return d
            log.err("Couldn't find remote builder to shut down slave")
            return defer.succeed(None)
Example #19
File: put_common.py Project: jrossi/twext
 def Rollback(self):
     """
     Rollback the server state. Do not allow this to raise another exception. If
     rollback fails then we are going to be left in an awkward state that will need
     to be cleaned up eventually.
     """
     if self.active:
         self.active = False
         log.err("Rollback: rollback")
         try:
             if self.source_copy and self.source_deleted:
                 self.source_copy.moveTo(source.fp)
                 log.err("Rollback: source restored %s to %s" % (self.source_copy.path, source.fp.path))
                 self.source_copy = None
                 self.source_deleted = False
             if self.destination_copy:
                 destination.fp.remove()
                 log.err("Rollback: destination restored %s to %s" % (self.destination_copy.path, destination.fp.path))
                 self.destination_copy.moveTo(destination.fp)
                 self.destination_copy = None
             elif self.destination_created:
                 destination.fp.remove()
                 log.err("Rollback: destination removed %s" % (destination.fp.path,))
                 self.destination_created = False
         except:
             log.err("Rollback: exception caught and not handled: %s" % failure.Failure())
Example #20
def handleError():
    from twisted.python import failure
    global exitStatus
    exitStatus = 2
    log.err(failure.Failure())
    reactor.stop()
    raise
Example #21
 def addLogWithFailure(self, why, logprefix=""):
     # helper for showing exceptions to the users
     try:
         yield self.addCompleteLog(logprefix + "err.text", why.getTraceback())
         yield self.addHTMLLog(logprefix + "err.html", formatFailure(why))
     except Exception:
         log.err(Failure(), "error while formatting exceptions")
Example #22
 def proxy():
     try:
         assert self.session
         return fn(self.session, *args, **kwargs)
     except:
         log.err()
         raise
Example #23
File: rest.py Project: Riziero/buildbot
 def handleErrors(self, writeError):
     try:
         yield
     except exceptions.InvalidPathError as e:
         writeError(str(e) or "invalid path", errcode=404,
                    jsonrpccode=JSONRPC_CODES['invalid_request'])
         return
     except exceptions.InvalidControlException as e:
         writeError(str(e) or "invalid control action", errcode=501,
                    jsonrpccode=JSONRPC_CODES["method_not_found"])
         return
     except BadRequest as e:
         writeError(str(e) or "invalid request", errcode=400,
                    jsonrpccode=JSONRPC_CODES["method_not_found"])
         return
     except BadJsonRpc2 as e:
         writeError(e.message, errcode=400, jsonrpccode=e.jsonrpccode)
         return
     except Forbidden as e:
          # There is nothing in the jsonrpc spec about a forbidden error, so
          # pick invalid request
         writeError(
             e.message, errcode=403, jsonrpccode=JSONRPC_CODES["invalid_request"])
         return
     except Exception as e:
         log.err(_why='while handling API request')
         writeError(repr(e), errcode=500,
                    jsonrpccode=JSONRPC_CODES["internal_error"])
         return
Example #24
 def callTargetSingle(self,targetKey,*args,**kw):
      '''Call a single target by key.
      @param targetKey: target ID
      @param args: positional client data
      @param kw: keyword client data
      '''
     
     self._lock.acquire()
     try:
         target = self.getTarget(0)
         if not target:
             log.err('the command '+str(targetKey)+' not Found on service')
             return None
         if targetKey not in self.unDisplay:
             log.msg("call method %s on service[single]"%target.__name__)
         defer_data = target(targetKey,*args,**kw)
         if not defer_data:
             return None
         if isinstance(defer_data,defer.Deferred):
             return defer_data
         d = defer.Deferred()
         d.callback(defer_data)
     finally:
         self._lock.release()
     return d
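One stylistic note on the tail of this helper: defer.succeed is the idiomatic shortcut for the hand-rolled Deferred()/callback() pair.

# defer.succeed creates an already-fired Deferred, equivalent to the
# Deferred()/callback(defer_data) pair above.
from twisted.internet import defer

d = defer.succeed(42)
d.addCallback(lambda r: r * 2)  # callbacks added later still run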
Example #25
File: master.py Project: MPanH/buildbot
    def doReconfig(self):
        log.msg("beginning configuration update")
        changes_made = False
        failed = False
        try:
            # Run the master.cfg in a thread, so that it can use blocking code
            new_config = yield threads.deferToThreadPool(
                self.reactor, self.reactor.getThreadPool(),
                self.config_loader.loadConfig)
            changes_made = True
            self.config = new_config

            yield self.reconfigServiceWithBuildbotConfig(new_config)

        except config.ConfigErrors as e:
            for msg in e.errors:
                log.msg(msg)
            failed = True

        except Exception:
            log.err(failure.Failure(), 'during reconfig:')
            failed = True

        if failed:
            if changes_made:
                log.msg("WARNING: reconfig partially applied; master "
                        "may malfunction")
            else:
                log.msg("reconfig aborted without making any changes")
        else:
            log.msg("configuration update complete")
Example #26
File: steps.py Project: opalmer/buildbot
    def _validate_expectation(self, exp, command):
        got = (command.remote_command, command.args)

        for child_exp in exp.nestedExpectations():
            try:
                yield self._validate_expectation(child_exp, command)
                exp.expectationPassed(exp)
            except AssertionError as e:
                # log this error, as the step may swallow the AssertionError or
                # otherwise obscure the failure.  Trial will see the exception in
                # the log and print an [ERROR].  This may result in
                # double-reporting, but that's better than non-reporting!
                log.err()
                exp.raiseExpectationFailure(child_exp, e)

        if exp.shouldAssertCommandEqualExpectation():
            # handle any incomparable args
            for arg in exp.incomparable_args:
                self.assertTrue(arg in got[1],
                                "incomparable arg '%s' not received" % (arg,))
                del got[1][arg]

            # first check any ExpectedRemoteReference instances
            exp_tup = (exp.remote_command, exp.args)
            if exp_tup != got:
                _describe_cmd_difference(exp, command)
                raise AssertionError(
                    "Command contents different from expected; see logs")

        if exp.shouldRunBehaviors():
            # let the Expect object show any behaviors that are required
            yield exp.runBehaviors(command)
Example #27
    def _call_method(self, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                if len(params) < self._man_args(method):
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if not self._vargs(method) \
                        and len(params) > self._max_args(method):
                    raise InvalidParamsError('too many arguments')

                result = yield defer.maybeDeferred(method, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError

                result = yield defer.maybeDeferred(method, **params)
            else:  # No params
                result = yield defer.maybeDeferred(method)
        except JSONRPCError:
            raise
        except Exception:
            # Exception was raised inside the method.
            log.msg('Exception raised while invoking RPC method "{}".'.format(
                    request['method']))
            log.err()
            raise ServerError

        defer.returnValue(result)
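The _man_args, _max_args, and _vargs helpers are not shown. A plausible sketch (an assumption, not the project's actual implementation) using Python 2's inspect.getargspec:

# Plausible arity helpers for the checks above, using inspect.getargspec.
import inspect

def _man_args(method):
    # number of mandatory (non-default) arguments
    spec = inspect.getargspec(method)
    n = len(spec.args) - len(spec.defaults or ())
    return n - 1 if inspect.ismethod(method) else n  # drop bound 'self'

def _max_args(method):
    # total number of named arguments
    n = len(inspect.getargspec(method).args)
    return n - 1 if inspect.ismethod(method) else n

def _vargs(method):
    # True if the method accepts *args
    return inspect.getargspec(method).varargs is not None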
Example #28
def encode_base64(msg):
    """
    Encode a non-multipart message's payload in Base64 (in place).

    This method modifies the message contents in place and adds or replaces an
    appropriate Content-Transfer-Encoding header.

    :param msg: The non-multipart message to be encoded.
    :type msg: email.message.Message
    """
    encoding = msg.get('Content-Transfer-Encoding', None)
    if encoding is not None:
        encoding = encoding.lower()
    # XXX Python's email module can only decode quoted-printable, base64 and
    # uuencoded data, so we might have to implement other decoding schemes in
    # order to support RFC 3156 properly and correctly calculate signatures
    # for multipart attachments (eg. 7bit or 8bit encoded attachments). For
    # now, if content is already encoded as base64 or if it is encoded with
    # some unknown encoding, we just pass.
    if encoding in [None, 'quoted-printable', 'x-uuencode', 'uue', 'x-uue']:
        orig = msg.get_payload(decode=True)
        encdata = _bencode(orig)
        msg.set_payload(encdata)
        # replace or set the Content-Transfer-Encoding header.
        try:
            msg.replace_header('Content-Transfer-Encoding', 'base64')
        except KeyError:
            msg['Content-Transfer-Encoding'] = 'base64'
    elif encoding != 'base64':
        log.err('Unknown content-transfer-encoding: %s' % encoding)
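A quick usage sketch for encode_base64, assuming _bencode above is email.encoders._bencode: a bare Message carrying no Content-Transfer-Encoding header gets its payload encoded in place.

# Usage sketch: with no Content-Transfer-Encoding header present, the
# payload is base64-encoded and the header is set.
from email.message import Message

msg = Message()
msg.set_payload('hello world')
encode_base64(msg)
assert msg['Content-Transfer-Encoding'] == 'base64'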
Example #29
File: i2c.py Project: FirstCypress/LiV
def LOG_ERR(msg):
    """
    Abstraction method for logging errors.
    @type msg: C{str}
    @param msg: an error message to be logged.
    """
    log.err('[error]\t{0}'.format(msg))
Example #30
    def taskFailed(self, reason):
        # reason = TaskFailure
        errorMail = reason.mail.reply()
        errorTask = reason.task
        allTasks = reason.tasks
        results = reason.results
        format = reason.format
        errorIndex = allTasks.index(errorTask)
        error_nr = getattr(reason.value, 'ERRNO', 8002)

        # Create a Fault struct
        fault = {
            'faultCode': error_nr,
            'faultString': reason.getErrorMessage(),
        }
        all_results = list(results)
        all_results.append(fault)

        xml_attachment = None
        body = ""
        try:
            xml_attachment = iapplicationservermailmessage.XmlAttachment(all_results, RESULT_ATTACHMENT_FILENAME)
        except ValueError, ex:
            msg = "Could not attach results to reply mail."
            log.err(msg)
            log.err(ex)
            body += msg
Example #31
 def _errback(self, failure):
     log.err(failure)
Example #32
 def _err(failure):
     log.err(failure)
     reactor.stop()
Example #33
 def _errback(why):
     if why.check(pb.PBConnectionLost):
         log.msg("Lost connection to %s" % name)
     else:
         log.err("Unexpected error when trying to shutdown %s"
                 % name)
Example #34
 def downloadFailed(self, reasons, uri):
     log.err(reasons)
     self.notifyDownloadFailed(uri, reasons)
     del self.downloaders[uri]
     self.tryDownload()
Example #35
 def onUserError(self, e, msg):
     log.err(e, msg)
Example #36
 def _ebRender(self, failure, req_id):
     if isinstance(failure.value, jsonrpclib.Fault):
         return failure.value
     log.err(failure)
     return jsonrpclib.Fault(self.FAILURE, "error")
Example #37
    def create(self, store, notify_just_mdoc=False, pending_inserts_dict=None):
        """
        Create all the parts for this message in the store.

        :param store: an instance of Soledad

        :param notify_just_mdoc:
            if set to True, this method will return *only* the deferred
            corresponding to the creation of the meta-message document.
            Be warned that in that case there will be no record of failures
            when creating the other part-documents.

            Otherwise, this method will return a deferred that will wait for
            the creation of all the part documents.

            Setting this flag to True is mostly a convenient workaround for the
            fact that massive serial appends will take too much time; in most
            cases the MUA will only switch to the mailbox where the appends
            happened after a certain delay, which is usually enough for all the
            queued insert operations to finish.
        :type notify_just_mdoc: bool
        :param pending_inserts_dict:
            a dictionary with the pending inserts ids.
        :type pending_inserts_dict: dict

        :return: a deferred whose callback will be called when either all the
                 part documents have been written, or just the metamsg-doc,
                 depending on the value of the notify_just_mdoc flag
        :rtype: defer.Deferred
        """
        if pending_inserts_dict is None:
            pending_inserts_dict = {}

        leap_assert(self.cdocs,
                    "Need non empty cdocs to create the "
                    "MessageWrapper documents")
        leap_assert(self.mdoc.doc_id is None,
                    "Cannot create: mdoc has a doc_id")
        leap_assert(self.fdoc.doc_id is None,
                    "Cannot create: fdoc has a doc_id")

        def unblock_pending_insert(result):
            if pending_inserts_dict:
                ci_headers = lowerdict(self.hdoc.headers)
                msgid = ci_headers.get('message-id', None)
                try:
                    d = pending_inserts_dict[msgid]
                    d.callback(msgid)
                except KeyError:
                    pass
            return result

        # TODO check that the doc_ids in the mdoc are coherent
        self.d = []

        mdoc_created = self.mdoc.create(store, is_copy=self._is_copy)
        fdoc_created = self.fdoc.create(store, is_copy=self._is_copy)

        mdoc_created.addErrback(lambda f: log.err(f))
        fdoc_created.addErrback(lambda f: log.err(f))

        self.d.append(mdoc_created)
        self.d.append(fdoc_created)

        if not self._is_copy:
            if self.hdoc.doc_id is None:
                self.d.append(self.hdoc.create(store))
            for cdoc in self.cdocs.values():
                if cdoc.doc_id is not None:
                    # we could be just linking to an existing
                    # content-doc.
                    continue
                self.d.append(cdoc.create(store))

        def log_all_inserted(result):
            log.msg("All parts inserted for msg!")
            return result

        self.all_inserted_d = defer.gatherResults(self.d, consumeErrors=True)
        self.all_inserted_d.addCallback(log_all_inserted)
        self.all_inserted_d.addCallback(unblock_pending_insert)
        self.all_inserted_d.addErrback(lambda failure: log.err(failure))

        if notify_just_mdoc:
            return mdoc_created
        else:
            return self.all_inserted_d
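A note on gatherResults(..., consumeErrors=True) as used above: consuming errors avoids unhandled-Deferred noise from the individual creates (each already has its own log.err errback), and the aggregate Deferred fails with a defer.FirstError wrapping the first failure. A tiny standalone demo:

# Standalone demo of gatherResults with consumeErrors=True.
from twisted.internet import defer

ds = [defer.succeed(1), defer.fail(ValueError('boom')), defer.succeed(3)]
agg = defer.gatherResults(ds, consumeErrors=True)
agg.addErrback(lambda f: f.trap(defer.FirstError))  # first failure, wrapped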
Example #38
    def processData(self, data):

        currenttime = datetime.utcnow()
        date = datetime.strftime(currenttime, "%Y-%m-%d")
        actualtime = datetime.strftime(currenttime, "%Y-%m-%dT%H:%M:%S.%f")
        outtime = datetime.strftime(currenttime, "%H:%M:%S")
        timestamp = datetime.strftime(currenttime, "%Y-%m-%d %H:%M:%S.%f")
        ## TODO??? -> Roman!
        #intensity = 88888.8
        pressure1 = 88888.8
        pressure2 = 88888.8
        typ = "none"
        dontsavedata = False

#        packcode = '6hLLL'
#        header = "# MagPyBin %s %s %s %s %s %s %d" % (self.sensor, '[var3,var4]', '[p1,p2]', '[mBar,mBar]', '[1000,1000]', packcode, struct.calcsize(packcode))
        packcode = '6hLL'
        header = "# MagPyBin %s %s %s %s %s %s %d" % (self.sensor, '[var3]', '[p]', '[mBar]', '[1000]', packcode, struct.calcsize(packcode))

        try:
            # Extract data
            data_array = data.strip().split(',')
            #print data_array, len(data_array)
            if len(data_array) == 2:
                typ = "valid"
            # add other types here
        except:
            # TODO??? base x mobile?
            log.err('BM35 - Protocol: Output format not supported - use either base, ... or mobile')
        # Extracting the data from the instrument

        if typ == "valid": 
            pressure1 = float(data_array[0].strip())
            # pressure1 is raw 
            pressure2 = float(data_array[1].strip())
            # pressure2 is calculated from pressure1 by calibration values
        elif typ == "none":
            dontsavedata = True
            pass

        # TODO right now, saving data is not necessary
        try:
            if not typ == "none":
                # extract time data
                datearray = timeToArray(timestamp)
                try:
                    datearray.append(int(pressure2*1000.))
                    data_bin = struct.pack(packcode,*datearray)
                    dataToFile(self.outputdir,self.sensor, date, data_bin, header)
                except:
                    log.msg('BM35 - Protocol: Error while packing binary data')
                    pass
        except:
            log.msg('BM35 - Protocol: Error with binary save routine')
            pass
        evt1 = {'id': 1, 'value': timestamp}
        evt35 = {'id': 35, 'value': pressure2}
        if not ((pressure2 < 1300) and (pressure2 > 800)):
            print('BM35: Druck ausserhalb ',pressure2)
        evt99 = {'id': 99, 'value': 'eol'}

        return evt1,evt35,evt99
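For reference, the '6hLL' packcode above lays out six shorts followed by two unsigned longs; what each field means depends on timeToArray, which is not shown. A worked packing example with made-up values:

# Worked example of the '6hLL' packcode (field meanings are assumptions).
import struct

packcode = '6hLL'
datearray = [2024, 5, 17, 12, 34, 56, 789000]  # assumed: Y M D h m s us
datearray.append(int(1013.25 * 1000))          # pressure * 1000
data_bin = struct.pack(packcode, *datearray)
assert len(data_bin) == struct.calcsize(packcode)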
Example #39
        if CONFIG.has_option('output_influx', 'database_name'):
            dbname = CONFIG.get('output_influx', 'database_name')
        else:
            dbname = 'cowrie'

        retention_policy_duration_default = '12w'
        retention_policy_name = dbname + "_retention_policy"

        if CONFIG.has_option('output_influx', 'retention_policy_duration'):
            retention_policy_duration = CONFIG.get(
                'output_influx', 'retention_policy_duration')

            match = re.search(r'^\d+[dhmw]$', retention_policy_duration)
            if not match:
                log.err(("output_influx: invalid retention policy. "
                        "Using default '{}'..").format(
                        retention_policy_duration_default))
                retention_policy_duration = retention_policy_duration_default
        else:
            retention_policy_duration = retention_policy_duration_default

        database_list = self.client.get_list_database()
        dblist = [str(elem['name']) for elem in database_list]

        if dbname not in dblist:
            self.client.create_database(dbname)
            self.client.create_retention_policy(
                retention_policy_name, retention_policy_duration, 1,
                database=dbname, default=True)
        else:
            retention_policies_list = self.client.get_list_retention_policies(
Example #40
 def err(failure):
     log.err(failure)
Example #41
 def kuit():
     log.err('Bye !')
     reactor.stop()  # @UndefinedVariable
Example #42
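For context on the write() method below, self.client is an influxdb.InfluxDBClient. A minimal, hypothetical wiring sketch (host, port, credentials, and the sample point are placeholders):

# Minimal sketch of instantiating the client used below; all values here
# are placeholders.
from influxdb import InfluxDBClient
from twisted.python import log

client = InfluxDBClient('127.0.0.1', 8086, 'root', 'root', 'cowrie')
point = {
    'measurement': 'cowrie_session_connect',
    'tags': {'session': 'a1b2c3', 'src_ip': '198.51.100.7'},
    'fields': {'sensor': 'honeypot-1', 'src_port': 54321, 'dst_port': 22},
}
if not client.write_points([point]):  # returns False on failure
    log.err('output_influx: test write failed')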
    def write(self, entry):
        if self.client is None:
            log.err("output_influx: client object is not instantiated")
            return

        # event id
        eventid = entry['eventid']

        # measurement init
        m = {
            'measurement': eventid.replace('.','_'),
            'tags': {
                'session': entry['session'],
                'src_ip': entry['src_ip']
            },
            'fields': {
                'sensor': self.sensor
            },
        }

        # event parsing
        if eventid in ['cowrie.command.failed',
                       'cowrie.command.input']:
            m['fields'].update({
                'input': entry['input'],
            })

        elif eventid == 'cowrie.session.connect':
            m['fields'].update({
                'protocol': entry['protocol'],
                'src_port': entry['src_port'],
                'dst_port': entry['dst_port'],
                'dst_ip': entry['dst_ip'],
            })

        elif eventid in ['cowrie.login.success', 'cowrie.login.failed']:
            m['fields'].update({
                'username': entry['username'],
                'password': entry['password'],
            })

        elif eventid == 'cowrie.session.file_download':
            m['fields'].update({
                'shasum': entry.get('shasum'),
                'url': entry.get('url'),
                'outfile': entry.get('outfile')
                })

        elif eventid == 'cowrie.session.file_download.failed':
            m['fields'].update({
                'url': entry.get('url')
                })

        elif eventid == 'cowrie.session.file_upload':
            m['fields'].update({
                'shasum': entry.get('shasum'),
                'outfile': entry.get('outfile'),
            })

        elif eventid == 'cowrie.session.closed':
            m['fields'].update({
                'duration': entry['duration']
            })

        elif eventid == 'cowrie.client.version':
            m['fields'].update({
                'maccs': ','.join(entry['macCS']),
                'kexalgs': ','.join(entry['kexAlgs']),
                'keyalgs': ','.join(entry['keyAlgs']),
                'version': ','.join(entry['version']),
                'compcs': ','.join(entry['compCS']),
                'enccs': ','.join(entry['encCS'])
            })

        elif eventid == 'cowrie.client.size':
            m['fields'].update({
                'height': entry['height'],
                'width': entry['width'],
            })

        elif eventid == 'cowrie.client.var':
            m['fields'].update({
                'name': entry['name'],
                'value': entry['value'],
            })

        elif eventid == 'cowrie.client.fingerprint':
            m['fields'].update({
                'fingerprint': entry['fingerprint']
            })

            # cowrie.direct-tcpip.data, cowrie.direct-tcpip.request
            # cowrie.log.closed cowrie.log.open
            # are not implemented
        else:
            # other events should be handled
            log.err(
                "output_influx: event '{}' not handled. Skipping..".format(
                    eventid))
            return

        result = self.client.write_points([m])

        if not result:
            log.err("output_influx: error when writing '{}' measurement "
                    "in the db.".format(eventid))
Example #43
 def event_upnpAV(self, evt, varname):
     log.err("upnpAV event: %s %s" % (varname, evt))
Example #44
 def attached(self, bot):
     try:
         yield AbstractWorker.attached(self, bot)
     except Exception as e:
         log.err(e, "worker %s cannot attach" % (self.name, ))
         return
Example #45
 def _processChangesFailure(self, f):
     log.msg('BitbucketPullrequestPoller: json api poll failed')
     log.err(f)
     # eat the failure to continue along the defered chain - we still want to catch up
     return None
Example #46
 def event_ohPLAYLIST(self, evt, varname):
     log.err("ohPLAYLIST event: %s %s" % (varname, evt))
Example #47
File: proxy.py Project: DomExpire/eppproxy
 def _fail(reason):
     log.err(reason)
     log.err(
         "Failed connecting to master EPP server, disconnecting client."
     )
     server_protocol.transport.loseConnection()
Example #48
 def result(res):
     log.err(res)
     if not res:
         log.err('Seek Failed: %s' % (res,))
Example #49
 def errback(failure):
     try:
         failure.raiseException()
     except:
         log.err()
     channel.msg("Error looking up issue #%s" % issue_id)
Example #50
File: proxy.py Project: DomExpire/eppproxy
 def _fail(reason):
     log.err(reason)
Example #51
def main(args, net, datadir_path, merged_urls, worker_endpoint):
    try:
        print 'p2pool (version %s)' % (p2pool.__version__,)
        print
        
        @defer.inlineCallbacks
        def connect_p2p():
            # connect to helpd over help-p2p
            print '''Testing helpd P2P connection to '%s:%s'...''' % (args.helpd_address, args.helpd_p2p_port)
            factory = help_p2p.ClientFactory(net.PARENT)
            reactor.connectTCP(args.helpd_address, args.helpd_p2p_port, factory)
            def long():
                print '''    ...taking a while. Common reasons for this include all of helpd's connection slots being used...'''
            long_dc = reactor.callLater(5, long)
            yield factory.getProtocol() # waits until handshake is successful
            if not long_dc.called: long_dc.cancel()
            print '    ...success!'
            print
            defer.returnValue(factory)
        
        if args.testnet: # establish p2p connection first if testnet so helpd can work without connections
            factory = yield connect_p2p()
        
        # connect to helpd over JSON-RPC and do initial getmemorypool
        url = '%s://%s:%i/' % ('https' if args.helpd_rpc_ssl else 'http', args.helpd_address, args.helpd_rpc_port)
        print '''Testing helpd RPC connection to '%s' with username '%s'...''' % (url, args.helpd_rpc_username)
        helpd = jsonrpc.HTTPProxy(url, dict(Authorization='Basic ' + base64.b64encode(args.helpd_rpc_username + ':' + args.helpd_rpc_password)), timeout=30)
        yield helper.check(helpd, net)
        temp_work = yield helper.getwork(helpd, net)
        
        helpd_getnetworkinfo_var = variable.Variable(None)
        @defer.inlineCallbacks
        def poll_warnings():
            helpd_getnetworkinfo_var.set((yield deferral.retry('Error while calling getnetworkinfo:')(helpd.rpc_getnetworkinfo)()))
        yield poll_warnings()
        deferral.RobustLoopingCall(poll_warnings).start(20*60)
        
        print '    ...success!'
        print '    Current block hash: %x' % (temp_work['previous_block'],)
        print '    Current block height: %i' % (temp_work['height'] - 1,)
        print
        
        if not args.testnet:
            factory = yield connect_p2p()
        
        print 'Determining payout address...'
        class keypool(object):
            keys = []
            keyweights = []
            stamp = time.time()
            payouttotal = 0.0

            def addkey(self, n):
                self.keys.append(n)
                self.keyweights.append(random.uniform(0, 100.0))

            def delkey(self, n):
                try:
                    i = self.keys.index(n)
                    self.keys.pop(i)
                    self.keyweights.pop(i)
                except:
                    pass

            def weighted(self):
                choice = random.uniform(0, sum(self.keyweights))
                tot = 0.0
                ind = 0
                for i in self.keyweights:
                    tot += i
                    if tot >= choice:
                        return ind
                    ind += 1
                return ind

            def popleft(self):
                if len(self.keys) > 0:
                    self.keys.pop(0)
                if len(self.keyweights) > 0:
                    self.keyweights.pop(0)

            def updatestamp(self, n):
                self.stamp = n

            def paytotal(self):
                self.payouttotal = 0.0
                for i in xrange(len(pubkeys.keys)):
                    self.payouttotal += node.get_current_txouts().get(help_data.pubkey_hash_to_script2(pubkeys.keys[i]), 0)*1e-8
                return self.payouttotal

            def getpaytotal(self):
                return self.payouttotal

        pubkeys = keypool()
        if args.pubkey_hash is None and args.address != 'dynamic':
            address_path = os.path.join(datadir_path, 'cached_payout_address')
            
            if os.path.exists(address_path):
                with open(address_path, 'rb') as f:
                    address = f.read().strip('\r\n')
                print '    Loaded cached address: %s...' % (address,)
            else:
                address = None
            
            if address is not None:
                res = yield deferral.retry('Error validating cached address:', 5)(lambda: helpd.rpc_validateaddress(address))()
                if not res['isvalid'] or not res['ismine']:
                    print '    Cached address is either invalid or not controlled by local helpd!'
                    address = None
            
            if address is None:
                print '    Getting payout address from helpd...'
                address = yield deferral.retry('Error getting payout address from helpd:', 5)(lambda: helpd.rpc_getaccountaddress('p2pool'))()
            
            with open(address_path, 'wb') as f:
                f.write(address)
            
            my_pubkey_hash = help_data.address_to_pubkey_hash(address, net.PARENT)
            print '    ...success! Payout address:', help_data.pubkey_hash_to_address(my_pubkey_hash, net.PARENT)
            print
            pubkeys.addkey(my_pubkey_hash)
        elif args.address != 'dynamic':
            my_pubkey_hash = args.pubkey_hash
            print '    ...success! Payout address:', help_data.pubkey_hash_to_address(my_pubkey_hash, net.PARENT)
            print
            pubkeys.addkey(my_pubkey_hash)
        else:
            print '    Entering dynamic address mode.'

            if args.numaddresses < 2:
                print ' ERROR: Can not use fewer than 2 addresses in dynamic mode. Resetting to 2.'
                args.numaddresses = 2

            pubkeys = keypool()
            for i in xrange(args.numaddresses):
                address = yield deferral.retry('Error getting a dynamic address from helpd:', 5)(lambda: helpd.rpc_getnewaddress('p2pool'))()
                new_pubkey = help_data.address_to_pubkey_hash(address, net.PARENT)
                pubkeys.addkey(new_pubkey)

            pubkeys.updatestamp(time.time())

            my_pubkey_hash = pubkeys.keys[0]

            for i in xrange(len(pubkeys.keys)):
                print '    ...payout %d: %s' % (i, help_data.pubkey_hash_to_address(pubkeys.keys[i], net.PARENT),)
        
        print "Loading shares..."
        shares = {}
        known_verified = set()
        def share_cb(share):
            share.time_seen = 0 # XXX
            shares[share.hash] = share
            if len(shares) % 1000 == 0 and shares:
                print "    %i" % (len(shares),)
        ss = p2pool_data.ShareStore(os.path.join(datadir_path, 'shares.'), net, share_cb, known_verified.add)
        print "    ...done loading %i shares (%i verified)!" % (len(shares), len(known_verified))
        print
        
        
        print 'Initializing work...'
        
        node = p2pool_node.Node(factory, helpd, shares.values(), known_verified, net)
        yield node.start()
        
        for share_hash in shares:
            if share_hash not in node.tracker.items:
                ss.forget_share(share_hash)
        for share_hash in known_verified:
            if share_hash not in node.tracker.verified.items:
                ss.forget_verified_share(share_hash)
        node.tracker.removed.watch(lambda share: ss.forget_share(share.hash))
        node.tracker.verified.removed.watch(lambda share: ss.forget_verified_share(share.hash))
        
        def save_shares():
            for share in node.tracker.get_chain(node.best_share_var.value, min(node.tracker.get_height(node.best_share_var.value), 2*net.CHAIN_LENGTH)):
                ss.add_share(share)
                if share.hash in node.tracker.verified.items:
                    ss.add_verified_hash(share.hash)
        deferral.RobustLoopingCall(save_shares).start(60)
        
        print '    ...success!'
        print
        
        
        print 'Joining p2pool network using port %i...' % (args.p2pool_port,)
        
        @defer.inlineCallbacks
        def parse(host):
            port = net.P2P_PORT
            if ':' in host:
                host, port_str = host.split(':')
                port = int(port_str)
            defer.returnValue(((yield reactor.resolve(host)), port))
        
        addrs = {}
        if os.path.exists(os.path.join(datadir_path, 'addrs')):
            try:
                with open(os.path.join(datadir_path, 'addrs'), 'rb') as f:
                    addrs.update(dict((tuple(k), v) for k, v in json.loads(f.read())))
            except:
                print >>sys.stderr, 'error parsing addrs'
        for addr_df in map(parse, net.BOOTSTRAP_ADDRS):
            try:
                addr = yield addr_df
                if addr not in addrs:
                    addrs[addr] = (0, time.time(), time.time())
            except:
                log.err()
        
        connect_addrs = set()
        for addr_df in map(parse, args.p2pool_nodes):
            try:
                connect_addrs.add((yield addr_df))
            except:
                log.err()
        
        node.p2p_node = p2pool_node.P2PNode(node,
            port=args.p2pool_port,
            max_incoming_conns=args.p2pool_conns,
            addr_store=addrs,
            connect_addrs=connect_addrs,
            desired_outgoing_conns=args.p2pool_outgoing_conns,
            advertise_ip=args.advertise_ip,
            external_ip=args.p2pool_external_ip,
        )
        node.p2p_node.start()
        
        def save_addrs():
            with open(os.path.join(datadir_path, 'addrs'), 'wb') as f:
                f.write(json.dumps(node.p2p_node.addr_store.items()))
        deferral.RobustLoopingCall(save_addrs).start(60)
        
        print '    ...success!'
        print
        
        if args.upnp:
            @defer.inlineCallbacks
            def upnp_thread():
                while True:
                    try:
                        is_lan, lan_ip = yield ipdiscover.get_local_ip()
                        if is_lan:
                            pm = yield portmapper.get_port_mapper()
                            yield pm._upnp.add_port_mapping(lan_ip, args.p2pool_port, args.p2pool_port, 'p2pool', 'TCP')
                    except defer.TimeoutError:
                        pass
                    except:
                        if p2pool.DEBUG:
                            log.err(None, 'UPnP error:')
                    yield deferral.sleep(random.expovariate(1/120))
            upnp_thread()
        
        # start listening for workers with a JSON-RPC server
        
        print 'Listening for workers on %r port %i...' % (worker_endpoint[0], worker_endpoint[1])
        
        wb = work.WorkerBridge(node, my_pubkey_hash, args.donation_percentage, merged_urls, args.worker_fee, args, pubkeys, helpd)
        web_root = web.get_web_root(wb, datadir_path, helpd_getnetworkinfo_var, static_dir=args.web_static)
        caching_wb = worker_interface.CachingWorkerBridge(wb)
        worker_interface.WorkerInterface(caching_wb).attach_to(web_root, get_handler=lambda request: request.redirect('/static/'))
        web_serverfactory = server.Site(web_root)
        
        
        serverfactory = switchprotocol.FirstByteSwitchFactory({'{': stratum.StratumServerFactory(caching_wb)}, web_serverfactory)
        deferral.retry('Error binding to worker port:', traceback=False)(reactor.listenTCP)(worker_endpoint[1], serverfactory, interface=worker_endpoint[0])
        
        with open(os.path.join(os.path.join(datadir_path, 'ready_flag')), 'wb') as f:
            pass
        
        print '    ...success!'
        print
        
        
        # done!
        print 'Started successfully!'
        print 'Go to http://127.0.0.1:%i/ to view graphs and statistics!' % (worker_endpoint[1],)
        if args.donation_percentage > 1.1:
            print '''Donating %.1f%% of work towards P2Pool's development. Thanks for the tip!''' % (args.donation_percentage,)
        elif args.donation_percentage < .9:
            print '''Donating %.1f%% of work towards P2Pool's development. Please donate to encourage further development of P2Pool!''' % (args.donation_percentage,)
        else:
            print '''Donating %.1f%% of work towards P2Pool's development. Thank you!''' % (args.donation_percentage,)
            print 'You can increase this amount with --give-author argument! (or decrease it, if you must)'
        print
        
        
        if hasattr(signal, 'SIGALRM'):
            signal.signal(signal.SIGALRM, lambda signum, frame: reactor.callFromThread(
                sys.stderr.write, 'Watchdog timer went off at:\n' + ''.join(traceback.format_stack())
            ))
            signal.siginterrupt(signal.SIGALRM, False)
            deferral.RobustLoopingCall(signal.alarm, 30).start(1)
        
        if args.irc_announce:
            from twisted.words.protocols import irc
            class IRCClient(irc.IRCClient):
                nickname = 'p2pool%02i' % (random.randrange(100),)
                channel = net.ANNOUNCE_CHANNEL
                def lineReceived(self, line):
                    if p2pool.DEBUG:
                        print repr(line)
                    irc.IRCClient.lineReceived(self, line)
                def signedOn(self):
                    self.in_channel = False
                    irc.IRCClient.signedOn(self)
                    self.factory.resetDelay()
                    self.join(self.channel)
                    @defer.inlineCallbacks
                    def new_share(share):
                        if not self.in_channel:
                            return
                        if share.pow_hash <= share.header['bits'].target and abs(share.timestamp - time.time()) < 10*60:
                            yield deferral.sleep(random.expovariate(1/60))
                            message = '\x02%s BLOCK FOUND by %s! %s%064x' % (net.NAME.upper(), help_data.script2_to_address(share.new_script, net.PARENT), net.PARENT.BLOCK_EXPLORER_URL_PREFIX, share.header_hash)
                            if all('%x' % (share.header_hash,) not in old_message for old_message in self.recent_messages):
                                self.say(self.channel, message)
                                self._remember_message(message)
                    self.watch_id = node.tracker.verified.added.watch(new_share)
                    self.recent_messages = []
                def joined(self, channel):
                    self.in_channel = True
                def left(self, channel):
                    self.in_channel = False
                def _remember_message(self, message):
                    self.recent_messages.append(message)
                    while len(self.recent_messages) > 100:
                        self.recent_messages.pop(0)
                def privmsg(self, user, channel, message):
                    if channel == self.channel:
                        self._remember_message(message)
                def connectionLost(self, reason):
                    node.tracker.verified.added.unwatch(self.watch_id)
                    print 'IRC connection lost:', reason.getErrorMessage()
            class IRCClientFactory(protocol.ReconnectingClientFactory):
                protocol = IRCClient
            reactor.connectTCP("irc.freenode.net", 6667, IRCClientFactory(), bindAddress=(worker_endpoint[0], 0))
        
        @defer.inlineCallbacks
        def status_thread():
            last_str = None
            last_time = 0
            while True:
                yield deferral.sleep(3)
                try:
                    height = node.tracker.get_height(node.best_share_var.value)
                    this_str = 'P2Pool: %i shares in chain (%i verified/%i total) Peers: %i (%i incoming)' % (
                        height,
                        len(node.tracker.verified.items),
                        len(node.tracker.items),
                        len(node.p2p_node.peers),
                        sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
                    ) + (' FDs: %i R/%i W' % (len(reactor.getReaders()), len(reactor.getWriters())) if p2pool.DEBUG else '')
                    
                    datums, dt = wb.local_rate_monitor.get_datums_in_last()
                    my_att_s = sum(datum['work']/dt for datum in datums)
                    my_shares_per_s = sum(datum['work']/dt/help_data.target_to_average_attempts(datum['share_target']) for datum in datums)
                    this_str += '\n Local: %sH/s in last %s Local dead on arrival: %s Expected time to share: %s' % (
                        math.format(int(my_att_s)),
                        math.format_dt(dt),
                        math.format_binomial_conf(sum(1 for datum in datums if datum['dead']), len(datums), 0.95),
                        math.format_dt(1/my_shares_per_s) if my_shares_per_s else '???',
                    )
                    
                    if height > 2:
                        (stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
                        stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, min(60*60//net.SHARE_PERIOD, height))
                        real_att_s = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, min(height - 1, 60*60//net.SHARE_PERIOD)) / (1 - stale_prop)
                        
                        paystr = ''
                        paytot = 0.0
                        for i in xrange(len(pubkeys.keys)):
                            curtot = node.get_current_txouts().get(help_data.pubkey_hash_to_script2(pubkeys.keys[i]), 0)
                            paytot += curtot*1e-8
                            paystr += "(%.4f)" % (curtot*1e-8,)
                        paystr += "=%.4f" % (paytot,)
                        this_str += '\n Shares: %i (%i orphan, %i dead) Stale rate: %s Efficiency: %s Current payout: %s %s' % (
                            shares, stale_orphan_shares, stale_doa_shares,
                            math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95),
                            math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95, lambda x: (1 - x)/(1 - stale_prop)),
                            paystr, net.PARENT.SYMBOL,
                        )
                        this_str += '\n Pool: %sH/s Stale rate: %.1f%% Expected time to block: %s' % (
                            math.format(int(real_att_s)),
                            100*stale_prop,
                            math.format_dt(2**256 / node.helpd_work.value['bits'].target / real_att_s),
                        )
                        
                        for warning in p2pool_data.get_warnings(node.tracker, node.best_share_var.value, net, helpd_getnetworkinfo_var.value, node.helpd_work.value):
                            print >>sys.stderr, '#'*40
                            print >>sys.stderr, '>>> Warning: ' + warning
                            print >>sys.stderr, '#'*40
                        
                        if gc.garbage:
                            print '%i pieces of uncollectable cyclic garbage! Types: %r' % (len(gc.garbage), map(type, gc.garbage))
                    
                    if this_str != last_str or time.time() > last_time + 15:
                        print this_str
                        last_str = this_str
                        last_time = time.time()
                except:
                    log.err()
        status_thread()
    except:
        reactor.stop()
        log.err(None, 'Fatal error:')
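
The status_thread above illustrates the standard keep-alive idiom: each iteration is wrapped in try/except, and a bare log.err() records the active exception as a Failure without breaking the loop. A minimal sketch of the same idiom using Twisted's task.LoopingCall instead of p2pool's deferral.sleep helper (poll_status and its body are illustrative, not from the project above):

    import sys
    import time
    from twisted.internet import reactor, task
    from twisted.python import log

    log.startLogging(sys.stdout)

    def poll_status():
        try:
            print 'status at %s' % (time.ctime(),)  # stand-in for the real report
        except Exception:
            # log.err() with no arguments wraps the exception currently
            # being handled in a Failure, so one bad iteration cannot
            # kill the loop
            log.err()

    task.LoopingCall(poll_status).start(3.0)
    reactor.run()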
Example #52
0
File: proxy.py Project: DomExpire/eppproxy
    def registerServerProtocol(self, login_frame, server_protocol):
        # extract username and password
        handler = LoginHandler()
        parseString(login_frame, handler)
        if handler.uname is None:
            log.err(
                "Cant find username in client login frame, disconnecting client."
            )
            server_protocol.transport.loseConnection()
            return
        username = handler.uname.strip().encode('UTF-8')

        if handler.pword is None:
            log.err(
                "Cant find password in client login frame, disconnecting client."
            )
            server_protocol.transport.loseConnection()
            return
        password = handler.pword.strip().encode('UTF-8')

        # extract clTRID, if any
        cltrid = None
        if handler.trid is not None:
            cltrid = handler.trid.encode('UTF-8')

        # try to register with an existing connection
        if username in self.connections:
            the_dict = self.connections[username]
            for client_protocol in the_dict['protocols']:
                if client_protocol.server_protocol is None:
                    if the_dict['password'] != password:
                        log.err(
                            "Client password does not match cache, disconnecting client."
                        )
                        server_protocol.transport.loseConnection()
                        return
                    client_protocol.server_protocol = server_protocol
                    server_protocol.client_protocol = client_protocol
                    self.server_to_client[server_protocol] = client_protocol
                    if cltrid is not None:
                        response = re.sub(r'<clTRID>.+</clTRID>',
                                          '<clTRID>' + cltrid + '</clTRID>',
                                          client_protocol.login_response_frame)
                        server_protocol.sendFrame(response)
                    else:
                        server_protocol.sendFrame(
                            client_protocol.login_response_frame)
                    log.msg("REUSING CONNECTION TO EPP SERVER")
                    return

        # OK, we could not find a free existing connection
        def _success(client_protocol):
            if username not in self.connections:
                self.connections[username] = {
                    'password': password,
                    'protocols': [client_protocol]
                }
            else:
                self.connections[username]['protocols'].append(client_protocol)
            self.server_to_client[server_protocol] = client_protocol
            client_protocol.login_frame = login_frame
            client_protocol.server_protocol = server_protocol
            server_protocol.client_protocol = client_protocol
            client_protocol.username = username

        def _fail(reason):
            log.err(reason)
            log.err(
                "Failed connecting to master EPP server, disconnecting client."
            )
            server_protocol.transport.loseConnection()

        log.msg("CREATING NEW CONNECTION TO EPP SERVER")
        self.cc.connectSSL(conf.EPP_HOST, conf.EPP_PORT,
                           self.ccf).addCallbacks(_success, _fail)
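
_fail above emits two separate log events, one for the Failure and one for the explanation; log.err also takes a context string as its second argument, producing a single event. A minimal sketch of that form, with a placeholder host and port rather than values from this project:

    import sys
    from twisted.internet import reactor, protocol
    from twisted.python import log

    log.startLogging(sys.stdout)

    def _fail(reason):
        # one event carrying both the Failure and the context string
        log.err(reason, "Failed connecting to master EPP server, disconnecting client.")

    d = protocol.ClientCreator(reactor, protocol.Protocol).connectTCP('epp.example', 700)
    d.addErrback(_fail)
    d.addBoth(lambda _: reactor.stop())
    reactor.run()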
Example #53
0
 def failed(result):
     log.err(result, "Connection setup failed.")
Example #54
0
import sys
from twisted.python import log

def failed(err):
    log.startLogging(sys.stderr)
    log.err(err)
Example #55
0
    def _socketCallback(self, cfSocket, callbackType,
                        ignoredAddress, ignoredData, context):
        """
        The socket callback issued by CFRunLoop.  This will issue C{doRead} or
        C{doWrite} calls to the L{IReadDescriptor} and L{IWriteDescriptor}
        registered with the file descriptor that we are being notified of.

        @param cfSocket: The L{CFSocket} which has got some activity.

        @param callbackType: The type of activity that we are being notified
            of.  Either L{kCFSocketReadCallBack} or L{kCFSocketWriteCallBack}.

        @param ignoredAddress: Unused, because this is not used for either of
            the callback types we register for.

        @param ignoredData: Unused, because this is not used for either of the
            callback types we register for.

        @param context: The data associated with this callback by
            L{CFSocketCreateWithNative} (in L{CFReactor._watchFD}).  A 2-tuple
            of C{(int, CFRunLoopSource)}.
        """
        (fd, smugglesrc) = context
        if fd not in self._fdmap:
            # Spurious notifications seem to be generated sometimes if you
            # CFSocketDisableCallBacks in the middle of an event.  I don't know
            # about this FD, any more, so let's get rid of it.
            CFRunLoopRemoveSource(
                self._cfrunloop, smugglesrc, kCFRunLoopCommonModes
            )
            return

        why = None
        isRead = False
        src, skt, readWriteDescriptor, rw = self._fdmap[fd]
        try:
            if readWriteDescriptor.fileno() == -1:
                why = _NO_FILEDESC
            else:
                isRead = callbackType == kCFSocketReadCallBack
                # CFSocket seems to deliver duplicate read/write notifications
                # sometimes, especially a duplicate writability notification
                # when first registering the socket.  This bears further
                # investigation, since I may have been mis-interpreting the
                # behavior I was seeing. (Running the full Twisted test suite,
                # while thorough, is not always entirely clear.) Until this has
                # been more thoroughly investigated, we consult our own
                # reading/writing state flags to determine whether we should
                # actually attempt a doRead/doWrite first.  -glyph
                if isRead:
                    if rw[_READ]:
                        why = log.callWithLogger(
                            readWriteDescriptor, readWriteDescriptor.doRead)
                else:
                    if rw[_WRITE]:
                        why = log.callWithLogger(
                            readWriteDescriptor, readWriteDescriptor.doWrite)
        except:
            why = sys.exc_info()[1]
            log.err()
        if why:
            self._disconnectSelectable(readWriteDescriptor, why, isRead)
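
log.callWithLogger, used above around doRead/doWrite, runs the call with the object's logPrefix() as the log "system" tag; if the call raises, it logs the exception itself through log.err and returns None rather than propagating. A minimal sketch (the Echo class is illustrative, not a real descriptor):

    import sys
    from twisted.python import log

    log.startLogging(sys.stdout)

    class Echo(object):
        def logPrefix(self):
            # becomes the 'system' tag on log output produced inside the call
            return 'Echo'

        def doRead(self):
            raise IOError('boom')

    e = Echo()
    # if doRead raises, callWithLogger logs the traceback under system='Echo'
    log.callWithLogger(e, e.doRead)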
Example #57
0
 def _(err):
     log.err(err, 'Error submitting merged block:')
Example #58
0
 def run(self, result):
     try:
         raise RuntimeError("error that occurs outside of a test")
     except RuntimeError:
         log.err(failure.Failure())
Example #59
0
        def got_response(header, user, coinbase_nonce):
            assert len(coinbase_nonce) == self.COINBASE_NONCE_LENGTH
            if coinbase_nonce != '\0'*self.COINBASE_NONCE_LENGTH:
                new_packed_gentx = packed_gentx[:-self.COINBASE_NONCE_LENGTH - 4] + coinbase_nonce + packed_gentx[-4:]
                new_gentx = bitcoin_data.tx_type.unpack(new_packed_gentx)
            else:
                new_packed_gentx = packed_gentx
                new_gentx = gentx

            header_hash = bitcoin_data.hash256(
                bitcoin_data.block_header_type.pack(header))
            pow_hash = self.node.net.PARENT.POW_FUNC(
                bitcoin_data.block_header_type.pack(header))
            try:
                if pow_hash <= header['bits'].target or p2pool.DEBUG:
                    helper.submit_block(
                        dict(header=header,
                             txs=[new_gentx] + other_transactions), False,
                        self.node.factory, self.node.bitcoind,
                        self.node.bitcoind_work, self.node.net)
                    if pow_hash <= header['bits'].target:
                        print
                        print 'GOT BLOCK FROM MINER! Passing to bitcoind! %s%064x' % (
                            self.node.net.PARENT.BLOCK_EXPLORER_URL_PREFIX,
                            header_hash)
                        print
            except:
                log.err(None, 'Error while processing potential block:')

            user, _, _, _ = self.get_user_details(user)
            assert header['previous_block'] == ba['previous_block']
            assert header['merkle_root'] == bitcoin_data.check_merkle_link(
                bitcoin_data.hash256(new_packed_gentx), merkle_link)
            assert header['bits'] == ba['bits']

            on_time = self.new_work_event.times == lp_count

            for aux_work, index, hashes in mm_later:
                try:
                    if pow_hash <= aux_work['target'] or p2pool.DEBUG:
                        df = deferral.retry(
                            'Error submitting merged block: (will retry)', 10,
                            10)(aux_work['merged_proxy'].rpc_getauxblock)(
                                pack.IntType(256, 'big').pack(
                                    aux_work['hash']).encode('hex'),
                                bitcoin_data.aux_pow_type.pack(
                                    dict(
                                        merkle_tx=dict(
                                            tx=new_gentx,
                                            block_hash=header_hash,
                                            merkle_link=merkle_link,
                                        ),
                                        merkle_link=bitcoin_data.calculate_merkle_link(hashes, index),
                                        parent_block_header=header,
                                    )).encode('hex'),
                            )

                        @df.addCallback
                        def _(result, aux_work=aux_work):
                            if result != (pow_hash <= aux_work['target']):
                                print >> sys.stderr, 'Merged block submittal result: %s Expected: %s' % (
                                    result, pow_hash <= aux_work['target'])
                            else:
                                print 'Merged block submittal result: %s' % (
                                    result, )

                        @df.addErrback
                        def _(err):
                            log.err(err, 'Error submitting merged block:')
                except:
                    log.err(None, 'Error while processing merged mining POW:')

            if pow_hash <= share_info['bits'].target and header_hash not in received_header_hashes:
                last_txout_nonce = pack.IntType(8*self.COINBASE_NONCE_LENGTH).unpack(coinbase_nonce)
                share = get_share(header, last_txout_nonce)

                print 'GOT SHARE! %s %s prev %s age %.2fs%s' % (
                    user,
                    p2pool_data.format_hash(share.hash),
                    p2pool_data.format_hash(share.previous_hash),
                    time.time() - getwork_time,
                    ' DEAD ON ARRIVAL' if not on_time else '',
                )
                self.my_share_hashes.add(share.hash)
                if not on_time:
                    self.my_doa_share_hashes.add(share.hash)

                self.node.tracker.add(share)
                self.node.set_best_share()

                try:
                    if (pow_hash <= header['bits'].target or
                            p2pool.DEBUG) and self.node.p2p_node is not None:
                        self.node.p2p_node.broadcast_share(share.hash)
                except:
                    log.err(None, 'Error forwarding block solution:')

                self.share_received.happened(
                    bitcoin_data.target_to_average_attempts(share.target),
                    not on_time, share.hash)

            if pow_hash > target:
                print 'Worker %s submitted share with hash > target:' % (
                    user, )
                print '    Hash:   %56x' % (pow_hash, )
                print '    Target: %56x' % (target, )
            elif header_hash in received_header_hashes:
                print >> sys.stderr, 'Worker %s submitted share more than once!' % (
                    user, )
            else:
                received_header_hashes.add(header_hash)

                self.pseudoshare_received.happened(
                    bitcoin_data.target_to_average_attempts(target),
                    not on_time, user)
                self.recent_shares_ts_work.append(
                    (time.time(),
                     bitcoin_data.target_to_average_attempts(target)))
                while len(self.recent_shares_ts_work) > 50:
                    self.recent_shares_ts_work.pop(0)
                self.local_rate_monitor.add_datum(
                    dict(work=bitcoin_data.target_to_average_attempts(target),
                         dead=not on_time,
                         user=user,
                         share_target=share_info['bits'].target))
                self.local_addr_rate_monitor.add_datum(
                    dict(work=bitcoin_data.target_to_average_attempts(target),
                         pubkey_hash=pubkey_hash))

            return on_time
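
Each stage of got_response funnels unexpected exceptions into log.err(None, '<context>:'). Passing None as the first argument tells log.err to build a Failure from the exception currently being handled; the second argument is printed as a header above the traceback. A minimal sketch:

    import sys
    from twisted.python import log

    log.startLogging(sys.stdout)

    def submit_block(block):
        raise ValueError('malformed block')  # stand-in for the real submission

    try:
        submit_block({})
    except Exception:
        # None => capture the active exception; the string is the context header
        log.err(None, 'Error while processing potential block:')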
Example #60
0
 def exception(failure):
     log.err(failure)
     errs = self.flushLoggedErrors(SynchronousException)
     self.assertEqual(len(errs), 2)
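
Under trial, every Failure passed to log.err during a test is turned into a test error unless the test flushes it; the assertion above expects exactly two logged SynchronousException failures. A minimal self-contained sketch of the flush pattern, with MyError standing in for SynchronousException:

    from twisted.trial import unittest
    from twisted.python import log, failure

    class MyError(Exception):
        pass

    class FlushLoggedErrorsTests(unittest.TestCase):
        def test_flush(self):
            try:
                raise MyError('expected')
            except MyError:
                log.err(failure.Failure())  # wraps the active exception
            # flush it, or trial marks this test as errored
            errs = self.flushLoggedErrors(MyError)
            self.assertEqual(len(errs), 1)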