def send_mail(self, content_dict):
        """Send the mail described by *content_dict*, then move it from the
        drafts to the sent folder.

        Returns (via defer.returnValue) the stored sent mail.
        """
        draft_ident = content_dict.get('ident')
        outgoing = InputMail.from_dict(content_dict, self.account_email)

        yield self.mail_sender.sendmail(outgoing)
        stored = yield self.move_to_sent(draft_ident, outgoing)
        defer.returnValue(stored)
Example #2
0
    def on_POST(self, request):
        """Handle a registration POST.

        Dispatches to the stage handler matching the request's "type" key
        (recaptcha / password / email identity) and returns its response.

        Raises:
            SynapseError: 400 when "type" is missing, unknown, or when a
                JSON key required by the chosen login type is absent.
        """
        register_json = _parse_json(request)

        # Optional session token carried across multi-stage registration.
        session = (register_json["session"] if "session" in register_json
                  else None)
        login_type = None
        if "type" not in register_json:
            raise SynapseError(400, "Missing 'type' key.")

        try:
            login_type = register_json["type"]
            # Map each supported login type to its stage handler.
            stages = {
                LoginType.RECAPTCHA: self._do_recaptcha,
                LoginType.PASSWORD: self._do_password,
                LoginType.EMAIL_IDENTITY: self._do_email_identity
            }

            session_info = self._get_session_info(request, session)
            logger.debug("%s : session info %s   request info %s",
                         login_type, session_info, register_json)
            response = yield stages[login_type](
                request,
                register_json,
                session_info
            )

            if "access_token" not in response:
                # isn't a final response; tell the client which session to
                # resume on the next stage.
                response["session"] = session_info["id"]

            defer.returnValue((200, response))
        except KeyError as e:
            # An unknown login type also lands here via stages[login_type].
            logger.exception(e)
            raise SynapseError(400, "Missing JSON keys for login type %s." % login_type)
Example #3
0
 def incremental(self):
     """Update the source dir in place when possible, otherwise clobber."""
     can_update = yield self._sourcedirIsUpdatable()
     step = self.doUpdate if can_update else self.clobber
     result = yield step()
     defer.returnValue(result)
Example #4
0
    def on_GET(self, origin, _content, query, context, user_id):
        """
        Args:
            origin (unicode): The authenticated server_name of the calling server

            _content (None): (GETs don't have bodies)

            query (dict[bytes, list[bytes]]): Query params from the request.

            context (unicode): The room ID, taken from the path match regexp.

            user_id (unicode): The joining user's ID, taken from the path
                match regexp.

        Returns:
            Deferred[(int, object)|None]: either (response code, response object) to
                 return a JSON response, or None if the request has already been handled.
        """
        # Room-version negotiation: the caller advertises the versions it
        # supports via repeated "ver" query params; default to "1" if absent.
        versions = query.get(b'ver')
        if versions is not None:
            supported_versions = [v.decode("utf-8") for v in versions]
        else:
            supported_versions = ["1"]

        content = yield self.handler.on_make_join_request(
            origin, context, user_id,
            supported_versions=supported_versions,
        )
        defer.returnValue((200, content))
Example #5
0
    def _do_password(self, request, register_json, session):
        """Password-registration stage: register the user, bind any email
        threepids, and return the credentials dict.

        Raises:
            SynapseError: 400 when a required captcha stage has not been
                completed, the user ID changed mid-registration, or the
                desired user ID would need URL encoding.
        """
        if (self.hs.config.enable_registration_captcha and
                not session[LoginType.RECAPTCHA]):
            # captcha should've been done by this stage!
            raise SynapseError(400, "Captcha is required.")

        if ("user" in session and "user" in register_json and
                session["user"] != register_json["user"]):
            raise SynapseError(400, "Cannot change user ID during registration")

        password = register_json["password"].encode("utf-8")
        # None means: let the server pick a user ID.
        desired_user_id = (register_json["user"].encode("utf-8") if "user"
                          in register_json else None)
        if desired_user_id and urllib.quote(desired_user_id) != desired_user_id:
            raise SynapseError(
                400,
                "User ID must only contain characters which do not " +
                "require URL encoding.")
        handler = self.handlers.registration_handler
        (user_id, token) = yield handler.register(
            localpart=desired_user_id,
            password=password
        )

        # Bind third-party credentials collected in the email-identity stage.
        if session[LoginType.EMAIL_IDENTITY]:
            yield handler.bind_emails(user_id, session["threepidCreds"])

        result = {
            "user_id": user_id,
            "access_token": token,
            "home_server": self.hs.hostname,
        }
        # Registration is complete; the multi-stage session is done with.
        self._remove_session(session)
        defer.returnValue(result)
Example #6
0
    def calendar_query(self, calendar_uri, query, got_xml, data, no_init):
        """Issue a REPORT *query* against *calendar_uri* and pass the parsed
        DAV XML to *got_xml*.

        Unless *no_init* is set, first MKCALENDAR the collection and populate
        it either from *data* (mapping filename -> icalendar text) or, when
        *data* is empty, from the bundled holiday .ics files.
        """

        if not no_init:
            response = yield self.send(SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authid="wsanchez"))
            response = IResponse(response)
            if response.code != responsecode.CREATED:
                self.fail("MKCALENDAR failed: %s" % (response.code,))

            if data:
                for filename, icaldata in data.iteritems():
                    request = SimpleStoreRequest(self, "PUT", joinURL(calendar_uri, filename + ".ics"), authid="wsanchez")
                    request.stream = MemoryStream(icaldata)
                    yield self.send(request)
            else:
                # Add holiday events to calendar
                for child in FilePath(self.holidays_dir).children():
                    # Only upload icalendar files.
                    if os.path.splitext(child.basename())[1] != ".ics":
                        continue
                    request = SimpleStoreRequest(self, "PUT", joinURL(calendar_uri, child.basename()), authid="wsanchez")
                    request.stream = MemoryStream(child.getContent())
                    yield self.send(request)

        request = SimpleStoreRequest(self, "REPORT", calendar_uri, authid="wsanchez")
        request.stream = MemoryStream(query.toxml())
        response = yield self.send(request)

        response = IResponse(response)

        if response.code != responsecode.MULTI_STATUS:
            self.fail("REPORT failed: %s" % (response.code,))

        returnValue(
            (yield davXMLFromStream(response.stream).addCallback(got_xml))
        )
Example #7
0
    def _sourcedirIsUpdatable(self):
        """Return True when the slave's existing checkout matches our CVS
        root and module, i.e. an incremental update is possible.

        Downloads CVS/Root and CVS/Repository from the slave's workdir and
        compares their contents against the configured cvsroot/cvsmodule.
        """
        myFileWriter = StringFileWriter()
        args = {
                'workdir': self.build.path_module.join(self.workdir, 'CVS'),
                'writer': myFileWriter,
                'maxsize': None,
                'blocksize': 32*1024,
                }

        # Fetch CVS/Root and verify it names the same repository.
        cmd = buildstep.RemoteCommand('uploadFile',
                dict(slavesrc='Root', **args),
                ignore_updates=True)
        yield self.runCommand(cmd)
        if cmd.rc is not None and cmd.rc != 0:
            defer.returnValue(False)
            return
        if myFileWriter.buffer.strip() != self.cvsroot:
            defer.returnValue(False)
            return

        # Re-use the same writer for CVS/Repository; check the module name.
        myFileWriter.buffer = ""
        cmd = buildstep.RemoteCommand('uploadFile',
                dict(slavesrc='Repository', **args),
                ignore_updates=True)
        yield self.runCommand(cmd)
        if cmd.rc is not None and cmd.rc != 0:
            defer.returnValue(False)
            return
        if myFileWriter.buffer.strip() != self.cvsmodule:
            defer.returnValue(False)
            return

        defer.returnValue(True)
Example #8
0
    def performAction(self, req):
        """Cancel one pending build request, or all of them when the "id"
        request arg is "all".

        Returns the URL to redirect to: the authz-failure page when the user
        may not cancel, otherwise the builder page.
        """
        try:
            request_id = req.args.get("id", [None])[0]
            if request_id == "all":
                cancel_all = True
            else:
                cancel_all = False
                request_id = int(request_id)
        except (TypeError, ValueError):
            # BUGFIX: this was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Only a missing (None) or
            # non-integer id should mean "nothing to cancel".
            request_id = None

        authz = self.getAuthz(req)
        if request_id:
            c = interfaces.IControl(self.getBuildmaster(req))
            builder_control = c.getBuilder(self.builder_status.getName())

            brcontrols = yield builder_control.getPendingBuildRequestControls()

            for build_req in brcontrols:
                if cancel_all or (build_req.brid == request_id):
                    log.msg("Cancelling %s" % build_req)
                    res = yield authz.actionAllowed('cancelPendingBuild', req,
                                                                build_req)
                    if res:
                        build_req.cancel()
                    else:
                        defer.returnValue(path_to_authzfail(req))
                        return
                    if not cancel_all:
                        break

        defer.returnValue(path_to_builder(req, self.builder_status))
Example #9
0
    def force(self, req, builderNames):
        """Trigger the force scheduler named in the "forcescheduler" request
        arg, decoding all other request args with the request charset.

        Returns (url, message) when the "forcescheduler" arg is missing;
        NOTE(review): after the scheduler loop no value is returned here —
        confirm against the surrounding file whether more code follows.
        """
        master = self.getBuildmaster(req)
        owner = self.getAuthz(req).getUsernameFull(req)
        schedulername = req.args.get("forcescheduler", ["<unknown>"])[0]
        if schedulername == "<unknown>":
            defer.returnValue((path_to_builder(req, self.builder_status),
                               "forcescheduler arg not found"))
            return

        args = {}
        # decode all of the args
        encoding = getRequestCharset(req)
        for name, argl in req.args.iteritems():
           if name == "checkbox":
               # damn html's ungeneric checkbox implementation...
               for cb in argl:
                   args[cb.decode(encoding)] = True
           else:
               args[name] = [ arg.decode(encoding) for arg in argl ]

        for sch in master.allSchedulers():
            if schedulername == sch.name:
                try:
                    yield sch.force(owner, builderNames, **args)
                    msg = ""
                except ValidationError, e:
                    # Report the validation problem back (ascii-safe).
                    msg = html.escape(e.message.encode('ascii','ignore'))
                break
Example #10
0
    def start(self, fire_hooks=True, start_relations=True):
        """Invoke the start hook, and setup relation watching.

        :param fire_hooks: False to skip running config-change and start hooks.
            Will not affect any relation hooks that happen to be fired as a
            consequence of starting up.

        :param start_relations: True to transition all "down" relation
            workflows to "up".
        """
        self._log.debug("pre-start acquire, running:%s", self._running)
        # Serialize lifecycle operations under the run lock.
        yield self._run_lock.acquire()
        self._log.debug("start running, unit lifecycle")
        watches = []

        try:
            if fire_hooks:
                yield self._execute_hook("config-changed")
                yield self._execute_hook("start")

            if self._relations is None:
                yield self._load_relations()

            if start_relations:
                # We actually want to transition from "down" to "up" where
                # applicable (ie a stopped unit is starting up again)
                for workflow in self._relations.values():
                    with (yield workflow.lock()):
                        state = yield workflow.get_state()
                        if state == "down":
                            yield workflow.transition_state("up")

            # Establish a watch on the existing relations.
            if not self._watching_relation_memberships:
                self._log.debug("starting service relation watch")
                watches.append(self._service.watch_relation_states(
                    self._on_service_relation_changes))
                self._watching_relation_memberships = True

            # Establish a watch for resolved relations
            if not self._watching_relation_resolved:
                self._log.debug("starting unit relation resolved watch")
                watches.append(self._unit.watch_relation_resolved(
                    self._on_relation_resolved_changes))
                self._watching_relation_resolved = True

            # Set current status
            self._running = True
        finally:
            self._run_lock.release()

        # Give up the run lock before waiting on initial watch invocations.
        results = yield DeferredList(watches, consumeErrors=True)

        # If there's an error reraise the first one found.
        errors = [e[1] for e in results if not e[0]]
        if errors:
            returnValue(errors[0])

        self._log.debug("started unit lifecycle")
Example #11
0
    def performAction(self, req):
        """Stop every in-progress build on the selected (or all) builders.

        Returns the URL to redirect to: the authz-failure page when the user
        lacks 'stopAllBuilds', otherwise the root page.
        """
        authz = self.getAuthz(req)
        res = yield authz.actionAllowed('stopAllBuilds', req)
        if not res:
            defer.returnValue(path_to_authzfail(req))
            return

        # BUGFIX: default to an empty list. The original initialised this to
        # None, which made the loop below raise TypeError for any
        # selectedOrAll value other than 'all'/'selected'.
        builders = []
        if self.selectedOrAll == 'all':
            builders = self.status.getBuilderNames()
        elif self.selectedOrAll == 'selected':
            builders = [b for b in req.args.get("selected", []) if b]

        for bname in builders:
            builder_status = self.status.getBuilder(bname)
            (state, current_builds) = builder_status.getState()
            if state != "building":
                # Nothing running on this builder; skip it.
                continue
            for b in current_builds:
                build_status = builder_status.getBuild(b.number)
                if not build_status:
                    continue
                build = StatusResourceBuild(build_status)
                # Authorization was already checked above for all builds.
                build.stop(req, auth_ok=True)

        # go back to the welcome page
        defer.returnValue(path_to_root(req))
Example #12
0
 def mock_event(self, msg, nr_events):
     """Dispatch an outbound message for *msg* and wait until *nr_events*
     events have been dispatched, returning them."""
     self.mock_response(msg)
     message_id = 'id_%s' % (msg,)
     yield self.tx_helper.make_dispatch_outbound(
         "foo", to_addr='2371234567', message_id=message_id)
     yield self.fake_cellulant.get()
     dispatched = yield self.tx_helper.wait_for_dispatched_events(nr_events)
     returnValue(dispatched)
Example #13
0
    def get_current_state(self, room_id, event_type=None, state_key=""):
        """Fetch the current state events for *room_id*.

        When both *event_type* and *state_key* are given, delegates to the
        keyed lookup. Otherwise runs a SQL query filtered by whatever was
        provided and resolves the returned event IDs to events.
        """
        if event_type and state_key is not None:
            result = yield self.get_current_state_for_key(
                room_id, event_type, state_key
            )
            # defer.returnValue raises, so execution stops here on this path.
            defer.returnValue(result)

        def f(txn):
            # Build the SQL incrementally based on which filters were given.
            sql = (
                "SELECT event_id FROM current_state_events"
                " WHERE room_id = ? "
            )

            if event_type and state_key is not None:
                sql += " AND type = ? AND state_key = ? "
                args = (room_id, event_type, state_key)
            elif event_type:
                sql += " AND type = ?"
                args = (room_id, event_type)
            else:
                args = (room_id, )

            txn.execute(sql, args)
            results = txn.fetchall()

            return [r[0] for r in results]

        event_ids = yield self.runInteraction("get_current_state", f)
        events = yield self._get_events(event_ids, get_prev_content=False)
        defer.returnValue(events)
Example #14
0
    def _mergeRequests(self, breq, unclaimed_requests, mergeRequests_fn):
        """Use C{mergeRequests_fn} to merge C{breq} against
        C{unclaimed_requests}, where both are build request dictionaries"""
        # short circuit if there is no merging to do
        if not mergeRequests_fn or len(unclaimed_requests) == 1:
            defer.returnValue([ breq ])
            return

        # we'll need BuildRequest objects, so get those first
        unclaimed_request_objects = yield defer.gatherResults(
                [ self._brdictToBuildRequest(brdict)
                  for brdict in unclaimed_requests ])

        # breq is expected to be one of the unclaimed requests; find its
        # object form at the corresponding index.
        breq_object = unclaimed_request_objects[unclaimed_requests.index(breq)]

        # gather the mergeable requests
        merged_request_objects = []
        for other_breq_object in unclaimed_request_objects:
            # mergeRequests_fn may return a plain bool or a Deferred, hence
            # the maybeDeferred wrapper.
            if (yield defer.maybeDeferred(
                        lambda : mergeRequests_fn(self, breq_object,
                                                  other_breq_object))):
                merged_request_objects.append(other_breq_object)

        # convert them back to brdicts and return
        merged_requests = [ br.brdict for br in merged_request_objects ]
        defer.returnValue(merged_requests)
Example #15
0
    def rebuildBuild(self, bs, reason="<rebuild, no reason given>", extraProperties=None):
        """Queue a rebuild of the finished build *bs*.

        :param bs: build status of the build to rebuild; ignored (returns
            None) unless the build is finished
        :param reason: reason string recorded on the new buildset
        :param extraProperties: optional Properties merged into the rebuild
        :returns: (bsid, brids) of the new buildset, or None when the build
            has no sourcestamps to rebuild from
        """
        if not bs.isFinished():
            return

        # Make a copy of the properties so as not to modify the original build.
        properties = Properties()
        # Don't include runtime-set properties in a rebuild request
        properties.updateFromPropertiesNoRuntime(bs.getProperties())
        # BUGFIX: the original tested `extraProperties is None`, which both
        # ignored any caller-supplied extra properties and passed None into
        # updateFromProperties.
        if extraProperties is not None:
            properties.updateFromProperties(extraProperties)

        properties_dict = dict((k,(v,s)) for (k,v,s) in properties.asList())
        ssList = bs.getSourceStamps(absolute=True)

        if ssList:
            # Every stamp shares the set id of the first one.
            sourcestampsetid = yield ssList[0].getSourceStampSetId(self.control.master)
            dl = []
            for ss in ssList[1:]:
                # add deferred to the list
                dl.append(ss.addSourceStampToDatabase(self.control.master, sourcestampsetid))
            yield defer.gatherResults(dl)

            bsid, brids = yield self.control.master.addBuildset(
                    builderNames=[self.original.name],
                    sourcestampsetid=sourcestampsetid,
                    reason=reason,
                    properties=properties_dict)
            defer.returnValue((bsid, brids))
        else:
            log.msg('Cannot start rebuild, rebuild has no sourcestamps for a new build')
            defer.returnValue(None)
Example #16
0
 def delete_search_index(self, index):
     """Remove the named Yokozuna search index.

     Raises NotImplementedError when the server lacks Yokozuna admin
     support; returns True otherwise.
     """
     admin_supported = yield self.pb_search_admin()
     if not admin_supported:
         raise NotImplementedError("Yokozuna administration is not "
                                   "supported for this version")
     transport_mgr = yield self._getFreeTransport()
     with transport_mgr as transport:
         yield transport.delete_search_index(index)
         defer.returnValue(True)
Example #17
0
 def create_search_schema(self, schema, content):
     """Install a Yokozuna search schema.

     Raises NotImplementedError when the server lacks Yokozuna admin
     support; returns True otherwise.
     """
     admin_supported = yield self.pb_search_admin()
     if not admin_supported:
         raise NotImplementedError("Yokozuna administration is not "
                                   "supported for this version")
     transport_mgr = yield self._getFreeTransport()
     with transport_mgr as transport:
         yield transport.create_search_schema(schema, content)
         defer.returnValue(True)
Example #18
0
 def get_index(self, bucket, index, startkey, endkey=None, return_terms=False,
               max_results=None, continuation=None, bucket_type='default'):
     """Run a secondary-index query and collect the paged responses.

     message RpbIndexResp {
         repeated bytes keys = 1;
         repeated RpbPair results = 2;
         optional bytes continuation = 3;
         optional bool done = 4;
     }

     Returns a dict {'keys': ..., 'continuation': ...}; 'keys' holds plain
     keys, or (term, key) pairs when return_terms is set with a range query.
     """
     with (yield self._getFreeTransport()) as transport:
         ret = yield transport.get_index(
             bucket, index, startkey, endkey=endkey,
             return_terms=return_terms, max_results=max_results,
             continuation=continuation, bucket_type=bucket_type)
         results = []
         if not return_terms or not endkey:
             # Plain key listing: flatten the keys from every response chunk.
             for resp in ret:
                 if resp.keys:
                     results.extend(resp.keys)
         else:
             # Term range queries return (term, key) pairs.
             for resp in ret:
                 if resp.results:
                     for pair in resp.results:
                         results.append((pair.key, pair.value))
         if max_results:
             # NOTE(review): assumes `ret` yielded at least one response so
             # `resp` is bound here — confirm the transport guarantees this.
             defer.returnValue({'keys': results,
                                'continuation': resp.continuation})
         else:
             # BUGFIX: the original used the bare name `continuation` (the
             # call argument, typically None) as the dict KEY instead of the
             # string 'continuation', producing {'keys': ..., None: None}.
             defer.returnValue({'keys': results, 'continuation': None})
Example #19
0
 def create_search_index(self, index, schema=None, n_val=None):
     """Create a Yokozuna search index and return the transport's result.

     Raises NotImplementedError when the server lacks Yokozuna admin
     support.
     """
     admin_supported = yield self.pb_search_admin()
     if not admin_supported:
         raise NotImplementedError("Yokozuna administration is not "
                                   "supported for this version")
     transport_mgr = yield self._getFreeTransport()
     with transport_mgr as transport:
         outcome = yield transport.create_search_index(
             index, schema=schema, n_val=n_val)
         defer.returnValue(outcome)
Example #20
0
    def update_datatype(self, datatype, w=None, dw=None,
                        pw=None, return_body=None, timeout=None,
                        include_context=None,bucket_type = None):
        """
        Updates a Riak Datatype. This operation is not idempotent and
        so will not be retried automatically.

        :param datatype: the datatype to update
        :type datatype: a subclass of :class:`~riak.datatypes.Datatype`
        :param w: the write quorum
        :type w: integer, string, None
        :param dw: the durable write quorum
        :type dw: integer, string, None
        :param pw: the primary write quorum
        :type pw: integer, string, None
        :param return_body: whether to return the updated value of the
          datatype
        :type return_body: bool
        :param timeout: a timeout value in milliseconds
        :type timeout: int
        :param include_context: whether to return the opaque context
          as well as the value, which is useful for removal operations
          on sets and maps
        :type include_context: bool
        :param bucket_type: NOTE(review): accepted but currently NOT
          forwarded to the transport call below — confirm intent
        :rtype: a subclass of :class:`~riak.datatypes.Datatype`, bool
        """
        with (yield self._getFreeTransport()) as transport:
            result = yield transport.update_datatype(datatype,w=w,
                                           dw=dw, pw=pw,
                                           return_body=return_body,
                                           timeout=timeout,
                                           include_context=include_context)
            defer.returnValue(result)
Example #21
0
 def set_bucket_props(self, bucket, props):
     """
     Set bucket properties
     """
     transport_mgr = yield self._getFreeTransport()
     with transport_mgr as transport:
         outcome = yield transport.setBucketProperties(
             bucket.name, bucket.bucket_type, **props)
     defer.returnValue(outcome == True)
Example #22
0
 def ping(self):
     """
     Check server is alive
     """
     transport_mgr = yield self._getFreeTransport()
     with transport_mgr as transport:
         is_alive = yield transport.ping()
     defer.returnValue(is_alive == True)
Example #23
0
    def stepDone(self, results, step):
        """This method is called when the BuildStep completes. It is passed a
        status object from the BuildStep and is responsible for merging the
        Step's results into those of the overall Build.

        Returns (via Deferred) True when the build should terminate.
        """

        terminate = False
        text = None
        # results may arrive as a (results, text) tuple; split it apart.
        if isinstance(results, tuple):
            results, text = results
        assert isinstance(results, type(SUCCESS)), "got %r" % (results,)
        summary = yield step.getBuildResultSummary()
        if 'build' in summary:
            # The step provided its own build-level summary text.
            text = [summary['build']]
        log.msg(" step '%s' complete: %s (%s)" % (step.name, statusToString(results), text))
        if text:
            self.text.extend(text)
            self.master.data.updates.setBuildStateString(self.buildid,
                                                         bytes2unicode(" ".join(self.text)))
        self.results, terminate = computeResultAndTermination(step, results,
                                                              self.results)
        if not self.conn:
            # force the results to retry if the connection was lost
            self.results = RETRY
            terminate = True
        defer.returnValue(terminate)
Example #24
0
    def _fetch(self, _):
        """Fetch from the remote if needed, then hard-reset the workdir to
        the wanted revision.

        Returns the return code of the reset command.
        """
        fetch_required = True

        # If the revision already exists in the repo, we dont need to fetch.
        if self.revision:
            rc = yield self._dovccmd(["cat-file", "-e", self.revision], abandonOnFailure=False)
            if rc == RC_SUCCESS:
                fetch_required = False

        if fetch_required:
            command = ["fetch", "-t", self.repourl, self.branch]
            # If the 'progress' option is set, tell git fetch to output
            # progress information to the log. This can solve issues with
            # long fetches killed due to lack of output, but only works
            # with Git 1.7.2 or later.
            if self.prog:
                command.append("--progress")

            yield self._dovccmd(command)

        if self.revision:
            rev = self.revision
        else:
            rev = "FETCH_HEAD"
        command = ["reset", "--hard", rev, "--"]
        # Failure is fatal only when we can neither retry the fetch nor
        # fall back to clobbering.
        abandonOnFailure = not self.retryFetch and not self.clobberOnFailure
        res = yield self._dovccmd(command, abandonOnFailure)

        # Rename the branch if needed.
        if res == RC_SUCCESS and self.branch != "HEAD":
            # Ignore errors
            yield self._dovccmd(["branch", "-M", self.branch], abandonOnFailure=False)

        defer.returnValue(res)
Example #25
0
    def get_raw(self, uri, args=None):
        """ Gets raw text from the given URI.

        Args:
            uri (str): The URI to request, not including query parameters
            args (dict): A dictionary used to create query strings, defaults to
                None.
                **Note**: The value of each key is assumed to be an iterable
                and *not* a string.
        Returns:
            Deferred: Succeeds when we get *any* 2xx HTTP response, with the
            HTTP body at text.
        Raises:
            On a non-2xx HTTP response. The response body will be used as the
            error message.
        """
        # BUGFIX: the default used to be a mutable `{}` shared across calls;
        # use None, as the docstring always claimed.
        if args:
            query_bytes = urllib.urlencode(args, True)
            uri = "%s?%s" % (uri, query_bytes)

        response = yield self.request(
            "GET",
            uri.encode("ascii"),
            headers=Headers({
                b"User-Agent": [self.user_agent],
            })
        )

        body = yield preserve_context_over_fn(readBody, response)

        if 200 <= response.code < 300:
            defer.returnValue(body)
        else:
            raise CodeMessageException(response.code, body)
Example #26
0
 def uploadSources():
     """Upload each source in turn, stopping at the first FAILURE."""
     for src in sources:
         status = yield self.startUpload(src, masterdest)
         if status == FAILURE:
             defer.returnValue(FAILURE)
             return
     defer.returnValue(SUCCESS)
Example #27
0
    def parseCommitDescription(self, _=None):
        """Run `git describe` (configured via self.getDescription) and record
        the output as the 'commit-description' source property.

        Always returns RC_SUCCESS; describing is best-effort.
        """
        # Deliberate `== False`: getDescription may be an (empty) dict of
        # options, which is falsy but must still run the describe command.
        if self.getDescription == False:  # dict() should not return here
            defer.returnValue(RC_SUCCESS)
            return

        cmd = ["describe"]
        if isinstance(self.getDescription, dict):
            # Translate the option dict into command-line flags.
            for opt, arg in git_describe_flags:
                opt = self.getDescription.get(opt, None)
                arg = arg(opt)
                if arg:
                    cmd.extend(arg)
        # 'git describe' takes a commitish as an argument for all options
        # *except* --dirty
        if not any(arg.startswith("--dirty") for arg in cmd):
            cmd.append("HEAD")

        try:
            stdout = yield self._dovccmd(cmd, collectStdout=True)
            desc = stdout.strip()
            self.updateSourceProperty("commit-description", desc)
        except Exception:
            # Description is best-effort; never fail the step over it.
            pass

        defer.returnValue(RC_SUCCESS)
    def register_agent_instance(self, agent, descriptor=None):
        """
        Client method to register an Agent Instance
        @param agent takes in the agent to create a class and register a new instrument
        @param descriptor The empty, partial or full storage area for additional,
            subclass-specific values.
        """
        # Idiom fix: compare against None with `is`, not `==`.
        assert ((descriptor is None) or
               (isinstance(descriptor, coi_resource_descriptions.AgentInstance)))

        if isinstance(agent, coi_resource_descriptions.AgentInstance):
            # Already a resource description: just (re-)register it below.
            agent_resource = agent
            assert agent_resource.RegistryIdentity, 'Agent Resource must have a registry Identity'

        else:
            agent_instance = agent
            # Build a new description of this agent instance
            agent_resource = yield self.describe_instance(agent_instance, descriptor)

            found_sir = yield self.find_registered_agent_instance_from_description(agent_resource)
            if found_sir:
                # Already registered: return the existing resource as-is.
                assert len(found_sir) == 1
                defer.returnValue(found_sir[0])
            else:
                agent_resource.create_new_reference()
                agent_resource.set_lifecyclestate(dataobject.LCStates.developed)

        agent_resource = yield self.base_register_resource('register_agent_instance',agent_resource)
        defer.returnValue(agent_resource)
Example #29
0
    def on_GET(self, origin, content, query):
        """Serve the federation public-rooms list.

        Raises:
            FederationDeniedError: when this server denies *origin* access.
        """
        if self.deny_access:
            raise FederationDeniedError(origin)

        limit = parse_integer_from_args(query, "limit", 0)
        since_token = parse_string_from_args(query, "since", None)
        include_all_networks = parse_boolean_from_args(
            query, "include_all_networks", False
        )
        third_party_instance_id = parse_string_from_args(
            query, "third_party_instance_id", None
        )

        # Decide which third-party network(s) to list rooms for.
        if include_all_networks:
            network_tuple = None
        elif third_party_instance_id:
            network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
        else:
            # The "main" list: rooms not tied to any third-party network.
            network_tuple = ThirdPartyInstanceID(None, None)

        data = yield self.handler.get_local_public_room_list(
            limit, since_token,
            network_tuple=network_tuple,
            from_federation=True,
        )
        defer.returnValue((200, data))
Example #30
0
 def update_counter(self, bucket, key, value, **params):
     """Update a counter datatype.

     Raises NotImplementedError when the server does not support counters.
     """
     supported = yield self.counters()
     if not supported:
         raise NotImplementedError("Counters are not supported")
     transport_mgr = yield self._getFreeTransport()
     with transport_mgr as transport:
         outcome = yield transport.update_counter(bucket, key, value, **params)
         defer.returnValue(outcome)
Example #31
0
def chat_private(p, req):
    """Private chat: deliver a message from one player to another by nickname.

    req is (cid, [rcv_nick_name, content]). Returns a status/error code via
    defer.returnValue.
    """
    cid, [rcv_nick_name, content] = req

    user = g_UserMgr.getUserByCid(cid)
    if not user:
        log.error('Can not find user. cid: {0}.'.format( cid ))
        defer.returnValue( CONNECTION_LOSE )

    # Muted characters may not chat.
    _status = yield check_character_mute(int(cid))
    if _status:
        defer.returnValue( CHAR_IN_MUTE )

    # Resolve the receiver's cid from the registered-nickname table.
    rcv_cid = yield redis.hget(DICT_NICKNAME_REGISTERED, rcv_nick_name)
    if not rcv_cid:
        log.error('Can not find user. sender_id: {0}, rcv_cid: {1}, rcv_nick_name: {2}.'.format( cid, rcv_cid, rcv_nick_name ))
        defer.returnValue( UNKNOWN_CHAR )

    # Sending a private message to yourself is not allowed.
    if rcv_cid == cid:
        defer.returnValue( UNKNOWN_ERROR )

    rcv_user = g_UserMgr.getUserByCid( rcv_cid )
    if not rcv_user:
        log.error('The user had not online. sender_id: {0}, rcv_cid: {1}.'.format( cid, rcv_cid ))
        defer.returnValue( CHAR_NOT_ONLINE )

    res_err = g_Private.new_msg( user, rcv_user, content )

    defer.returnValue( res_err )
Example #32
0
            log.err("Failed to parse collector response %s" % backend_response)
            log.exception(e)
            raise errors.OONIBReportCreationError

        if response.code == 406:
            # XXX make this more strict
            log.err("The specified input or nettests cannot be submitted to "
                    "this collector.")
            log.msg("Try running a different test or try reporting to a "
                    "different collector.")
            raise errors.OONIBReportCreationError

        self.reportID = parsed_response['report_id']
        self.backendVersion = parsed_response['backend_version']
        log.debug("Created report with id %s" % parsed_response['report_id'])
        defer.returnValue(parsed_response['report_id'])

    def finish(self):
        """Close this report on the collector via POST /report/<id>/close."""
        url = self.collectorAddress + '/report/' + self.reportID + '/close'
        log.debug("Closing the report %s" % url)
        return self.agent.request("POST", str(url))


class OONIBReportLog(object):

    """
    Used to keep track of report creation on a collector backend.
    """

    def __init__(self, file_name=None):
        if file_name is None:
    def collect(self, device, log):
        """
        Collect results of the class' queries and commands.

        This method can be overridden if more complex collection is
        required.

        Args:
            device: the Zenoss device proxy being collected.
            log: logger used for error reporting via self.log_error.

        Returns:
            Deferred firing with a dict mapping query/associator/command
            keys to their collected results.  Runs under inlineCallbacks
            (decorator outside this view).
        """
        try:
            conn_info = self.conn_info(device)
        except UnauthorizedError as e:
            # Bad credentials: raise an error event and re-raise so the
            # caller sees the failure.
            msg = "Error on {}: {}".format(device.id, e.message)
            self._send_event(msg,
                             device.id,
                             ZenEventClasses.Error,
                             eventClass='/Status/Winrm',
                             summary=msg)
            raise e
        client = self.client(conn_info)

        results = {}
        queries = self.get_queries()
        if queries:
            # Map each EnumInfo back to the key its results are stored under.
            query_map = {
                enuminfo: key
                for key, enuminfo in self.enuminfo_tuples()
            }

            # Silence winrm logging. We want to control the message.
            winrm_log = logging.getLogger('winrm')
            winrm_log.setLevel(logging.FATAL)

            try:
                query_results = yield client.do_collect(query_map.iterkeys())
            except Exception as e:
                # Query failures are reported but do not abort the rest of
                # the collection (associators/commands still run).
                self.log_error(log, device, e)
            else:
                for info, data in query_results.iteritems():
                    results[query_map[info]] = data

        # Get associators.
        associators = self.get_associators()

        if associators:
            assoc_client = AssociatorClient(conn_info)
            for assoc_key, associator in associators.iteritems():
                try:
                    # 'kwargs' is optional per associator spec.
                    if not associator.get('kwargs'):
                        assoc_result = yield assoc_client.associate(
                            associator['seed_class'],
                            associator['associations'])
                    else:
                        assoc_result = yield assoc_client.associate(
                            associator['seed_class'],
                            associator['associations'], **associator['kwargs'])

                except Exception as e:
                    # Rewrap the common "no results" error with actionable
                    # guidance before logging it.
                    if 'No results for seed class' in e.message:
                        message = 'No results returned for {}. Check WinRM server'\
                                  ' configuration and z properties.'.format(self.name())
                        e = Exception(message)
                    self.log_error(log, device, e)
                else:
                    results[assoc_key] = assoc_result

        # Get a copy of the class' commands.
        commands = dict(self.get_commands())

        # Add PowerShell commands to commands.
        powershell_commands = self.get_powershell_commands()
        if powershell_commands:
            for psc_key, psc in powershell_commands.iteritems():
                # Wrap as a quoted script block so it can be recognized by
                # the '"&' prefix check below and run through PowerShell.
                commands[psc_key] = '"& {{{}}}"'.format(psc)

        if commands:
            for command_key, command in commands.iteritems():
                winrs_client = SingleCommandClient(conn_info)
                try:
                    if command.startswith('"&'):
                        # PowerShell script block: run through powershell.exe.
                        results[command_key] = yield winrs_client.run_command(
                            POWERSHELL_PREFIX, ps_script=command)
                    else:
                        results[command_key] = yield winrs_client.run_command(
                            command)
                except Exception as e:
                    self.log_error(log, device, e)

        # Collection reached the end: clear any outstanding WinRM/Kerberos
        # status events for this device.
        msg = 'Collection completed for %s'
        for eventClass in ('/Status/Winrm', '/Status/Kerberos'):
            self._send_event(msg % device.id,
                             device.id,
                             ZenEventClasses.Clear,
                             eventClass=eventClass)

        defer.returnValue(results)
Example #34
0
    def _sourcedirIsUpdatable(self):
        """Decide how to prepare the source dir.

        Returns (via defer.returnValue) one of:
          "update"  - a .git directory exists, incremental update is possible
          "clone"   - the workdir is missing or empty
          "clobber" - the workdir has content but no .git directory
        """
        if self.workerVersionIsOlderThan('listdir', '2.16'):
            # Old workers lack the 'listdir' command; fall back to probing
            # directly for the .git directory.
            git_path = self.build.path_module.join(self.workdir, '.git')
            has_git = yield self.pathExists(git_path)
            defer.returnValue("update" if has_git else "clone")

        cmd = buildstep.RemoteCommand('listdir',
                                      {'dir': self.workdir,
                                       'logEnviron': self.logEnviron,
                                       'timeout': self.timeout, })
        cmd.useLog(self.stdio_log, False)
        yield self.runCommand(cmd)

        if 'files' not in cmd.updates:
            # No listing at all - the directory does not exist yet.
            defer.returnValue("clone")

        listing = cmd.updates['files'][0]
        if '.git' in listing:
            defer.returnValue("update")
        defer.returnValue("clobber" if listing else "clone")
Example #35
0
    def applyPatch(self, patch):
        """Refresh the index, then apply *patch* (a (level, body) pair)
        to both the index and the working tree; return the command rc."""
        patch_level, patch_body = patch[0], patch[1]
        yield self._dovccmd(['update-index', '--refresh'])

        rc = yield self._dovccmd(['apply', '--index', '-p', str(patch_level)],
                                 initialStdin=patch_body)
        defer.returnValue(rc)
Example #36
0
 def _syncSubmodule(self, _=None):
     """Run 'git submodule sync' when submodules are enabled.

     Returns RC_SUCCESS when submodules are disabled, otherwise the rc
     of the sync command."""
     if not self.submodules:
         defer.returnValue(RC_SUCCESS)
     rc = yield self._dovccmd(['submodule', 'sync'])
     defer.returnValue(rc)
Example #37
0
 def _doClobber(self):
     """Remove the working directory entirely; raise on failure."""
     rc = yield self.runRmdir(self.workdir, timeout=self.timeout)
     if rc == RC_SUCCESS:
         defer.returnValue(rc)
     raise RuntimeError("Failed to delete directory")
Example #38
0
 def get(self, resultSpec, kwargs):
     """Fetch the buildset identified by kwargs['bsid'] and convert it
     to the data-API representation."""
     raw = yield self.master.db.buildsets.getBuildset(kwargs['bsid'])
     data = yield self.db2data(raw)
     defer.returnValue(data)
Example #39
0
 def modifyEvent(self, href, vevent):
     """Simulate modifying *href* by POSTing an attachment whose size is
     drawn from the configured size distribution."""
     size = int(self._fileSize.sample())
     payload = 'x' * size
     yield self._client.postAttachment(href, payload)
     returnValue(None)
Example #40
0
 def get_next_from_queue():
     """Pop and return the next packet from the shared queue."""
     item = yield queue.get()
     returnValue(item)
Example #41
0
class MaraClientProtocol(object, protocol.Protocol, TimeoutMixin):
    # Inherits from object the property new syntax

    class States(Names):
        # Finite states of the poll conversation with the remote device.
        STARTED = NamedConstant()
        CHECK_NEED_PEH = NamedConstant()
        SEND_PEH = NamedConstant()
        SEND_POLL = NamedConstant()
        WAITING_REPLY = NamedConstant()  # Works with deferred incomingDefered
        USER_COMMAND = NamedConstant()
        GAVE_UP = NamedConstant()
        CONNECTION_LOST = NamedConstant()

    incomingDefered = None
    _state = None

    @property
    def state(self):
        """Current protocol state (a member of States)."""
        return self._state

    @state.setter
    def state(self, new_state):
        # Only values declared in the States enum are legal.
        assert new_state in self.States.iterconstants(), "Invalid state %s" % new_state
        # self.logger.info("State change %s -> %s", self._state, new_state)
        self._state = new_state

    def sendCotainer(self, container):
        """
        Serialize *container* with the construct definition, log the hex
        payload, and write it to the transport.
        """
        # TODO: Publish COMASTER, STATE, DATA
        assert isinstance(container, Container)
        data = self.construct.build(container)
        self.logger.info("%s >> %s", self.state, upperhexstr(data))
        self.transport.write(data)

    @property
    def comaster(self):
        """
        Shortcut to the comaster instance held by the factory.
        """
        return self.factory.comaster

    def setUp(self):
        """Initialization: reset state, sequence and the input buffer."""
        self.state = self.States.STARTED
        # Sequence
        s = self.comaster.sequence
        if s < sequence.MIN.value or s > sequence.MAX.value:
            s = sequence.MIN.value
        # NOTE(review): the clamped value `s` is never written back to
        # self.comaster.sequence - looks like a bug; confirm intent.

        self.input_buffer = MaraFrameReassembler()

    @property
    def active(self):
        """True while the main loop should keep running (i.e. we have not
        given up and the connection is not lost)."""
        return self.state not in (self.States.CONNECTION_LOST, self.States.GAVE_UP)

    @defer.inlineCallbacks
    def mainLoop(self):
        """
        Main loop that drives the communication.  It yields between steps
        so every other responsibility of the reactor can be interleaved.
        """
        while self.active:
            yield self.doPEH()
            replied = yield self.doPoll()
            if not replied:
                continue  # Still online?

            # NOTE(review): whatNext is currently unused; the wait only
            # paces the next poll.
            whatNext = yield self.waitForNextPollOrUserCommands()

        self.transport.loseConnection()

    def waitForNextPollOrUserCommands(self):
        """
        Create a deferred that will be called back from elsewhere (or by
        the poll-interval timer) indicating what should be done next.
        """
        self.waitingDefered = defer.Deferred()
        reactor.callLater(self.comaster.poll_interval, self.waitingDefered.callback, None)
        return self.waitingDefered

    def connectionMade(self):
        """
        Called by Twisted when the connection is made.  The main loop is
        scheduled on the reactor (rather than run inline) for clarity and
        testability.  Calls setUp first.
        """
        self.setUp()
        reactor.callLater(0, self.mainLoop)

    def buildPollContainer(self):
        """
        Create a mara POLL container from the comaster's addressing and
        current sequence number.
        """
        return Container(
            source=self.comaster.rs485_source,
            dest=self.comaster.rs485_destination,
            sequence=self.comaster.sequence,
            command=commands.POLL.value,
            payload_10=None,  # No payload,
        )

    def buildPeHContainer(self, timestamp):
        """
        Create a PEH (time-sync) container carrying *timestamp*.
        """
        # NOTE(review): 0xFF / 0xBB appear to be fixed broadcast dest and
        # PEH sequence values - confirm against the protocol spec.
        container = Container(
            source=self.comaster.rs485_source,
            dest=0xFF,
            sequence=0xBB,
            command=commands.PEH.value,
            peh=timestamp
        )
        return container

    def pepreareToReceive(self):
        """
        Prepare to receive a reply: raise ConnectionLost if the link is
        already gone, reset the reassembly buffer, arm the reply timeout
        and return a fresh deferred that fires with the full package.
        """
        if self.state == self.States.CONNECTION_LOST:
            raise ConnectionLost()
        self.input_buffer.reset()
        self.state = self.States.WAITING_REPLY
        # Incoming defered will not be completed until a FULL package is received
        # or timeout occurs (returning None)
        self.incomingDefered = defer.Deferred()
        self.setTimeout(self.comaster.poll_interval)
        return self.incomingDefered

    @defer.inlineCallbacks
    def doPoll(self):
        """
        Sends Poll commands and waits for reply.
        It supports data to be chunked. It times out.

        :return bool: True if data could be retrieved from device, False otherwise.
        """
        self.state = self.States.SEND_POLL

        tries, max_tries = 0, self.comaster.max_retry_before_offline

        while tries <= max_tries:

            try:
                self.pepreareToReceive()
            except ConnectionLost:
                self.setTimeout(None)
                defer.returnValue(False)

            # If it's not the first try, log it
            if tries:
                self.logger.debug("Retry: %s", tries)

            self.sendCotainer(self.buildPollContainer())
            try:
                _str, package = yield self.incomingDefered
                self.setTimeout(None)
                try:
                    # Persist the package off-reactor; failures here are
                    # logged but still count as a successful poll.
                    yield threads.deferToThread(self.packageReceived, package)
                    self.logger.info("Saved, next poll SEQ: %s",
                                     i2hex(self.comaster.sequence))
                except Exception:
                    self.logger.exception("Package may be lost por partially saved:")

                defer.returnValue(True)  # Return True so sleep is performed

            # NOTE(review): a FieldError does not increment `tries`, so
            # repeated parse errors would loop here - confirm intended.
            except FieldError, e:
                self.logger.warning("Construct error: %s", e)
            except Timeout:
                tries += 1
                if tries > max_tries:
                    self.state = self.States.GAVE_UP
                    self.logger.critical("Giving up POLL response. Retry exceeded!")
                    defer.returnValue(False)

            except ConnectionLost:
                # Connection lost is set in handler since it's use is more general
                # self.state = self.States.CONNECTION_LOST
                defer.returnValue(False)
Example #42
0
    def action(self):
        """Share the calendar, but only once the client is up and running."""
        if not self._client.started:
            # Client not ready yet; skip this round.
            returnValue(None)

        yield self.shareCalendar()
Example #43
0
 def get_url_by_jid(cls, jid):
     """Return the feed URL registered for *jid*, or fall through to
     None when no record exists."""
     doc = yield cls._db.find_one(
         {"jid": jid},
         fields=["url"])
     if doc:
         defer.returnValue(doc["url"])
Example #44
0
 def doUserCommands(self):
     """
     Placeholder for user-command handling; currently a no-op.

     To be implemented.
     """
     # NOTE(review): defer.returnValue raises, so the value is never
     # actually yielded - the `yield` keyword is here only so this stays
     # a generator function for @defer.inlineCallbacks.
     yield defer.returnValue(None)
Example #45
0
 def get_last_modified(self):
     """
     Return the stored 'last_modified' value for this feed URL, or fall
     through to None when the URL is unknown or has no such field.
     """
     res = yield self._db.find_one(
         {"url": self._url},
         fields=["last_modified"])
     # find_one may return None for an unknown URL; the bare membership
     # test would then raise TypeError.  Guard with `res and`, matching
     # the sibling accessors (get_jid, get_url_by_jid) which use `if res:`.
     if res and "last_modified" in res:
         defer.returnValue(res["last_modified"])
Example #46
0
 def is_subscribed(self, url):
     """Return True when this jid has a subscription record for *url*."""
     match = yield self._db.find_one(
         {"jid": self._jid, "url": url},
         fields=[])
     defer.returnValue(bool(match))
Example #47
0
 def on_exchange_third_party_invite_request(self, origin, room_id,
                                            event_dict):
     """Delegate the third-party-invite exchange to the handler and
     return whatever it produces."""
     result = yield self.handler.on_exchange_third_party_invite_request(
         origin, room_id, event_dict,
     )
     defer.returnValue(result)
Example #48
0
 def get_jid(self):
     """Return the jid stored for this URL, or fall through to None
     when no record exists."""
     doc = yield self._db.find_one(
         {"url": self._url},
         fields=["jid"])
     if doc:
         defer.returnValue(doc["jid"])
Example #49
0
 def on_make_leave_request(self, room_id, user_id):
     """Build a leave-event template for *user_id* in *room_id* and
     return it in JSON form, timestamped with the current time."""
     pdu = yield self.handler.on_make_leave_request(room_id, user_id)
     now = self._clock.time_msec()
     defer.returnValue({"event": pdu.get_pdu_json(now)})
Example #50
0
    def get_room_events_stream_for_room(self,
                                        room_id,
                                        from_key,
                                        to_key,
                                        limit=0,
                                        order="DESC"):
        """Get new room events in stream ordering since `from_key`.

        Args:
            room_id (str)
            from_key (str): Token from which no events are returned before
            to_key (str): Token from which no events are returned after. (This
                is typically the current stream token)
            limit (int): Maximum number of events to return
            order (str): Either "DESC" or "ASC". Determines which events are
                returned when the result is limited. If "DESC" then the most
                recent `limit` events are returned, otherwise returns the
                oldest `limit` events.

        Returns:
            Deferred[tuple[list[FrozenEvent], str]]: Returns the list of
            events (in ascending order) and the token from the start of
            the chunk of events returned.
        """
        # Fast path: an empty token range can never contain events.
        if from_key == to_key:
            defer.returnValue(([], from_key))

        from_id = RoomStreamToken.parse_stream_token(from_key).stream
        to_id = RoomStreamToken.parse_stream_token(to_key).stream

        # Consult the in-memory stream-change cache before touching the DB.
        has_changed = yield self._events_stream_cache.has_entity_changed(
            room_id, from_id)

        if not has_changed:
            defer.returnValue(([], from_key))

        def f(txn):
            # `order` is interpolated into the SQL (not a bind parameter);
            # the caller contract above restricts it to "DESC"/"ASC".
            sql = ("SELECT event_id, stream_ordering FROM events WHERE"
                   " room_id = ?"
                   " AND not outlier"
                   " AND stream_ordering > ? AND stream_ordering <= ?"
                   " ORDER BY stream_ordering %s LIMIT ?") % (order, )
            txn.execute(sql, (room_id, from_id, to_id, limit))

            rows = [_EventDictReturn(row[0], None, row[1]) for row in txn]
            return rows

        rows = yield self.runInteraction("get_room_events_stream_for_room", f)

        ret = yield self.get_events_as_list([r.event_id for r in rows],
                                            get_prev_content=True)

        self._set_before_and_after(ret, rows, topo_order=from_id is None)

        if order.lower() == "desc":
            # Rows were fetched newest-first; callers expect ascending order.
            ret.reverse()

        if rows:
            # Token pointing at the oldest event of the returned chunk.
            key = "s%d" % min(r.stream_ordering for r in rows)
        else:
            # Assume we didn't get anything because there was nothing to
            # get.
            key = from_key

        defer.returnValue((ret, key))
Example #51
0
 def on_query_request(self, query_type, args):
     """Count the incoming query by type and dispatch it through the
     registry; return (200, result)."""
     received_queries_counter.inc(query_type)
     result = yield self.registry.on_query(query_type, args)
     defer.returnValue((200, result))
Example #52
0
 def on_send_leave_request(self, origin, content):
     """Parse the leave PDU out of *content*, hand it to the handler and
     acknowledge with an empty 200 response."""
     logger.debug("on_send_leave_request: content: %s", content)
     leave_pdu = event_from_pdu_json(content)
     logger.debug("on_send_leave_request: pdu sigs: %s", leave_pdu.signatures)
     yield self.handler.on_send_leave_request(origin, leave_pdu)
     defer.returnValue((200, {}))
Example #53
0
 def get_magic_folder_size(self, name=None, content=None):
     """Return the size of the magic folder computed from *content*,
     fetching the folder JSON first when *content* is not supplied."""
     data = content
     if not data:
         data = yield self.get_json(self.get_magic_folder_dircap(name))
     if data:
         returnValue(self.size_from_content(data))
Example #54
0
 def on_invite_request(self, origin, content):
     """Parse the invite PDU, let the handler process it, and return the
     resulting event as JSON with a 200 status."""
     invite = event_from_pdu_json(content)
     processed = yield self.handler.on_invite_request(origin, invite)
     now = self._clock.time_msec()
     defer.returnValue((200, {"event": processed.get_pdu_json(now)}))
Example #55
0
        service,
        domain,
    )
    log.debug("DNS SRV: lookup: {l}", l=lookup)
    try:
        answers = (yield DebugResolver.lookupService(lookup))[0]
    except (DomainError, AuthoritativeDomainError), e:
        log.debug("DNS SRV: lookup failed: {exc}", exc=e)
        returnValue(None)

    if len(answers) == 1 and answers[0].type == dns.SRV \
                         and answers[0].payload \
                         and answers[0].payload.target == dns.Name('.'):
        # decidedly not available
        log.debug("DNS SRV: disabled: {l}", l=lookup)
        returnValue(None)

    servers = []
    for a in answers:

        if a.type != dns.SRV or not a.payload:
            continue

        servers.append((a.payload.priority, a.payload.weight,
                        str(a.payload.target), a.payload.port))

    log.debug("DNS SRV: lookup results: {l}\n{s}", l=lookup, s=servers)

    def _serverCmp(a, b):
        if a[0] != b[0]:
            return cmp(a[0], b[0])
Example #56
0
    def _handle_incoming_transaction(self, transaction, request_time):
        """ Process an incoming transaction and return the HTTP response

        Args:
            transaction (Transaction): incoming transaction
            request_time (int): timestamp that the HTTP request arrived at

        Returns:
            Deferred[(int, object)]: http response code and body
        """
        # Deduplicate: if this transaction id was already answered, replay
        # the stored response instead of re-processing the PDUs.
        response = yield self.transaction_actions.have_responded(transaction)

        if response:
            logger.debug("[%s] We've already responded to this request",
                         transaction.transaction_id)
            defer.returnValue(response)
            return

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        received_pdus_counter.inc_by(len(transaction.pdus))

        # Group incoming events by room so rooms can be processed in
        # parallel below.
        pdus_by_room = {}

        for p in transaction.pdus:
            # Convert the relative "age" field (possibly nested under
            # "unsigned") into an absolute "age_ts" anchored at the time
            # this request arrived.
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = request_time - int(p["age"])
                del p["age"]

            event = event_from_pdu_json(p)
            room_id = event.room_id
            pdus_by_room.setdefault(room_id, []).append(event)

        pdu_results = {}

        # we can process different rooms in parallel (which is useful if they
        # require callouts to other servers to fetch missing events), but
        # impose a limit to avoid going too crazy with ram/cpu.
        @defer.inlineCallbacks
        def process_pdus_for_room(room_id):
            # Handle one room's PDUs sequentially, recording a per-event
            # success ({}) or error dict for the response body.
            logger.debug("Processing PDUs for %s", room_id)
            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                try:
                    yield self._handle_received_pdu(transaction.origin, pdu)
                    pdu_results[event_id] = {}
                except FederationError as e:
                    logger.warn("Error handling PDU %s: %s", event_id, e)
                    pdu_results[event_id] = {"error": str(e)}
                except Exception as e:
                    pdu_results[event_id] = {"error": str(e)}
                    logger.exception("Failed to handle PDU %s", event_id)

        yield async .concurrently_execute(
            process_pdus_for_room,
            pdus_by_room.keys(),
            TRANSACTION_CONCURRENCY_LIMIT,
        )

        # EDUs (ephemeral data) are handled after all PDUs, sequentially.
        if hasattr(transaction, "edus"):
            for edu in (Edu(**x) for x in transaction.edus):
                yield self.received_edu(transaction.origin, edu.edu_type,
                                        edu.content)

        pdu_failures = getattr(transaction, "pdu_failures", [])
        for failure in pdu_failures:
            logger.info("Got failure %r", failure)

        response = {
            "pdus": pdu_results,
        }

        logger.debug("Returning: %s", str(response))

        # Persist the response so a retransmitted transaction receives the
        # identical answer (see the dedup check at the top).
        yield self.transaction_actions.set_response(transaction, 200, response)
        defer.returnValue((200, response))
Example #57
0
 def is_valid_transaction(self, tx, height):
     """
     Verify that *tx* is included in the block at *height* by fetching a
     merkle proof from the network and recomputing the merkle root.

     Returns (via defer.returnValue) True when the recomputed root
     matches the stored header's merkle root, False for out-of-range
     heights.
     """
     # The original used `cond or defer.returnValue(False)`, abusing
     # boolean short-circuiting for control flow (returnValue raises).
     # An explicit guard is equivalent and readable.
     if not height <= len(self.headers):
         defer.returnValue(False)
     # NOTE(review): if self.headers is a list, height == len(self.headers)
     # passes this guard but self.headers[height] would raise IndexError -
     # confirm whether headers is dict-like or whether `<` was intended.
     merkle = yield self.network.get_merkle(tx.hex_id.decode(), height)
     merkle_root = self.get_root_of_merkle_tree(merkle['merkle'], merkle['pos'], tx.hash)
     header = self.headers[height]
     defer.returnValue(merkle_root == header['merkle_root'])
Example #58
0
        yield bitcoind.rpc_getblock(genesis_block_hash)
    except jsonrpc.Error_for_code(-5):
        defer.returnValue(False)
    else:
        defer.returnValue(True)


nets = dict(
    bitcoin=math.Object(
        P2P_PREFIX='f9beb4d9'.decode('hex'),
        P2P_PORT=8333,
        ADDRESS_VERSION=0,
        RPC_PORT=8332,
        RPC_CHECK=defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
            (yield check_genesis_block(
                bitcoind,
                '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'
            )) and not (yield bitcoind.rpc_getinfo())['testnet'])),
        SUBSIDY_FUNC=lambda height: 50 * 100000000 >> (height + 1) // 210000,
        POW_FUNC=data.hash256,
        BLOCK_PERIOD=600,  # s
        SYMBOL='BTC',
        CONF_FILE_FUNC=lambda: os.path.join(
            os.path.join(os.environ['APPDATA'], 'Bitcoin')
            if platform.system() == 'Windows' else os.path.expanduser(
                '~/Library/Application Support/Bitcoin/') if platform.system()
            == 'Darwin' else os.path.expanduser('~/.bitcoin'), 'bitcoin.conf'),
        BLOCK_EXPLORER_URL_PREFIX='https://blockchain.info/block/',
        ADDRESS_EXPLORER_URL_PREFIX='https://blockchain.info/address/',
        TX_EXPLORER_URL_PREFIX='https://blockchain.info/tx/',
        SANE_TARGET_RANGE=(2**256 // 2**32 // 1000 - 1, 2**256 // 2**32 - 1),
Example #59
0
 def get_local_history(self, address):
     """Return the locally stored history for *address* as a list of
     (tx_hash, height) pairs parsed from the 'hash:height:' string."""
     details = yield self.db.get_address(address)
     raw = details['history'] or ''
     tokens = raw.split(':')[:-1]
     tx_hashes = tokens[0::2]
     heights = [int(h) for h in tokens[1::2]]
     defer.returnValue(list(zip(tx_hashes, heights)))
 def run(self):
     """Open a log, write a latin-1 encoded cent sign to its stdout
     channel, close it, and report success."""
     log_obj = yield self.addLog('xx')
     yield log_obj.addStdout(u'\N{CENT SIGN}'.encode('latin-1'))
     yield log_obj.finish()
     defer.returnValue(results.SUCCESS)