Example #1
def send_schedule_request(socket, message, interval_secs=-1, headers=(),
                          queue=None, unschedule=False, cron=''):
    """
    Send a SCHEDULE or UNSCHEDULE command.

    Queues a message requesting that something happens on an
    interval for the scheduler.

    Args:
        socket (socket):
        job_schedule (str)
        message: Message to send socket.
        headers (list): List of headers for the message
        queue (str): name of queue the job should be executed in
    Returns:
        str: ID of the message
    """

    if unschedule:
        command = 'UNSCHEDULE'
    else:
        command = 'SCHEDULE'

    msgid = send_emqp_message(socket, command,
                              (queue or conf.DEFAULT_QUEUE_NAME,
                               ','.join(headers),
                               str(interval_secs),
                               serialize(message),
                               cron))

    return msgid
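A hedged usage sketch: `socket` is assumed to be an already-connected eventmq socket, and the job payload follows the REQUEST format documented in Example #8 (the 'run' subcommand and the dotted path are illustrative assumptions):

job = ['run', {'path': 'myapp.tasks:Cleanup', 'callable': 'run'}]

# Run the job every 300 seconds on the default queue
send_schedule_request(socket, job, interval_secs=300)

# Or run it on a cron schedule (interval_secs stays at -1)
send_schedule_request(socket, job, cron='0 * * * *')

# The same message with unschedule=True issues an UNSCHEDULE command instead
send_schedule_request(socket, job, unschedule=True)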
Example #2
def send_publish_request(socket, topic, message):
    """Send a PUBLISH command publishing `message` to `topic`."""

    msgid = send_emqp_message(socket, 'PUBLISH',
                              (topic,
                               serialize(message), ))

    return msgid
Example #3
    def jobs_info(self, jids, info_requested):

        infos = {}
        for rjids in iterate_xrange_limit(jids, self.jid_size_limit):

            packed_jids, serialized_jids = self.pack_jids(rjids)
            serialized_info = serialize(info_requested)
            cloudLog.info('query [%s] on jids %s' % (serialized_info, serialized_jids))

            resp = self.send_request(self.info_query,
                                     post_values={'jids': packed_jids},
                                     get_values={'field': info_requested},
                                     logfunc=None)
            info_dct = resp['info']

            for (x, y) in info_dct.iteritems():
                try:
                    x = long(x)
                except ValueError:
                    x = str(x)
                y = unicode_container_to_str(y)

                # additional patching
                for k in y.keys():
                    if k in ['func_object', 'args', 'kwargs']:
                        y[k] = base64.b64decode(y[k])

                infos[x] = y

        return infos
Example #4
    def modules_check(self, modules):
        """modules_check determines which modules must be sent from the client
        to the server.
        modules: list of tuples where each tuple is (filename, timestamp)
        
        Returns a list of filenames to send."""
        
        packedMods = Packer()
        packedMods.add(serialize(modules))      
        data = packedMods.finish()

        resp = self.send_request(self.modules_check_query,
                                 {'data': data,
                                  'hostname': str(self.hostname),
                                  'language': 'python'})        
        
        mods = resp['modules']
        
        if 'ap_version' in resp:
            self.__ap_version = resp['ap_version']
            #cloudLog.info("network.py: modules_check(): result['ap_version'] of query: %s" % resp['ap_version'])
            
        cloudLog.info('network.py: modules_check(): ap_version is now %s. needed mods are %s', 
                      self.__ap_version, mods)
        
        return mods
Example #6
def api_docs(request, school, subject, course):
    json = serializers.get_serializer("json")()
    schoolo = get_object_or_404(School, name=school)
    subjecto = get_object_or_404(Subject, name=subject)
    courseo = get_object_or_404(Course, name=course)
    to_serialize = Document.objects.filter(school=schoolo, approved=True).filter(subject=subjecto).filter(course=courseo)
    return HttpResponse(json.serialize(to_serialize))
Example #7
  def dumps(cls, reply):
    """
    Convert a reply object to a json string

    @return: json string
    """
    return serialize(reply, cls=CustomEncoder)
Example #8
File: messages.py Project: com4/eventmq
def send_request(socket, message, reply_requested=False, guarantee=False,
                 retry_count=0, queue=None):
    """
    Send a REQUEST command.

    All headers are disabled by default. If a header is included in the
    message, it has been enabled.

    To execute a task, the message should be formatted as follows:
    {subcommand(str), {
        # dot path location where callable can be imported. If callable is a
        # method on a class, the class should always come last, and be
        # separated with a colon. (So we know to instantiate on the receiving
        # end)
        'path': path(str),
        # function or method name to run
        'callable': callable(str),
        # Optional args for callable
        'args': (arg, arg),
        # Optional kwargs for callable
        'kwargs': {'kwarg': kwarg},
        # Optional class args, kwargs
        'class_args': (arg2, arg3),
        'class_kwargs': {'kwarg2': kwarg}

        }
    }
    Args:
        socket (socket): Socket to use when sending `message`
        message: message to send to `socket`
        reply_requested (bool): request the return value of func as a reply
        guarantee (bool): Give your best effort to guarantee that func is
            executed. Exceptions and failures will be logged.
        retry_count (int): How many times the job should be retried when it
            encounters an Exception or some other failure before giving up.
            (default: 0, i.e. fail immediately)
        queue (str): Name of the queue to use when executing the job.
            Defaults to the configured default queue name.

    Returns:
        str: ID of the message
    """
    headers = []

    if reply_requested:
        headers.append('reply-requested')

    if guarantee:
        headers.append('guarantee')

    if retry_count > 0:
        headers.append('retry-count:%d' % retry_count)

    msgid = send_emqp_message(socket, 'REQUEST',
                              (queue or conf.DEFAULT_QUEUE_NAME,
                               ",".join(headers),
                               serialize(message)))

    return msgid
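A sketch of the REQUEST payload described in the docstring above. The container type, a (subcommand, spec) pair, the 'run' subcommand, and the target path are illustrative assumptions, and `socket` is again assumed to be an already-connected eventmq socket:

message = ('run', {
    'path': 'myapp.tasks:EmailSender',  # class comes last, colon-separated
    'callable': 'send',                 # method to run on the instance
    'args': ('hello',),
    'kwargs': {'retries': 3},
    'class_args': (),
    'class_kwargs': {},
})

msgid = send_request(socket, message, reply_requested=True, retry_count=2)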
Example #10
    def on_schedule(self, msgid, message):
        """
        """
        logger.info("Received new SCHEDULE request: {}".format(message))

        queue = message[0]
        headers = message[1]
        interval = int(message[2])
        cron = str(message[4])

        schedule_hash = self.schedule_hash(message)

        # Notify if this is updating existing, or new
        if (schedule_hash in self.cron_jobs
                or schedule_hash in self.interval_jobs):
            logger.debug('Update existing scheduled job with %s' %
                         schedule_hash)
        else:
            logger.debug('Creating a new scheduled job with %s' %
                         schedule_hash)

        # If interval is negative, cron MUST be populated
        if interval >= 0:
            inter_iter = IntervalIter(monotonic(), interval)

            self.interval_jobs[schedule_hash] = [
                next(inter_iter), message[3], inter_iter, queue
            ]

            if schedule_hash in self.cron_jobs:
                self.cron_jobs.pop(schedule_hash)
        else:
            ts = int(timestamp())
            c = croniter(cron)
            c_next = next(c)
            if ts >= c_next:
                # If the next execution time has passed move the iterator to
                # the following time
                c_next = next(c)

            self.cron_jobs[schedule_hash] = [c_next, message[3], c, None]

            if schedule_hash in self.interval_jobs:
                self.interval_jobs.pop(schedule_hash)

        # Persist the scheduled job
        try:
            if schedule_hash not in self.redis_server.lrange(
                    'interval_jobs', 0, -1):
                self.redis_server.lpush('interval_jobs', schedule_hash)
            self.redis_server.set(schedule_hash, serialize(message))
            self.redis_server.save()
        except redis.ConnectionError:
            logger.warning('Could not contact redis server')
        except Exception as e:
            logger.warning(str(e))

        if 'nohaste' not in headers:
            self.send_request(message[3], queue=queue)
Example #11
File: amcates.py Project: amcat/amcat
    def bulk_update_values(self, articles):
        """Updates set of articles in bulk.
        """
        body = get_bulk_body({aid: serialize({"doc": a}) for aid, a in articles.items()}, action="update")
        resp = self.es.bulk(body=body, index=self.index, doc_type=settings.ES_ARTICLE_DOCTYPE)

        if resp["errors"]:
            raise ElasticSearchError(resp)
Example #13
    def __str__(self) -> str:
        error = dict(code=self.code, message=self.message)
        if self.data:
            error["data"] = self.data
        deserialized = dict(jsonrpc=self.jsonrpc, error=error)
        if self.id is not NOID:
            deserialized["id"] = self.id
        return serialize(sort_response(deserialized))
Example #14
def api_docs(request, school, subject, course):
    json = serializers.get_serializer("json")()
    schoolo = get_object_or_404(School, name=school)
    subjecto = get_object_or_404(Subject, name=subject)
    courseo = get_object_or_404(Course, name=course)
    to_serialize = Document.objects.filter(
        school=schoolo,
        approved=True).filter(subject=subjecto).filter(course=courseo)
    return HttpResponse(json.serialize(to_serialize))
Example #15
    def bulk_insert(self, dicts):
        """
        Add the given article dict objects to the index using a bulk insert call
        """
        body = get_bulk_body({d["id"]: serialize(d) for d in dicts})
        resp = self.es.bulk(body=body, index=self.index, doc_type=settings.ES_ARTICLE_DOCTYPE)

        if resp["errors"]:
            raise ElasticSearchError(resp)
Example #16
File: amcates.py Project: amcat/amcat
    def bulk_update(self, article_ids, script, params):
        """
        Execute a bulk update script with the given params on the given article ids.
        """
        payload = serialize({"script": dict(script, params=params)})
        body = get_bulk_body({aid: payload for aid in article_ids}, action="update")
        resp = self.es.bulk(body=body, index=self.index, doc_type=settings.ES_ARTICLE_DOCTYPE)

        if resp["errors"]:
            raise ElasticSearchError(resp)
Example #17
    def bulk_update(self, article_ids, script, params):
        """
        Execute a bulk update script with the given params on the given article ids.
        """
        payload = serialize(dict(script=script, params=params))
        body = get_bulk_body({aid: payload for aid in article_ids}, action="update")
        resp = self.es.bulk(body=body, index=self.index, doc_type=settings.ES_ARTICLE_DOCTYPE)

        if resp["errors"]:
            raise ElasticSearchError(resp)
Example #18
def get_hawk_sender(method, url, data, credentials):
    content = serialize(data) if data else data
    credentials = settings.HAWK_CREDENTIALS.get(credentials)

    return Sender(credentials,
                  url,
                  method,
                  content=content,
                  content_type="application/json",
                  seen_nonce=_seen_nonce)
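A hedged usage sketch, assuming Sender is mohawk.Sender and settings.HAWK_CREDENTIALS maps a name to a mohawk-style credentials dict; the URL, payload, and credentials key below are hypothetical:

sender = get_hawk_sender("POST", "https://api.example.com/items/",
                         {"name": "widget"}, "internal-service")

# mohawk exposes the signed Authorization header for the outgoing request
headers = {"Authorization": sender.request_header,
           "Content-Type": "application/json"}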
Example #19
    def bulk_update(self, article_ids, script, params):
        """
        Execute a bulk update script with the given params on the given article ids.
        """
        payload = serialize(dict(script=script, params=params))

        def get_bulk_body(article_ids, payload):
            for aid in article_ids:
                yield serialize(dict(update={'_id': aid}))
                yield payload

        body = ("\n".join(get_bulk_body(article_ids, payload))) + "\n"
        r = self.es.bulk(body=body, index=self.index, doc_type=settings.ES_ARTICLE_DOCTYPE)
Example #20
def test_examples_mixed_requests_and_notifications():
    """
    We break the spec here. The examples put an invalid jsonrpc request in the
    mix here.  but it's removed to test the rest, because we're not validating
    each request individually. Any invalid jsonrpc will respond with a single
    error message.

    The spec example includes this which invalidates the entire request:
        {"foo": "boo"},
    """
    methods = Methods(
        sum=lambda ctx, *args: SuccessResponse(sum(args), id=ctx.request.id),
        notify_hello=lambda ctx, *args: SuccessResponse(19, id=ctx.request.id),
        subtract=lambda ctx, *args: SuccessResponse(
            args[0] - sum(args[1:]), id=ctx.request.id
        ),
        get_data=lambda ctx: SuccessResponse(["hello", 5], id=ctx.request.id),
    )
    requests = serialize(
        [
            {"jsonrpc": "2.0", "method": "sum", "params": [1, 2, 4], "id": "1"},
            {"jsonrpc": "2.0", "method": "notify_hello", "params": [7]},
            {"jsonrpc": "2.0", "method": "subtract", "params": [42, 23], "id": "2"},
            {
                "jsonrpc": "2.0",
                "method": "foo.get",
                "params": {"name": "myself"},
                "id": "5",
            },
            {"jsonrpc": "2.0", "method": "get_data", "id": "9"},
        ]
    )
    response = dispatch_pure(
        requests,
        methods,
        extra=None,
        serialize=default_serialize,
        deserialize=default_deserialize,
    )
    expected = [
        {"jsonrpc": "2.0", "result": 7, "id": "1"},
        {"jsonrpc": "2.0", "result": 19, "id": "2"},
        {
            "jsonrpc": "2.0",
            "error": {"code": -32601, "message": "Method not found", "data": "foo.get"},
            "id": "5",
        },
        {"jsonrpc": "2.0", "result": ["hello", 5], "id": "9"},
    ]
    assert isinstance(response, BatchResponse)
    print(response.deserialized())
    for r in response.deserialized():
        assert r in expected
Example #21
    def _parse_users(cls, env_variable):
        users = env(env_variable)
        # The JSON representation of the variable is different on environments, so it needs to be parsed first
        parsed_users = users.replace("=>", ":")

        try:
            parsed_users = serialize(parsed_users)
        except ValueError:
            raise ValueError(
                f"{env_variable} has incorrect format;"
                f'\nexpected format: [{{"email": "", "organisation": "", "role": ""}}]'
                f"\nbut got: {users}")

        return parsed_users
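The "=>" to ":" rewrite above turns a Ruby-hash-style dump into parseable JSON. Since serialize() raises ValueError on bad input here, it evidently parses the string, so json.loads stands in for it in this standalone check:

import json

users = '[{"email" => "a@example.com", "organisation" => "acme", "role" => "admin"}]'
parsed = json.loads(users.replace("=>", ":"))
print(parsed[0]["role"])  # admin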
Example #22
    def _get_internal_users_list(cls):
        admin_users = env("INTERNAL_USERS")
        # The JSON representation of the variable is different on environments, so it needs to be parsed first
        parsed_admin_users = admin_users.replace("=>", ":")

        try:
            serialized_admin_users = serialize(parsed_admin_users)
        except ValueError:
            raise ValueError(
                f"INTERNAL_USERS has incorrect format;"
                f'\nexpected format: [{{"email": "", "role": ""}}]'
                f"\nbut got: {admin_users}")

        return serialized_admin_users
Example #23
    def bulk_update(self, article_ids, script, params):
        """
        Execute a bulk update script with the given params on the given article ids. 
        """
        payload = serialize(dict(script=script, params=params))

        def get_bulk_body(article_ids, payload):
            for aid in article_ids:
                yield serialize(dict(update={'_id': aid}))
                yield payload

        body = ("\n".join(get_bulk_body(article_ids, payload))) + "\n"
        r = self.es.bulk(body=body,
                         index=self.index,
                         doc_type=settings.ES_ARTICLE_DOCTYPE)
Example #24
def test_examples_mixed_requests_and_notifications():
    """
    We break the spec here. The examples put an invalid jsonrpc request in the mix here.
    but it's removed to test the rest, because we're not validating each request
    individually. Any invalid jsonrpc will respond with a single error message.

    The spec example includes this which invalidates the entire request:
        {"foo": "boo"},
    """
    methods = Methods(
        **{
            "sum": lambda *args: sum(args),
            "notify_hello": lambda *args: 19,
            "subtract": lambda *args: args[0] - sum(args[1:]),
            "get_data": lambda: ["hello", 5],
        }
    )
    requests = serialize(
        [
            {"jsonrpc": "2.0", "method": "sum", "params": [1, 2, 4], "id": "1"},
            {"jsonrpc": "2.0", "method": "notify_hello", "params": [7]},
            {"jsonrpc": "2.0", "method": "subtract", "params": [42, 23], "id": "2"},
            {
                "jsonrpc": "2.0",
                "method": "foo.get",
                "params": {"name": "myself"},
                "id": "5",
            },
            {"jsonrpc": "2.0", "method": "get_data", "id": "9"},
        ]
    )
    response = dispatch_pure(
        requests, methods, convert_camel_case=False, context=NOCONTEXT, debug=True
    )
    expected = [
        {"jsonrpc": "2.0", "result": 7, "id": "1"},
        {"jsonrpc": "2.0", "result": 19, "id": "2"},
        {
            "jsonrpc": "2.0",
            "error": {"code": -32601, "message": "Method not found", "data": "foo.get"},
            "id": "5",
        },
        {"jsonrpc": "2.0", "result": ["hello", 5], "id": "9"},
    ]
    assert isinstance(response, BatchResponse)
    for r in response.deserialized():
        assert r in expected
Example #25
    def send(
        self,
        request: Union[str, Dict, List],
        trim_log_values: bool = False,
        validate_against_schema: bool = True,
        **kwargs: Any
    ) -> Response:
        """
        Send a request, passing the whole JSON-RPC request object.

        After sending, logs, validates and parses.

        >>> client.send('{"jsonrpc": "2.0", "method": "ping", "id": 1}')
        <Response[1]>

        Args:
            request: The JSON-RPC request. Can be either a JSON-encoded string or a
                Request/Notification object.
            trim_log_values: Abbreviate the log entries of requests and responses.
            validate_against_schema: Validate response against the JSON-RPC schema.
            kwargs: Clients can use this to configure a single request. For example,
                HTTPClient passes this through to `requests.Session.send()`.

        Returns:
            A Response object; no response data is expected in the case of a
            Notification.
        """
        # We need both the serialized and deserialized version of the request
        if isinstance(request, str):
            request_text = request
            request_deserialized = deserialize(request)
        else:
            request_text = serialize(request)
            request_deserialized = request
        batch = isinstance(request_deserialized, list)
        response_expected = batch or "id" in request_deserialized
        self.log_request(request_text, trim_log_values=trim_log_values)
        response = self.send_message(
            request_text, response_expected=response_expected, **kwargs
        )
        self.log_response(response, trim_log_values=trim_log_values)
        self.validate_response(response)
        response.data = parse(
            response.text, batch=batch, validate_against_schema=validate_against_schema
        )
        # If received a single error response, raise
        if isinstance(response.data, ErrorResponse):
            raise ReceivedErrorResponseError(response.data)
        return response
Example #26
File: amcates.py Project: amcat/amcat
    def bulk_insert(self, dicts, batch_size=1000, monitor=NullMonitor()):
        """
        Bulk insert the given articles in batches of batch_size
        """
        batches = list(toolkit.splitlist(dicts, itemsperbatch=batch_size)) if batch_size else [dicts]
        monitor = monitor.submonitor(total=len(batches))
        nbatches = len(batches)
        for i, batch in enumerate(batches):
            monitor.update(1, "Adding batch {iplus}/{nbatches}".format(iplus=i + 1, **locals()))
            props, articles = set(), {}
            for d in batch:
                props |= (set(d.keys()) - ALL_FIELDS)
                articles[d["id"]] = serialize(d)
            self.check_properties(props)
            body = get_bulk_body(articles)
            resp = self.es.bulk(body=body, index=self.index, doc_type=settings.ES_ARTICLE_DOCTYPE)
            if resp["errors"]:
                raise ElasticSearchError(resp)
Example #27
File: network.py Project: scivm/cloud
    def modules_add(self, modules, modules_tarball):
        """modules_add adds the specified modules to the picloud system.
        modules is a list of tuples, where each tuple is (name, timestamp).
        modules_tarball is a string representing the tarball of all the included modules."""
        packedMods = Packer()
        packedMods.add(serialize(modules))
        packedMods.add(modules_tarball)        
        data = packedMods.finish()

        resp = self.send_request(self.modules_add_query,
                                 {'data': data,
                                  'hostname': str(self.hostname),
                                  'language': 'python'})
        
        if 'ap_version' in resp:
            self.__ap_version = resp['ap_version']
            #cloudLog.info("network.py: modules_add(): result['ap_version'] of query: %s" % resp['ap_version'])
            
        cloudLog.info('network.py: modules_add(): ap_version is %s' % self.__ap_version)
Example #28
File: amcates.py Project: isususi/amcat
    def bulk_insert(self, dicts, batch_size=1000, monitor=NullMonitor()):
        """
        Bulk insert the given articles in batches of batch_size
        """
        batches = list(toolkit.splitlist(
            dicts, itemsperbatch=batch_size)) if batch_size else [dicts]
        monitor = monitor.submonitor(total=len(batches))
        nbatches = len(batches)
        for i, batch in enumerate(batches):
            monitor.update(
                1, "Adding batch {iplus}/{nbatches}".format(iplus=i + 1,
                                                            **locals()))
            props, articles = set(), {}
            for d in batch:
                props |= (set(d.keys()) - ALL_FIELDS)
                articles[d["id"]] = serialize(d)
            self.check_properties(props)
            body = get_bulk_body(articles)
            resp = self.es.bulk(body=body,
                                index=self.index,
                                doc_type=settings.ES_ARTICLE_DOCTYPE)
            if resp["errors"]:
                raise ElasticSearchError(resp)
Example #29
            return result
        except:
            try:
                exec(code, env)
            except:
                sys.stdout = __stdout
                import traceback
                buff = StringIO()
                traceback.print_exc(file=buff)
                #don't show rpc stack
                stack = buff.getvalue().replace('"<string>"',
                                                '"<JSON-RPC>"').split('\n')
                return '\n'.join([stack[0]] + stack[3:])
            else:
                result = sys.stdout.getvalue()
                sys.stdout = __stdout
                open(session_file, 'a+').write('\n%s' % code)
                return result

    def destroy(self, session_id):
        os.remove('../tmp/session_%s.py' % session_id)


if __name__ == '__main__':
    if valid_token():
        json.handle_cgi(Interpreter())
    else:
        print "Content-Type: application/json"
        print
        print json.serialize({"error": message})
Example #30
File: amcates.py Project: isususi/amcat
def _get_bulk_body(articles, action):
    for article_id, article in articles.items():
        yield serialize({action: {'_id': article_id}})
        yield article
Example #31
    def _start_event_loop(self):
        """
        Starts the actual event loop. Usually called by :meth:`Scheduler.start`
        """
        while True:
            if self.received_disconnect:
                break

            ts_now = int(timestamp())
            m_now = monotonic()
            events = self.poller.poll()

            if events.get(self.outgoing) == POLLIN:
                msg = self.outgoing.recv_multipart()
                self.process_message(msg)

            # TODO: distribute me!
            for hash_, cron in self.cron_jobs.items():
                # If the time is now, or passed
                if cron[0] <= ts_now:
                    msg = cron[1]
                    queue = cron[3]

                    # Run the msg
                    logger.debug("Time is: %s; Schedule is: %s - Running %s" %
                                 (ts_now, cron[0], msg))

                    self.send_request(msg, queue=queue)

                    # Update the next time to run
                    cron[0] = next(cron[2])
                    logger.debug("Next execution will be in %ss" %
                                 seconds_until(cron[0]))

            cancel_jobs = []
            for k, v in self.interval_jobs.iteritems():
                # TODO: Refactor this entire loop to be readable by humankind
                # The schedule time has elapsed
                if v[0] <= m_now:
                    msg = v[1]
                    queue = v[3]

                    logger.debug("Time is: %s; Schedule is: %s - Running %s" %
                                 (ts_now, v[0], msg))

                    # v[4] is the current remaining run_count
                    if v[4] != INFINITE_RUN_COUNT:
                        # If run_count was 0, we cancel the job
                        if v[4] <= 0:
                            cancel_jobs.append(k)
                        else:
                            # Decrement run_count
                            v[4] -= 1
                            # Persist the change to redis
                            try:
                                message = deserialize(self.redis_server.get(k))
                                new_headers = []
                                for header in message[1].split(','):
                                    if 'run_count:' in header:
                                        new_headers.append(
                                            'run_count:{}'.format(v[4]))
                                    else:
                                        new_headers.append(header)
                                message[1] = ",".join(new_headers)
                                self.redis_server.set(k, serialize(message))
                            except Exception as e:
                                logger.warning('Unable to update key in redis '
                                               'server: {}'.format(e))
                            # Perform the request since run_count still > 0
                            self.send_request(msg, queue=queue)
                            v[0] = next(v[2])
                    else:
                        # Scheduled job is in running infinitely
                        # Send job and update next schedule time
                        self.send_request(msg, queue=queue)
                        v[0] = next(v[2])

            for job in cancel_jobs:
                try:
                    logger.debug(
                        'Cancelling job due to run_count: {}'.format(job))
                    self.redis_server.delete(job)
                    self.redis_server.lrem('interval_jobs', 0, job)
                except Exception as e:
                    logger.warning('Unable to update key in redis '
                                   'server: {}'.format(e))
                del self.interval_jobs[job]

            if not self.maybe_send_heartbeat(events):
                break
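The loop above advances each job by calling next() on a stored iterator: cron[2] is a croniter, while interval jobs keep an IntervalIter in v[2]. A plausible minimal IntervalIter, offered as an assumption about eventmq's actual implementation:

class IntervalIter(object):
    """Yields monotonic deadlines spaced interval_secs apart."""

    def __init__(self, start, interval_secs):
        self.current = start
        self.interval_secs = interval_secs

    def __iter__(self):
        return self

    def __next__(self):
        # Advance one interval and return the new deadline
        self.current += self.interval_secs
        return self.current

    next = __next__  # Python 2 compatibility, matching iteritems() above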
Example #32
def api_schools(request):
    json = serializers.get_serializer("json")()
    return HttpResponse(json.serialize(School.objects.all().filter(featured=True), ensure_ascii=False))
Example #33
File: cmd.py Project: Cloudxtreme/leash
        return subprocess.check_output(code, shell=True)
    except subprocess.CalledProcessError as e:
        return e.output


def trace():
    import traceback, StringIO
    buff = StringIO.StringIO()
    traceback.print_exc(file=buff)
    return buff.getvalue()


if __name__ == '__main__':
    print "Content-Type: application/json"
    print
    import os
    from utils import valid_token
    response = {}
    if not os.environ['REMOTE_ADDR'] == os.environ['SERVER_ADDR']:
        response['error'] = 'You can access this script only from same server ' + \
                            '(Service.php script)'
    elif valid_token():
        try:
            response['result'] = shell_exec(stdin.read())
        except Exception, e:
            response['error'] = e.args[0]
            response['trace'] = trace()
    else:
        response['error'] = "The token is invalid"
    stdout.write(json.serialize(response))
Example #34
File: splitfs.py Project: moul/splitfs
def saveManifest(path, sf):
    fh = os.open(path, os.O_WRONLY | os.O_CREAT, 0777)
    os.write(fh, serialize(sf) + '\n')
    os.close(fh)
Example #35
File: views.py Project: zaebee/yasenput
def SerializeHTTPResponse(json):
        return HttpResponse(json.serialize(json), mimetype="application/json")
Example #36
    def get_bulk_body(article_ids, payload):
        for aid in article_ids:
            yield serialize(dict(update={'_id': aid}))
            yield payload
Example #37
File: amcates.py Project: amcat/amcat
def _get_bulk_body(articles, action):
    for article_id, article in articles.items():
        yield serialize({action: {'_id': article_id}})
        yield article
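A standalone check of the action/source line pairs this generator yields, with json.dumps standing in for serialize (consistent with Example #50's import):

import json

serialize = json.dumps

def _get_bulk_body(articles, action):
    for article_id, article in articles.items():
        yield serialize({action: {'_id': article_id}})
        yield article

articles = {1: serialize({"doc": {"title": "hello"}})}
print("\n".join(_get_bulk_body(articles, "update")) + "\n")
# {"update": {"_id": 1}}
# {"doc": {"title": "hello"}}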
Example #38
    def get_bulk_body(dicts):
        for article_dict in dicts:
            yield serialize(dict(index={'_id' : article_dict['id']}))
            yield serialize(article_dict)
Example #39
File: cmd.py Project: alex-tools/leash
    os.chdir('..')  # we are in cgi-bin
    try:
        return subprocess.check_output(code, shell=True)
    except subprocess.CalledProcessError as e:
        return e.output

def trace():
    import traceback, StringIO
    buff = StringIO.StringIO()
    traceback.print_exc(file=buff)
    return buff.getvalue()

if __name__ == '__main__':
    print "Content-Type: application/json"
    print
    import os
    from utils import valid_token
    response = {}
    if not os.environ['REMOTE_ADDR'] == os.environ['SERVER_ADDR']:
        response['error'] = 'You can access this script only from same server ' + \
                            '(Service.php script)'
    elif valid_token():
        try:
            response['result'] = shell_exec(stdin.read())
        except Exception, e:
            response['error'] = e.args[0]
            response['trace'] = trace()
    else:
        response['error'] = "The token is invalid"
    stdout.write(json.serialize(response))
Example #40
File: python.py Project: roycepope/leash
            return result
        except:
            try:
                exec (code, env)
            except:
                sys.stdout = __stdout
                import traceback

                buff = StringIO()
                traceback.print_exc(file=buff)
                # don't show rpc stack
                stack = buff.getvalue().replace('"<string>"', '"<JSON-RPC>"').split("\n")
                return "\n".join([stack[0]] + stack[3:])
            else:
                result = sys.stdout.getvalue()
                sys.stdout = __stdout
                open(session_file, "a+").write("\n%s" % code)
                return result

    def destroy(self, session_id):
        os.remove("../tmp/session_%s.py" % session_id)


if __name__ == "__main__":
    if valid_token():
        json.handle_cgi(Interpreter())
    else:
        print "Content-Type: application/json"
        print
        print json.serialize({"error": message})
Example #41
    def get_bulk_body(dicts):
        for article_dict in dicts:
            yield serialize(dict(index={'_id': article_dict['id']}))
            yield serialize(article_dict)
Example #42
def SerializeHTTPResponse(json):
    return HttpResponse(json.serialize(json), mimetype="application/json")
Example #44
    def on_schedule(self, msgid, message):
        """
        """
        logger.info("Received new SCHEDULE request: {}".format(message))

        queue = message[0]
        headers = message[1]
        interval = int(message[2])
        cron = str(message[4])
        run_count = self.get_run_count_from_headers(headers)

        schedule_hash = self.schedule_hash(message)

        # Notify if this is updating existing, or new
        if (schedule_hash in self.cron_jobs or
                schedule_hash in self.interval_jobs):
            logger.debug('Update existing scheduled job with %s'
                         % schedule_hash)
        else:
            logger.debug('Creating a new scheduled job with %s'
                         % schedule_hash)

        # If interval is negative, cron MUST be populated
        if interval >= 0:
            inter_iter = IntervalIter(monotonic(), interval)

            self.interval_jobs[schedule_hash] = [
                next(inter_iter),
                message[3],
                inter_iter,
                queue,
                run_count
            ]

            if schedule_hash in self.cron_jobs:
                self.cron_jobs.pop(schedule_hash)
        else:
            ts = int(timestamp())
            c = croniter(cron)
            c_next = next(c)
            if ts >= c_next:
                # If the next execution time has passed move the iterator to
                # the following time
                c_next = next(c)

            self.cron_jobs[schedule_hash] = [
                c_next, message[3], c, None]

            if schedule_hash in self.interval_jobs:
                self.interval_jobs.pop(schedule_hash)

        # Persist the scheduled job
        try:
            if schedule_hash not in self.redis_server.lrange(
                    'interval_jobs', 0, -1):
                self.redis_server.lpush('interval_jobs', schedule_hash)
            self.redis_server.set(schedule_hash, serialize(message))
            self.redis_server.save()
            logger.debug('Saved job {} with hash {} to redis'.format(
                message, schedule_hash))
        except redis.ConnectionError:
            logger.warning('Could not contact redis server. Unable to '
                           'guarantee persistence.')
        except Exception as e:
            logger.warning(str(e))

        # Send a request in haste mode, decrement run_count if needed
        if 'nohaste' not in headers:
            if run_count > 0 or run_count == INFINITE_RUN_COUNT:
                # Don't allow run_count to decrement below 0
                if run_count > 0:
                    self.interval_jobs[schedule_hash][4] -= 1
                self.send_request(message[3], queue=queue)
Example #45
    def pack_jids(jids):
        packedJids = Packer()
        serialized_jids = serialize(encode_maybe_xrange(jids))
        packedJids.add(serialized_jids)
        return packedJids.finish(), serialized_jids
Example #46
    def _start_event_loop(self):
        """
        Starts the actual event loop. Usually called by :meth:`Scheduler.start`
        """
        while True:
            if self.received_disconnect:
                break

            ts_now = int(timestamp())
            m_now = monotonic()
            events = self.poller.poll()

            if events.get(self.outgoing) == POLLIN:
                msg = self.outgoing.recv_multipart()
                self.process_message(msg)

            # TODO: distribute me!
            for hash_, cron in self.cron_jobs.items():
                # If the time is now, or passed
                if cron[0] <= ts_now:
                    msg = cron[1]
                    queue = cron[3]

                    # Run the msg
                    logger.debug("Time is: %s; Schedule is: %s - Running %s"
                                 % (ts_now, cron[0], msg))

                    self.send_request(msg, queue=queue)

                    # Update the next time to run
                    cron[0] = next(cron[2])
                    logger.debug("Next execution will be in %ss" %
                                 seconds_until(cron[0]))

            cancel_jobs = []
            for k, v in self.interval_jobs.iteritems():
                # The schedule time has elapsed
                if v[0] <= m_now:
                    msg = v[1]
                    queue = v[3]

                    logger.debug("Time is: %s; Schedule is: %s - Running %s"
                                 % (ts_now, v[0], msg))

                    if v[4] != INFINITE_RUN_COUNT:
                        # Decrement run_count
                        v[4] -= 1
                        # If run_count was 0, we cancel the job
                        if v[4] <= 0:
                            cancel_jobs.append(k)
                        # Otherwise we run the job
                        else:
                            # Send job and update next schedule time
                            self.send_request(msg, queue=queue)
                            v[0] = next(v[2])
                            # Rename redis key and save new run_count counter
                            try:
                                self.redis_server.rename(k,
                                                         self.schedule_hash(v))
                                self.redis_server.set(self.schedule_hash(v),
                                                      serialize(v))
                                self.redis_server.save()
                            except redis.ConnectionError:
                                logger.warning("Couldn't contact redis server")
                            except Exception as e:
                                logger.warning(
                                    'Unable to update key in redis '
                                    'server: {}'.format(e.message))
                    else:
                        # Scheduled job is in running infinitely
                        # Send job and update next schedule time
                        self.send_request(msg, queue=queue)
                        v[0] = next(v[2])
                        # Persist changes to redis
                        try:
                            self.redis_server.set(
                                self.schedule_hash(v), serialize(v))
                            self.redis_server.save()
                        except redis.ConnectionError:
                            logger.warning("Couldn't contact redis server")
                        except Exception as e:
                            logger.warning(
                                'Unable to update key in redis '
                                'server: {}'.format(e.message))

            for job in cancel_jobs:
                message = self.interval_jobs[job][1]
                self.unschedule_job(message)
                del self.interval_jobs[job]

            if not self.maybe_send_heartbeat(events):
                break
Example #47
def api_schools(request):
    json = serializers.get_serializer("json")()
    return HttpResponse(
        json.serialize(School.objects.all().filter(featured=True),
                       ensure_ascii=False))
Example #48
    def on_schedule(self, msgid, message):
        """
        """
        logger.info("Received new SCHEDULE request: {}".format(message))

        queue = message[0]
        headers = message[1]
        interval = int(message[2])
        cron = str(message[4])

        schedule_hash = self.schedule_hash(message)

        # Notify if this is updating existing, or new
        if (schedule_hash in self.cron_jobs or
                schedule_hash in self.interval_jobs):
            logger.debug('Update existing scheduled job with %s'
                         % schedule_hash)
        else:
            logger.debug('Creating a new scheduled job with %s'
                         % schedule_hash)

        # If interval is negative, cron MUST be populated
        if interval >= 0:
            inter_iter = IntervalIter(monotonic(), interval)

            self.interval_jobs[schedule_hash] = [
                next(inter_iter),
                message[3],
                inter_iter,
                queue,
                self.get_run_count_from_headers(headers)
            ]

            if schedule_hash in self.cron_jobs:
                self.cron_jobs.pop(schedule_hash)
        else:
            ts = int(timestamp())
            c = croniter(cron)
            c_next = next(c)
            if ts >= c_next:
                # If the next execution time has passed move the iterator to
                # the following time
                c_next = next(c)

            self.cron_jobs[schedule_hash] = [c_next,
                                             message[3],
                                             c,
                                             None]

            if schedule_hash in self.interval_jobs:
                self.interval_jobs.pop(schedule_hash)

        # Persist the scheduled job
        try:
            if schedule_hash not in self.redis_server.lrange(
                    'interval_jobs', 0, -1):
                self.redis_server.lpush('interval_jobs', schedule_hash)
            self.redis_server.set(schedule_hash, serialize(message))
            self.redis_server.save()
        except redis.ConnectionError:
            logger.warning('Could not contact redis server')
        except Exception as e:
            logger.warning(str(e))

        if 'nohaste' not in headers:
            self.send_request(message[3], queue=queue)
Example #49
    def __str__(self) -> str:
        return serialize(
            sort_response(
                dict(jsonrpc=self.jsonrpc, result=self.result, id=self.id)))
Example #50
"""Demonstrates processing a batch of 100 requests asynchronously"""
import asyncio
from json import dumps as serialize
from jsonrpcserver import method, async_dispatch as dispatch


@method
async def sleep_():
    await asyncio.sleep(1)


async def handle(request):
    return await dispatch(request)


if __name__ == "__main__":
    request = serialize([{"jsonrpc": "2.0", "method": "sleep_"} for _ in range(100)])
    asyncio.get_event_loop().run_until_complete(handle(request))
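To observe the concurrency, time the run with this variant of the __main__ block above. Assuming the async dispatcher gathers the batch rather than awaiting each member in sequence (an assumption about jsonrpcserver's internals), the 100 one-second sleeps should finish in roughly one second:

if __name__ == "__main__":
    import time

    request = serialize([{"jsonrpc": "2.0", "method": "sleep_"} for _ in range(100)])
    start = time.monotonic()
    asyncio.get_event_loop().run_until_complete(handle(request))
    print("elapsed: %.2fs" % (time.monotonic() - start))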