Example #1
0
def get_ranks(dataset, system):
    """
    Return the rank of the first correct answer returned by the
    system.
    """

    results = []
    oracle = CachedOracleSystem(dataset)
    all_expressions = set()
    for _, expressions in oracle.queries.values():
        all_expressions |= set(expressions)
    worst_possible_rank = len(all_expressions)
    logger.info("Number of possible expressions: %d", worst_possible_rank)
    for query, target_entities in dataset:
        logger.debug("Evaluating query %r", query)
        system_expressions = system.get_best_expressions(query)
        _, oracle_expressions = oracle.get_best_results_and_expressions(query)
        found_rank = get_rank(system_expressions, oracle_expressions, worst_possible_rank)
        logger.debug("Found rank: %r", found_rank)
        results.append({'query': query,
                        'target': target_entities,
                        'rank': found_rank})
    return results
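The snippet above assumes a get_rank helper that is not shown. A minimal sketch of what it might look like, under the assumption that ranks are 1-based and fall back to the worst possible rank when nothing matches:

def get_rank(system_expressions, oracle_expressions, worst_possible_rank):
    # Hypothetical sketch; the real helper may weight or deduplicate differently.
    oracle_set = set(oracle_expressions)
    for rank, expression in enumerate(system_expressions, start=1):
        if expression in oracle_set:
            return rank
    return worst_possible_rank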
Example #2
0
def restoredb(pg_env, pg_restore_binary, database_dump_path, dump_name):

    env = os.environ.copy()
    env.update(pg_env)

    answer = raw_input("This command will restore this dump into database %s. "
                       "Continue? (y)es, (N)o? " % env['PGDATABASE'])
    if answer != 'y':
        logger.info("Aborting!")
        return

    db_dump_file_name = os.path.join(database_dump_path, dump_name)
    if not os.path.isfile(db_dump_file_name):
        logger.error("file %s does not exist: " % db_dump_file_name)
        return

    logger.debug("Restoring %s" % db_dump_file_name)
    cmd = (pg_restore_binary, "-d", env['PGDATABASE'], "-O", "-x",
           db_dump_file_name)
    logger.trace("Executing %s" % str(cmd))
    proc = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()

    if stderr != '':
        logger.error("An error occured while calling pg_restore: %s " % stderr)
        return
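A hypothetical invocation of the function above. The PG* keys follow libpq's standard environment variables, though the exact set a deployment needs may differ:

pg_env = {
    "PGHOST": "localhost",      # assumed connection settings
    "PGPORT": "5432",
    "PGUSER": "postgres",
    "PGDATABASE": "appdb",
}
restoredb(pg_env, "/usr/bin/pg_restore", "/var/backups/pg", "appdb.dump")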
Example #3
0
 def new_session_prefork(self, comp_id):
     """
     Start up a new kernel asynchronously on a specified computer and put it
     in the prefork queue. Also check whether there are any messages in
     stdout/stderr and fetch them - we don't need or expect anything, but
     if output is left to accumulate the computer will eventually lock up.
     """
     channel = self._clients[comp_id]["channel"]
     if channel.recv_ready():
         logger.debug("computer %s has stdout output: %s",
                      comp_id, channel.recv(2**15))
     if channel.recv_stderr_ready():
         logger.debug("computer %s has stderr output: %s",
                      comp_id, channel.recv_stderr(2**15))
     
     resource_limits = self._comps[comp_id].get("resource_limits")
     def cb(reply):
         if reply["type"] == "success":
             kernel_id = reply["content"]["kernel_id"]
             self._setup_session(reply, comp_id, timeout=float('inf'))
             self._kernel_queue.put((kernel_id, comp_id))
             logger.info("Started preforked kernel on %s: %s", comp_id[:4], kernel_id)
         else:
             logger.error("Error starting prefork kernel on computer %s: %s", comp_id, reply)
     logger.info("Trying to start kernel on %s", comp_id[:4])
     self._sender.send_msg_async({"type":"start_kernel", "content": {"resource_limits": resource_limits}}, comp_id, callback=cb)
Example #4
0
    def _setup_classpath_runtime_binary(self):
        """
        Returns the location of the mendix runtime files and the
        java classpath or None if the classpath cannot be determined
        (i.e. the Mendix Runtime is not available on this system)
        """

        logger.debug("Running from binary distribution.")
        classpath = []

        if not self._runtime_path:
            logger.debug("runtime_path is empty, no classpath can be "
                         "determined")
            return []

        if self.runtime_version < 5:
            classpath.extend([
                os.path.join(self._runtime_path, 'server', '*'),
                os.path.join(self._runtime_path, 'server', 'lib', '*'),
                os.path.join(self._runtime_path, 'runtime', '*'),
                os.path.join(self._runtime_path, 'runtime', 'lib', '*'),
            ])
        else:
            classpath.extend([
                os.path.join(self._runtime_path, 'runtime', 'felix', 'bin',
                             'felix.jar'),
                os.path.join(self._runtime_path, 'runtime', 'lib',
                             'com.mendix.xml-apis-1.4.1.jar')
            ])

        return classpath
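A plausible consumer of the returned entries, joining them with the platform path separator for java -cp (the launcher function and main class below are placeholders; the JVM expands the 'dir/*' wildcard entries itself):

import os
import subprocess

def launch_runtime(classpath, main_class, *args):
    # classpath is the list of entries returned above
    cmd = ["java", "-cp", os.pathsep.join(classpath), main_class]
    return subprocess.call(cmd + list(args))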
Example #5
0
 def run(self):
     if self.parent.sock is None:
         return
     
     data = ""
     while not self.stopflag:
         rfds, _, efds = select.select([self.parent.sock], [], [self.parent.sock], 0.1)
         if len(efds) > 0:
             logger.error("remote client error")
             break
             
         if len(rfds) < 1:
             continue
         try:
             data = self.parent.sock.recv(SockClient.RECV_SIZE)
         except Exception:
             logger.error("recv error")
             break
             
         if data == "":
             logger.error("socket closed")
             break
         
         logger.debug("data from %s:%d -> %s" % (self.parent.ip, self.parent.port, data))
         sigObject.emit(signals.SIG_DATA_RECVED, self.parent._id, data)
     
     self.parent.close(self.stopflag)
     logger.debug("tcp client stopped")
Example #6
0
 def __init__(self, dealer, id, connection, lifespan, timeout):
     self._on_stop = None
     self._dealer = dealer
     self.id = id
     self.executing = 0
     self.status = "starting"
     now = time.time()
     self.hard_deadline = now + lifespan
     self.timeout = timeout
     if timeout > 0:
         self.deadline = now + self.timeout
     self.session = jupyter_client.session.Session(key=connection["key"])
     self.channels = {}
     context = zmq.Context.instance()
     address = connection["ip"]
     if ":" in address:
         address = "[{}]".format(address)
     for channel, socket_type in (
             ("shell", zmq.DEALER), ("iopub", zmq.SUB), ("hb", zmq.REQ)):
         socket = context.socket(socket_type)
         socket.connect("tcp://{}:{}".format(address, connection[channel]))
         stream = zmq.eventloop.zmqstream.ZMQStream(socket)
         stream.channel = channel
         self.channels[channel] = stream
     self.channels["iopub"].socket.subscribe(b"")
     self.start_hb()
     logger.debug("KernelConnection initialized")
Example #7
0
                    def f_wrapper():
                        need_lock = getattr(f, 'need_lock', True)
                        
                        # Warning : put the bottle.response set inside the wrapper
                        # because outside it will break bottle
                        d = {}
                        method = getattr(f, 'method', 'get').lower()
                        for aname in args:
                            v = None
                            if method == 'post':
                                v = bottle.request.forms.get(aname, None)
                                # Post args are zlibed and cPickled
                                if v is not None:
                                    v = zlib.decompress(v)
                                    v = cPickle.loads(v)
                            elif method == 'get':
                                v = bottle.request.GET.get(aname, None)
                            if v is None:
                                raise Exception('Missing argument %s' % aname)
                            d[aname] = v
                        if need_lock:
                            logger.debug("HTTP: calling lock for %s" % fname)
                            lock.acquire()

                        try:
                            ret = f(**d)
                        finally:
                            # Release the lock even if f raises an exception
                            if need_lock:
                                lock.release()

                        # 'encode' is read from the handler, but only JSON is emitted here
                        encode = getattr(f, 'encode', 'json').lower()
                        return json.dumps(ret)
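A sketch of the matching client side implied by the wrapper above, assuming POST arguments really are pickled and then zlib-compressed before being sent:

import zlib
import cPickle  # Python 2, to match the handler

def encode_post_arg(value):
    # Inverse of the zlib.decompress + cPickle.loads done in f_wrapper
    return zlib.compress(cPickle.dumps(value))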
Example #8
0
    def link_data_folder(self):
        logger.debug("Linking data folders to %s" % self.options.data_path)
        os.chmod(os.path.join(self.options.data_path, "db"), 0o700)
        self._ln_sf(os.path.join(self.options.data_path, "db"), os.path.join(self.shared, "db"))
        self._ln_sf(os.path.join(self.options.data_path, "log"), os.path.join(self.shared, "log"))
        self._ln_sf(os.path.join(self.options.data_path, "solr/data"), os.path.join(self.shared, "solr/data"))
        self._ln_sf(os.path.join(self.options.data_path, "system"), os.path.join(self.shared, "system"))

        self._ln_sf(os.path.join(self.shared, "db"), os.path.join(self.release_path, "postgres-db"))
        self._ln_sf(os.path.join(self.shared, "log"), os.path.join(self.release_path, "log"))
        self._ln_sf(os.path.join(self.shared, "solr/data"), os.path.join(self.release_path, "solr/data"))
        self._ln_sf(os.path.join(self.shared, "tmp"), os.path.join(self.release_path, "tmp"))
        self._ln_sf(os.path.join(self.shared, "system"), os.path.join(self.release_path, "system"))
        logger.debug("Linking nginx logs to %s/vendor/nginx/nginx_dist/nginx_data/logs" % self.release_path)
        self._mkdir_p(os.path.join(self.shared, "log/nginx"))
        self._ln_sf(
            os.path.join(self.shared, "log/nginx"),
            os.path.join(self.release_path, "vendor/nginx/nginx_dist/nginx_data/logs"),
        )

        self._ln_sf(
            os.path.join(self.release_path, "packaging/chorus_control.sh"),
            os.path.join(self.options.chorus_path, "chorus_control.sh"),
        )
        self._ln_sf(
            os.path.join(self.release_path, "packaging/setup/chorus_server"),
            os.path.join(self.options.chorus_path, "chorus_server"),
        )
        self.executor.run("chmod -R 0555 %s" % os.path.join(self.release_path, "public"))
Example #9
0
 def _reload_nginx(self):
     logger.info('reload nginx start')
     if not DEBUG:
         subprocess.call("nginx -t && nginx -s reload", shell=True)
     else:
         logger.debug('fake reload nginx')
     logger.info('reload nginx finish')
Example #10
0
def inject_variables(id, task, playbooks, adjust):
    variables = {}

    # post arguments
    args = request.json.get('args', {})
    variables.update(args)

    # node info (ip ...)
    node_info = get_node_info(id)
    node_info['task'] = task
    variables.update(node_info)

    # cluster info
    cluster_info = get_cluster_info(id)
    variables.update(cluster_info)

    # metadata
    metadata = get_metadata()
    variables.update(metadata)

    if adjust is not None:
        adjust(variables)

    logger.debug('inject variables: {0}'.format(variables))

    for pb in playbooks:
        pb['variables'] = eval(pb['variable_template'], variables)
        pb['variables'].update(args)
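For illustration only: variable_template is eval'ed against the collected variables, so it can be any expression that produces a dict. A toy example of the mechanism (the template string is made up):

playbooks = [{"variable_template": "{'host': ip, 'current_task': task}"}]
variables = {"ip": "10.0.0.5", "task": "deploy"}
for pb in playbooks:
    # eval uses 'variables' as the globals mapping for name lookup
    pb["variables"] = eval(pb["variable_template"], variables)
# pb["variables"] -> {'host': '10.0.0.5', 'current_task': 'deploy'}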
Example #11
0
def create():
    if request.method == 'POST':
        ip = request.json['ip']
        username = request.json['username']
        password = request.json['password']
        role_id = request.json['role_id']
        role_name = request.json['role_name']
        logger.debug(request.json)
        try:
            db = get_db()
            cursor = db.cursor()
            cursor.execute(
                "INSERT INTO node (ip, username, password, role_id, status, deleted) VALUES (?, ?, ?, ?, 'created', 0)",
                (ip, username, password, role_id)
            )
            # lastrowid is the id of the newly inserted node row
            role_id = cursor.lastrowid
            db.commit()
            cursor.close()

            add_host(role_name, ip)
            return jsonify({'status': 1, "role_id": role_id})
        except Exception as ex:
            logger.error(ex)
            logger.error(traceback.format_exc())
            return jsonify({'status': -1, 'error': str(ex)})
Example #12
0
def get_meta_from_registry(app, meta_version, registry=None):
    logger.debug("ready get meta version %s for app %s from registry" %
                 (meta_version, app))
    meta_version = normalize_meta_version(meta_version)
    if not registry:
        registry = PRIVATE_REGISTRY
    try:
        cli = get_docker_client(DOCKER_BASE_URL)
        # TODO check if the image already exits
        cli.pull(
            repository="%s/%s" % (registry, app),
            tag="meta-%s" % (meta_version, ),
            insecure_registry=True
        )
        image = "%s/%s:meta-%s" % (registry, app, meta_version)
        command = '/bin/sleep 0.1'
        c = cli.create_container(image=image, command=command)
        r = cli.get_archive(container=c.get('Id'), path='/lain.yaml')
        tar = tarfile.open(fileobj=StringIO(r[0].data))
        f = tar.extractfile('lain.yaml')
        y = yaml.safe_load(f.read())
        return y
    except Exception as e:
        logger.error("failed to get yaml from %s %s: %s" % (app, meta_version, e))
        raise Exception("failed to get yaml from %s %s: %s" %
                        (app, meta_version, e))
Example #13
0
    def getManagedPackages(self, managedList, node):
        # Allow the user to pass in managedList in case they need to perform multiple lookups
        if not managedList:
            managedList = topology.getManagedList()

        distroName, distroRel, distroVersion, arch = self.getDistroArch(node)

        logger.debug("In getManagedPackages, node=%s, distroName=%s, distroRel=%s, distroVersion=%s" %
                     (node, distroName, distroRel, distroVersion))

        uniquePackages = set()
        for runtime in managedList['managedRuntime']:

            if runtime['distro'] != distroName:
                continue

            if runtime['distroVersion'] != distroRel and runtime['distroVersion'] != distroVersion:
                continue

            for package in runtime['autoportPackages']:
                uniquePackages.add(package['name'])
            for package in runtime['autoportChefPackages']:
                uniquePackages.add(package['name'])
            for package in runtime['userPackages']:
                if 'arch' in package:
                    if package['arch'] == arch:
                        uniquePackages.add(package['name'])
                else:
                    uniquePackages.add(package['name'])

        packages = ",".join(list(uniquePackages))

        logger.debug("Leaving getManagedPackages, node=%s, cnt packages[]=%d, packages=%s" % (node, len(uniquePackages), packages))

        return packages
Example #14
0
    def do_work(self, s, returns_queue, c):
        ## restore default signal handler for the workers:
        # but on android, we are a thread, so don't do it
        if not is_android:
            signal.signal(signal.SIGTERM, signal.SIG_DFL)

        self.set_proctitle()

        print "I STOP THE http_daemon", self.http_daemon
        if self.http_daemon:
            self.http_daemon.shutdown()

        timeout = 1.0
        self.checks = []
        self.returns_queue = returns_queue
        self.s = s
        self.t_each_loop = time.time()
        while True:
            begin = time.time()
            msg = None
            cmsg = None

            # If we are dying (big problem!) we do not
            # take new jobs, we just finished the current one
            if not self.i_am_dying:
                # REF: doc/shinken-action-queues.png (3)
                self.get_new_checks()
                # REF: doc/shinken-action-queues.png (4)
                self.launch_new_checks()
            # REF: doc/shinken-action-queues.png (5)
            self.manage_finished_checks()

            # Now get order from master
            try:
                cmsg = c.get(block=False)
                if cmsg.get_type() == 'Die':
                    logger.debug("[%d] Dad say we are dying..." % self.id)
                    break
            except:
                pass

            if self._mortal and self._idletime > 2 * self._timeout:
                logger.warning("[%d] Timeout, Harakiri" % self.id)
                # The master must be dead and we are lonely, we must die
                break

            # Look if we are dying, and if we finish all current checks
            # if so, we really die, our master poller will launch a new
            # worker because we were too weak to manage our job :(
            if len(self.checks) == 0 and self.i_am_dying:
                logger.warning("[%d] I DIE because I cannot do my job as I should (too many open files?)... forgot me please." % self.id)
                break

            # Manage a possible system time change (our 'begin' is adjusted by the diff)
            diff = self.check_for_system_time_change()
            begin += diff

            timeout -= time.time() - begin
            if timeout < 0:
                timeout = 1.0
Example #15
0
    def search(self, entity):
        """
        Find entities related to the given one
        """
        assert isinstance(entity, unicode), "Expected unicode, got %r" % type(entity)
        uri = ensure_prefixed(entity)
        logger.debug("Getting triples for %s", uri)
        triples = self.store.query("""
            prefix fb: <http://rdf.freebase.com/ns/>
            SELECT *
            WHERE
            {
                %s ?r ?o .
                FILTER(isURI(?o)) .
                FILTER(!regex(?r, ".*type.*")) .
            }
            LIMIT 150
            """ % uri)

        logger.debug("Got triples")
        return [(uri, r, o) for r, o in triples]
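ensure_prefixed is not shown in these examples; a minimal sketch of what such a helper might do, assuming Freebase identifiers are normalized to the fb: prefix form:

def ensure_prefixed(entity):
    # Hypothetical normalization: '/m/02mjmr' -> 'fb:m.02mjmr'
    if entity.startswith("fb:"):
        return entity
    return "fb:" + entity.lstrip("/").replace("/", ".")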
Example #16
0
 def on_message(self, message):
     prefix, message = message.split(",", 1)
     id = prefix.split("/", 1)[0]
     message = jsonapi.loads(message)
     logger.debug("SockJSHandler.on_message: %s", message)
     msg_type = message["header"]["msg_type"]
     app = self.session.handler.application
     if id == "complete":
         if msg_type in ("complete_request", "object_info_request"):
             app.completer.registerRequest(self, message)
         return
     try:
         kernel = app.kernel_dealer.kernel(id)
     except KeyError:
         # Ignore messages to nonexistent or killed kernels.
         logger.warning("%s sent to nonexistent kernel %s", msg_type, id)
         return
     if id not in self.channels:
         self.channels[id] = SockJSChannelsHandler(self.send)
         self.channels[id].connect(kernel)
     if msg_type == "execute_request":
         stats_logger.info(StatsMessage(
             kernel_id=id,
             remote_ip=kernel.remote_ip,
             referer=kernel.referer,
             code=message["content"]["code"],
             execute_type="request"))
     self.channels[id].send(message)
Example #17
0
    def get_entity_score(self, entity):
        """
        Use the number of relations as a measure of importance.
        """
        entity = ensure_prefixed(entity)

        value = self.entity_scores.get(entity)
        if value:
            return value

        logger.debug("Entity %s not found in cache", entity)
        try:
            result = self.store.query("""
                prefix fb: <http://rdf.freebase.com/ns/>
                SELECT COUNT(*)
                WHERE
                {
                    %s ?r ?o .
                }
                """ % entity)
            score = int(result[0][0])
        except timeout:
            logger.exception("Timeout attempting to get count for entity: %s", entity)
            score = 0
        self.entity_scores[entity] = score
        return score
Example #18
0
def restoredb(config, dump_name):

    if not config.allow_destroy_db():
        logger.error(
            "Refusing to do a destructive database operation "
            "because the allow_destroy_db configuration option "
            "is set to false."
        )
        return False

    env = os.environ.copy()
    env.update(config.get_pg_environment())

    db_dump_file_name = os.path.join(config.get_database_dump_path(), dump_name)
    if not os.path.isfile(db_dump_file_name):
        logger.error("file %s does not exist: " % db_dump_file_name)
        return False

    logger.debug("Restoring %s" % db_dump_file_name)
    cmd = (config.get_pg_restore_binary(), "-d", env["PGDATABASE"], "-O", "-n", "public", "-x", db_dump_file_name)
    logger.trace("Executing %s" % str(cmd))
    proc = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()

    if stderr != "":
        logger.error("An error occured while calling pg_restore: %s " % stderr)
        return False

    return True
Example #19
0
    def set(self, key, value, expires=0):
        '''
        Set object with key.

        Args:
            key: cache key as str.
            value: object value.
            expires: cache time, default to 0 (using default expires time)

        >>> key = uuid.uuid4().hex
        >>> c = RedisClient('localhost')
        >>> c.set(key, u'Python\u4e2d\u6587')
        >>> c.get(key)
        u'Python\u4e2d\u6587'
        >>> c.set(key, ['A', 'B', 'C'])
        >>> c.get(key)
        ['A', 'B', 'C']
        >>> c.set(key, 'Expires after 1 sec', 1)
        >>> c.get(key)
        'Expires after 1 sec'
        >>> time.sleep(2)
        >>> c.get(key, 'Not Exist')
        'Not Exist'
        '''
        logger.debug('set cache: key = %s' % key)
        self._set(key, value, expires, use_pickle=True)
Example #20
0
    def unregister(self, msg):
        OwnGuid = self.msg.getValue(msg, 'OwnGuid')
        logger.debug("Unregister user {!s}...".format(self.guid))
        self.factory.delClient(self.guid)
        logger.debug("Done")
        self.factory.sendNotify(self.guid, self.mysql, 'OFFLINE_NOTIFY')
        self.mysql.delOnLineUser(self.guid)
Example #21
0
def getRadioMsg():
    while not radio.available(0):
        time.sleep(pause_between_get)
    receivedMessage = []
    radio.read(receivedMessage, radio.getDynamicPayloadSize())
    logger.debug(receivedMessage)
    return receivedMessage
Example #22
0
    def start(self, timeout=60, step=0.25):
        if self.check_pid():
            logger.error("The application process is already started!")
            return False

        cmd = self._config.get_java_cmd()
        env = self._config.get_java_env()

        try:
            logger.trace("[%s] Forking now..." % os.getpid())
            pid = os.fork()
            if pid > 0:
                self._pid = None
                logger.trace("[%s] Waiting for intermediate process to "
                             "exit..." % os.getpid())
                # prevent zombie process
                (waitpid, result) = os.waitpid(pid, 0)
                if result == 0:
                    logger.debug("The JVM process has been started.")
                    return True
                logger.error("Starting the JVM process did not succeed...")
                return False
        except OSError as e:
            logger.error("Forking subprocess failed: %d (%s)\n" %
                         (e.errno, e.strerror))
            return False
Example #23
0
    def _ssh_untrusted(self, cfg, client, comp_id):
        ip = socket.gethostbyname(cfg["host"])
        code = "%s '%s/receiver.py' '%s' '%s' '%s'"%(cfg["python"], cfg["location"], ip, comp_id, self.tmp_dir)
        logger.debug(code)
        ssh_stdin, ssh_stdout, ssh_stderr = client.exec_command(code)
        stdout_channel = ssh_stdout.channel

        # Wait for untrusted side to respond with the bound port using paramiko channels
        # Another option would be to have a short-lived ZMQ socket bound on the trusted
        # side and have the untrusted side connect to that and send the port
        output = ""
        stdout_channel.settimeout(2.0)
        polls = 0
        while output.count("\n")!=2:
            try:
                output += stdout_channel.recv(1024)
            except socket.timeout:
                polls+= 1
            if stdout_channel.closed:
                logger.error(
                    "An error occurred getting data from the untrusted side.")
                return None
            if polls>20:
                return None
        return int(output.split("\n")[0])
Example #24
0
 def on_message(self, message):
     prefix, json_message = message.split(",", 1)
     kernel_id = prefix.split("/", 1)[0]
     message = jsonapi.loads(json_message)
     logger.debug("KernelConnection.on_message: %s", message)
     application = self.session.handler.application
     if kernel_id == "complete":
         if message["header"]["msg_type"] in ("complete_request",
                                              "object_info_request"):
             application.completer.registerRequest(self, message)
         return
     try:
         if kernel_id not in self.channels:
             # handler may be None in certain circumstances (it seems to only be set
             # in GET requests, not POST requests, so even using it here may
             # only work with JSONP because of a race condition)
             kernel_info = application.km.kernel_info(kernel_id)
             self.kernel_info = {'remote_ip': kernel_info['remote_ip'],
                                 'referer': kernel_info['referer'],
                                 'timeout': kernel_info['timeout']}
         if message["header"]["msg_type"] == "execute_request":
             stats_logger.info(StatsMessage(
                 kernel_id=kernel_id,
                 remote_ip=self.kernel_info['remote_ip'],
                 referer=self.kernel_info['referer'],
                 code=message["content"]["code"],
                 execute_type='request'))
         if kernel_id not in self.channels:
             self.channels[kernel_id] = SockJSChannelsHandler(self.send)
             self.channels[kernel_id].open(application, kernel_id)
         self.channels[kernel_id].on_message(json_message)
     except KeyError:
         # Ignore messages to nonexistent or killed kernels.
         logger.info("%s message sent to nonexistent kernel: %s" %
                     (message["header"]["msg_type"], kernel_id))
Example #25
0
    def get(self, key, default=None):
        '''
        Get object by key.

        Args:
            key: cache key as str.
            default: default value if key not found. default to None.
        Returns:
            object or default value if not found.

        >>> key = uuid.uuid4().hex
        >>> c = RedisClient('localhost')
        >>> c.get(key)
        >>> c.get(key, 'DEFAULT_REDIS')
        'DEFAULT_REDIS'
        >>> c.set(key, u'hello redis')
        >>> c.get(key)
        u'hello redis'
        >>> c.set(key, 12345)
        >>> c.get(key)
        12345
        '''
        logger.debug('get cache: key = %s' % key)
        r = self._client.get(key)
        if r is None:
            return default
        return _safe_pickle_loads(r)
Example #26
0
 def open(self, kernel_id):
     logger.debug("entered ShellHandler.open for kernel %s", kernel_id)
     super(ShellHandler, self).open(kernel_id)
     self.kill_kernel = False
     self.shell_stream = self.km.create_shell_stream(self.kernel_id)
     self.shell_stream.on_recv(self._on_zmq_reply)
     self.msg_from_kernel_callbacks.append(self._reset_deadline)
Example #27
0
 def apply_connection(self, entities, connection):
     logger.debug("Applying connection %r to entities %r",
                  connection, entities)
     connection = [ensure_prefixed(part) for part in connection]
     if len(connection) == 1:
         results = self.store.query("""
             prefix fb: <http://rdf.freebase.com/ns/>
             SELECT ?o
             WHERE
             {
                 ?s %s ?o .
                 FILTER(?s IN (%s)) .
             }
             LIMIT 100
             """ % (connection[0], ','.join(entities)))
     elif len(connection) == 2:
         results = self.store.query("""
             prefix fb: <http://rdf.freebase.com/ns/>
             SELECT ?o2
             WHERE
             {
                 ?s %s ?o1 .
                 ?o1 %s ?o2
                 FILTER(?s IN (%s)) .
             }
             LIMIT 100
             """ % (connection[0], connection[1], ','.join(entities)))
     else:
         raise ValueError("Unexpected number of parts to connection")
     return [result[0] for result in results]
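Hypothetical usage, assuming a searcher instance and fb:-prefixed identifiers (the entity id and relation below are illustrative):

related = searcher.apply_connection(
    ["fb:m.02mjmr"],
    ["fb:people.person.place_of_birth"])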
Example #28
0
    def makePreview(self, path, maxDim=800):
        self.prevPath = join(path, 'pv_' + self.getBaseName() + self.getFileType())
        # If a file with the preview name exists, we assume it was created by
        # pytof earlier, and we exit
        if exists(self.prevPath):
            return

        width = self.image.size[0]
        height = self.image.size[1]
        if width > height and width > maxDim:
            newWidth = maxDim
            newHeight = int(maxDim * float(height) / width)
        elif height > width and height > maxDim:
            newWidth = int(maxDim * float(width) / height)
            newHeight = maxDim
        else:
            newWidth = 0
            newHeight = 0
        if (newWidth, newHeight) != (0, 0):
            out = self.image.resize((newWidth, newHeight), Image.ANTIALIAS)
        else:
            out = self.image

        if self.rotation == 'Rotated 90 CW':
            logger.debug('makePreview: Rotate')
            out = out.rotate(-90)
        else:
            logger.debug('makePreview: Do not rotate')
            
        out.save(self.prevPath, quality=95)
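One caveat when running this on a current toolchain: Image.ANTIALIAS was an alias of the Lanczos filter and was removed in Pillow 10. A version-tolerant resize sketch:

from PIL import Image

# Prefer the modern constant, fall back to the legacy alias on old Pillow
RESAMPLE = getattr(Image, "LANCZOS", getattr(Image, "ANTIALIAS", None))

def fit_within(image, max_dim=800):
    # Shrink the longer edge to max_dim, preserving the aspect ratio
    width, height = image.size
    if max(width, height) <= max_dim:
        return image
    scale = max_dim / float(max(width, height))
    return image.resize((int(width * scale), int(height * scale)), RESAMPLE)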
Example #29
0
 def _configure_logging(self):
     # try configure logging
     # catch:
     # - logsubscriber already exists -> ignore
     #   (TODO:functions to restart logging when config is changed?)
     # - logging already started -> ignore
     logger.debug("Setting up logging...")
     logging_config = self.config.get_logging_config()
     if len(logging_config) == 0:
         logger.warn("No logging settings found, this is probably not what "
                     "you want.")
         return
     for log_subscriber in logging_config:
         if log_subscriber["name"] != "*":
             m2eeresponse = self.client.create_log_subscriber(
                 log_subscriber)
             result = m2eeresponse.get_result()
             if result == 3:  # logsubscriber name exists
                 pass
             elif result != 0:
                 m2eeresponse.display_error()
         if "nodes" in log_subscriber:
             self.set_log_levels(log_subscriber["name"],
                                 log_subscriber["nodes"], force=True)
     self.client.start_logging()
Example #30
0
    def get_disk_usage(self):
        """
        Function return cpu usage on node.
        """
        ret_list = []
        cmd = "df -l | grep -v ^Filesystem "
        result = commands.getoutput(cmd)
        for item in result.splitlines():
            ret_list.append({})    

        col = ("source", "size", "avail", "pcent", "target")
        for item_col in col:
            i = 0
            cmd = "df -l --output=%s | awk 'NR>1 {print $0}'" % item_col
            result = commands.getoutput(cmd)
            for item in result.splitlines():
                ret_list[i][item_col] = item.strip()
                i += 1

        logger.debug(ret_list)
        #delete tmpfs: delete the one that does not begin with '/'
        for index in range(len(ret_list)-1, -1, -1):
            if re.match('/', ret_list[index]["source"]) is None:
                del(ret_list[index])
            else:
                #add column: util
                cmd = "iostat -x %s | grep  -A1 util | tail -1 | awk '{print $NF}' " % ret_list[index]["source"]
                result = commands.getoutput(cmd)
                ret_list[index]['util'] = float(result)*100
                #delete character '%'
                ret_list[index]['pcent'] = ("%.2f" % float(ret_list[index]['pcent'][:-1]))
            
        return ret_list
Example #31
0
    def sample_topic_latest_offset(self, kafka_client, cluster, topic):
        # {cluster1_topic1: offset_data, ...} holds the latest offset
        # collected for each topic on the previous sampling pass
        topic_latest_offset_lasttime = {}
        for i in range(0, self.count):
            timetuple = time.localtime()  # record the sampling time
            second_for_localtime1 = time.mktime(timetuple)  # sample time in seconds
            sample_time = time.strftime('%Y-%m-%d %H:%M:%S', timetuple)

            result = kafka_client.get_topic_latest_offset(topic)
            topic_name = topic.name.decode('utf-8')

            if result[0]:
                topic_latest_offset = result[1]
                logger.debug('cluster %s topic %s offset: %s' % (cluster, topic_name, topic_latest_offset))

                key = cluster + '_' + topic_name
                if key not in topic_latest_offset_lasttime:
                    topic_latest_offset_lasttime[key] = topic_latest_offset
                    topic_latest_offset_lasttime['sample_time_in_second'] = second_for_localtime1
                else:
                    temp_dict = {}
                    temp_dict['db'] = 'db_' + cluster  # database that stores the samples
                    temp_dict['topic'] = topic_name
                    temp_dict['sample_time'] = sample_time  # sample time as a string
                    temp_dict['sample_time_in_second'] = second_for_localtime1  # sample time in seconds
                    temp_dict['sample_type'] = 'topic'  # sample type
                    temp_dict['sample_data'] = [topic_latest_offset_lasttime['sample_time_in_second'], topic_latest_offset_lasttime[key], topic_latest_offset]  # sampled data
                    KafkaMonitor.data_queue.append(temp_dict)

                    topic_latest_offset_lasttime[key] = topic_latest_offset
                    topic_latest_offset_lasttime['sample_time_in_second'] = second_for_localtime1
            else:
                logger.error('failed to get the latest offset for cluster %s topic %s: %s' % (cluster, topic_name, result[1]))

            second_for_localtime2 = time.mktime(time.localtime())  # current time in seconds
            time_difference = second_for_localtime2 - second_for_localtime1
            if time_difference < self.interval:  # only sleep when this pass took less than the interval
                time.sleep(self.interval - time_difference)
Example #32
0
    def genmove(self):
        self.send_command(f'time_left black {self.time_per_move:d} 1')
        self.send_command(f'time_left white {self.time_per_move:d} 1')

        logger.debug("Board state: %s to play\n%s", self.whose_turn(), self.showboard())

        # Generate next move
        self.process.stdin.write(f"genmove {self.whose_turn()}\n")
        self.process.stdin.flush()

        updated = 0
        stdout = []
        stderr = []

        while updated < self.time_per_move * 2:
            out, err = self.drain()
            stdout.extend(out)
            stderr.extend(err)

            self.parse_status_update("".join(err))

            if out:
                break

            updated += 1
            sleep(1)

        # Confirm generated move with new line
        self.process.stdin.write("\n")
        self.process.stdin.flush()

        # Drain the rest of output
        out, err = self.drain()
        stdout.extend(out)
        stderr.extend(err)

        return stdout, stderr
Example #33
0
def get_collection(data, db_session, username):
    title = data.get('title')

    logger.info(LogMsg.START, username)
    user = check_user(username, db_session)
    if user is None:
        raise Http_error(400, Message.INVALID_USER)

    if user.person_id is None:
        logger.error(LogMsg.USER_HAS_NO_PERSON, username)
        raise Http_error(400, Message.Invalid_persons)

    validate_person(user.person_id, db_session)
    logger.debug(LogMsg.PERSON_EXISTS)

    if 'person_id' in data.keys() and username in ADMINISTRATORS:
        person_id = data.get('person_id')
    else:
        person_id = user.person_id

    collection_items = db_session.query(Collection).filter(
        and_(Collection.person_id == person_id,
             Collection.title == title)).order_by(
                 Collection.creation_date.desc()).all()

    result = []

    for item in collection_items:
        if item.book_id is None:
            book = {}
        else:
            book = get_book(item.book_id, db_session)
        result.append(book)

    logger.info(LogMsg.END)

    return result
Example #34
0
    def run(self):
        self.select_rlist.append(self.sock)

        self.keep_going = True
        while self.keep_going:
            readable, writable, exceptional = select(self.select_rlist, [], self.select_rlist, 0.5)

            for sock in readable:
                if sock == self.sock:
                    # new connection
                    conn, client_address = self.sock.accept()
                    logger.info('New %s connection from %s' % (self.__class__.__name__, str(client_address)))
                    self.select_rlist.append(conn)
                    self.client_socks.append(conn)
                    self.new_connection(conn)
                else:
                    # new data from client
                    data = sock.recv(4096)
                    if not data:
                        # connection closed by client
                        try:
                            peer = sock.getpeername()
                        except OSError:
                            peer = "UNKNOWN"
                        logger.info('%s connection %s closed' % (self.__class__.__name__, str(peer)))
                        sock.close()
                        self.select_rlist.remove(sock)
                        self.client_socks.remove(sock)
                    else:
                        try:
                            line = data.decode('UTF-8').strip()
                            if line != '':
                                logger.debug('%s got line: %s' % (self.__class__.__name__, line))
                                self.process_command(sock, line)
                        except UnicodeDecodeError:
                            self.send(sock, 'WHAT?')
                            logger.warning('%s received invalid data' % (self.__class__.__name__, ))
Example #35
0
 def get(self):
     logger.debug('RootHandler.get')
     args = self.request.arguments
     code = None
     language = args["lang"][0] if "lang" in args else None
     interacts = None
     if "c" in args:
         # If the code is explicitly specified
         code = "".join(args["c"])
     elif "z" in args:
         # If the code is base64-compressed
         try:
             z = "".join(args["z"])
             # We allow the user to strip off the ``=`` padding at the end
             # so that the URL doesn't have to have any escaping.
             # Here we add back the ``=`` padding if we need it.
             z += "=" * ((4 - (len(z) % 4)) % 4)
             if "interacts" in args:
                 interacts = "".join(args["interacts"])
                 interacts += "=" * ((4 - (len(interacts) % 4)) % 4)
                 interacts = zlib.decompress(
                     base64.urlsafe_b64decode(interacts))
             code = zlib.decompress(base64.urlsafe_b64decode(z))
         except Exception as e:
             self.set_status(400)
             self.finish("Invalid zipped code: %s\n" % (e.message, ))
             return
     if "q" in args:
         # The code is referenced by a permalink identifier.
         q = "".join(args["q"])
         try:
             self.application.db.get_exec_msg(q, self.return_root)
         except LookupError:
             self.set_status(404)
             self.finish("ID not found in permalink database")
     else:
         self.return_root(code, language, interacts)
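The inverse transform implied by the handler, for building a "z" query parameter: compress the code, base64-encode it, and strip the "=" padding that the handler re-adds:

import base64
import zlib

def encode_z(code):
    # code is a byte string (the handler above is Python 2 style)
    return base64.urlsafe_b64encode(zlib.compress(code)).rstrip("=")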
Example #36
0
    def _get_network_flow_rate_dict(self):
        try:
            eclipse_time = 1
            old_ret = self._get_network_flow_data()
            time.sleep(eclipse_time)
            new_ret = self._get_network_flow_data()
            if len(old_ret) != len(new_ret):
                logger.warning("net interface's num is changing")
                return []

            ret = []
            for index in range(len(old_ret)):
                old_receive_bytes = old_ret[index]['receive_bytes']
                old_transfer_bytes = old_ret[index]['transfer_bytes']
                new_receive_bytes = new_ret[index]['receive_bytes']
                new_transfer_bytes = new_ret[index]['transfer_bytes']
                receive_rate = (float(new_receive_bytes - old_receive_bytes) /
                                1024.0 / eclipse_time)
                transfer_rate = (
                    float(new_transfer_bytes - old_transfer_bytes) / 1024.0 /
                    eclipse_time)
                if receive_rate < 0 or transfer_rate < 0:
                    receive_rate = 0
                    transfer_rate = 0

                item = {}
                item['name'] = old_ret[index]['name']
                item['in'] = receive_rate
                item['out'] = transfer_rate
                item['type'] = "service"  #todo
                ret.append(item)
            logger.debug(ret)
            return ret

        except Exception as e:
            logger.exception(e)
            return []
Example #37
0
def get_my_embeddings(word2id, config, col):
    """
    Build the pretrained embedding matrix for this project's vocabulary.
    :param word2id: dict mapping tokens to vocabulary indices
    :param config: config object holding the path and dimension settings
    :param col: str, column/model identifier used in the file names
    """
    embedding_path = os.path.join(
        config['path_train_test_settings']['path_col_model_embeddings'],
        'embedding_{}.npz'.format(col))
    path_row_embedding = os.path.join(
        config['path_pipeline_settings']['path_word2vec_txt'],
        'models_{}_embedding.txt'.format(col))

    if os.path.exists(embedding_path):
        logger.debug('{}_embedding already exists'.format(col))
        return None

    # Build the word vectors that match this project's vocabulary
    embeddings = np.random.rand(
        len(word2id), int(config['train_test_settings']['embedding_dim']))
    f = open(path_row_embedding, 'r', encoding='UTF-8')
    for i, line in enumerate(f):
        if i == 0:  # skip the first line in case it is a header
            continue
        lin = line.strip().split(" ")  # one pretrained word vector per line
        if lin[0] in word2id:
            idx = word2id[lin[0]]
            emb = [
                float(x) for x in
                lin[1:int(config['train_test_settings']['embedding_dim']) + 1]
            ]
            embeddings[idx] = np.asarray(emb, dtype="float32")
    f.close()
    np.savez_compressed(embedding_path, embeddings=embeddings)
    logger.debug('{}_embedding generated'.format(col))
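Reading the matrix back is a single np.load; the file name below is illustrative, following the embedding_{col}.npz pattern used above:

import numpy as np

embeddings = np.load("embedding_title.npz")["embeddings"]
# shape: (len(word2id), embedding_dim)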
Example #38
0
def save_cluster(user, beans, form):
    global G_vclustermgr
    clustername = form.get('clustername', None)
    if clustername is None:
        return json.dumps({
            'success': 'false',
            'message': 'clustername is null'
        })

    imagename = form.get("image", None)
    description = form.get("description", None)
    containername = form.get("containername", None)
    isforce = form.get("isforce", None)
    if not isforce == "true":
        [status, message] = G_vclustermgr.image_check(user, imagename)
        if not status:
            return json.dumps({
                'success': 'false',
                'reason': 'exists',
                'message': message
            })

    user_info = post_to_user("/user/selfQuery/", {'token': form.get("token")})
    [status, message
     ] = G_vclustermgr.create_image(user, clustername, containername,
                                    imagename, description,
                                    user_info["data"]["groupinfo"]["image"])
    if status:
        logger.info("image has been saved")
        return json.dumps({'success': 'true', 'action': 'save'})
    else:
        logger.debug(message)
        return json.dumps({
            'success': 'false',
            'reason': 'exceed',
            'message': message
        })
Example #39
0
 def on_message(self, message):
     prefix, json_message = message.split(",", 1)
     kernel_id = prefix.split("/", 1)[0]
     message = jsonapi.loads(json_message)
     logger.debug("KernelConnection.on_message: %s", message)
     application = self.session.handler.application
     if kernel_id == "complete":
         if message["header"]["msg_type"] in ("complete_request",
                                              "object_info_request"):
             application.completer.registerRequest(self, message)
         return
     try:
         if kernel_id not in self.channels:
             # handler may be None in certain circumstances (it seems to only be set
             # in GET requests, not POST requests, so even using it here may
             # only work with JSONP because of a race condition)
             kernel_info = application.km.kernel_info(kernel_id)
             self.kernel_info = {'remote_ip': kernel_info['remote_ip'],
                                 'referer': kernel_info['referer'],
                                 'timeout': kernel_info['timeout']}
         if message["header"]["msg_type"] == "execute_request":
             stats_logger.info(StatsMessage(
                 kernel_id=kernel_id,
                 remote_ip=self.kernel_info['remote_ip'],
                 referer=self.kernel_info['referer'],
                 code=message["content"]["code"],
                 execute_type='request'))
         if kernel_id not in self.channels:
             self.channels[kernel_id] = SockJSChannelsHandler(self.send)
             self.channels[kernel_id].open(application, kernel_id)
         self.channels[kernel_id].on_message(json_message)
     except KeyError:
         # Ignore messages to nonexistent or killed kernels.
         import traceback
         logger.info("%s message sent to nonexistent kernel: %s\n%s" %
                     (message["header"]["msg_type"], kernel_id,
                     traceback.format_exc()))
Example #40
0
    def write_felix_config(self):
        felix_config_file = self.get_felix_config_file()
        felix_config_path = os.path.dirname(felix_config_file)
        if not os.access(felix_config_path, os.W_OK):
            logger.critical("felix_config_file is not in a writable "
                            "location: %s" % felix_config_path)
            return False

        project_bundles_path = os.path.join(self._conf['m2ee']['app_base'],
                                            'model', 'bundles')
        osgi_storage_path = os.path.join(self._conf['m2ee']['app_base'],
                                         'data', 'tmp', 'felixcache')
        felix_template_file = os.path.join(self._runtime_path, 'runtime',
                                           'felixconfig.properties.template')
        if os.path.exists(felix_template_file):
            logger.debug("writing felix configuration template from %s "
                         "to %s" % (felix_template_file, felix_config_file))
            try:
                input_file = open(felix_template_file)
                template = input_file.read()
            except IOError as e:
                logger.error(
                    "felix configuration template could not be "
                    "read: %s", e)
                return False
            try:
                output_file = open(felix_config_file, 'w')
                render = template.format(
                    ProjectBundlesDir=project_bundles_path,
                    InstallDir=self._runtime_path,
                    FrameworkStorage=osgi_storage_path)
                output_file.write(render)
            except IOError as e:
                logger.error(
                    "felix configuration file could not be "
                    "written: %s", e)
                return False
Example #41
0
def get(code, db_session):
    logger.debug(LogMsg.CHECK_UNIQUE_EXISTANCE, code)
    result = db_session.query(ConstraintHandler).filter(
        ConstraintHandler.UniqueCode == code).first()
    if result is not None:
        logger.debug(LogMsg.UNIQUE_CONSTRAINT_EXISTS, code)
    else:
        logger.debug(LogMsg.UNIQUE_NOT_EXISTS, code)
    return result
Example #42
0
def sync_configs(source_dir: str, target_dir: str):
    """
    将指定的配置相关文件从 源目录 覆盖到 目标目录
    """
    sync_config_list = [
        # config files
        "config.toml",
        "config.toml.local",

        # feature toggles
        ".disable_pause_after_run",
        ".use_by_myself",
        "不查询活动.txt",
        ".no_message_box",

        # cache directory
        ".db",

        # # auto-update DLC
        # "utils/auto_updater.exe"
    ]

    logger.debug(f"将以下配置从{source_dir} 复制并覆盖到 {target_dir}")

    for filename in sync_config_list:
        source = os.path.join(source_dir, filename)
        destination = os.path.join(target_dir, filename)

        if not os.path.exists(source):
            logger.debug(f"旧版本目录未发现 {filename},将跳过")
            continue

        if 'config.toml' in filename and os.stat(source).st_size == 0:
            logger.warning("the config file from the old version is empty and may be corrupted, so it will not overwrite the local copy")
            continue

        # Make sure the directory that will hold the destination file exists
        make_sure_dir_exists(os.path.dirname(destination))

        if os.path.isdir(source):
            logger.debug(f"overwriting directory {filename}")
            remove_directory(destination)
            shutil.copytree(source, destination)
        else:
            logger.debug(f"overwriting file {filename}")
            remove_file(destination)
            shutil.copyfile(source, destination)
Example #43
0
 def _select(self, sql, first, *args):
      '''
      A minor issue: every MySQL operation must ping first! How can we solve it?
      '''
     cursor = None
     sql = sql.replace('?', '%s')
     logger.debug("%s" % sql)
     try:
         if self.is_db_connected() is False:
             raise DatabaseExecError()
         cursor = self.db.cursor()
         cursor.execute(sql, args)
          names = []
          if cursor.description:
              names = [x[0] for x in cursor.description]
         if first:
             values = cursor.fetchone()
             if not values:
                 return None
             return Dict(names, values)
         return [Dict(names, x) for x in cursor.fetchall()]
      except pymysql.Error as err:
         logger.error("execute sql error,err info:%s, sql:%s" % (err, sql))
         self.disconnect_db()
         raise DatabaseExecError()
Example #44
0
def add_group_permissions(data, db_session, username):
    logger.info(LogMsg.START, username)

    schema_validate(data, A_GROUP_ADD_SCHEMA_PATH)
    logger.debug(LogMsg.SCHEMA_CHECKED)

    group_id = data.get('group_id')
    permissions = data.get('permissions')

    validate_group(group_id, db_session)
    result = []
    for permission_id in permissions:
        if group_has_permission(permission_id, group_id, db_session):
            logger.error(LogMsg.GROUP_USER_IS_IN_GROUP, {
                'permission_id': permission_id,
                'group_id': group_id
            })
            raise Http_error(409, Message.ALREADY_EXISTS)
        result.append(
            group_permission_to_dict(
                add(permission_id, group_id, db_session, username)))

    logger.info(LogMsg.END)
    return result
Example #45
0
    def _ssh_untrusted(self, cfg, client, comp_id):
        ip = socket.gethostbyname(cfg["host"])
        code = "%s '%s/receiver.py' '%s' '%s' '%s'"%(cfg["python"], cfg["location"], ip, comp_id, self.tmp_dir)
        logger.debug(code)
        ssh_stdin, ssh_stdout, ssh_stderr = client.exec_command(code)
        stdout_channel = ssh_stdout.channel

        # Wait for untrusted side to respond with the bound port using paramiko channels
        # Another option would be to have a short-lived ZMQ socket bound on the trusted
        # side and have the untrusted side connect to that and send the port
        output = ""
        stdout_channel.settimeout(2.0)
        polls = 0
        while output.count("\n")!=2:
            try:
                output += stdout_channel.recv(1024)
            except socket.timeout:
                polls+= 1
            if stdout_channel.closed:
                print "An error occurred getting data from the untrusted side."
                return None
            if polls>20:
                return None
        return int(output.split("\n")[0])
Example #46
0
    def post(self,
             ctx,
             url,
             data=None,
             json=None,
             pretty=False,
             print_res=True,
             is_jsonp=False,
             is_normal_jsonp=False,
             need_unquote=True,
             extra_cookies="",
             check_fn: Callable[[requests.Response],
                                Optional[Exception]] = None):
        def request_fn():
            cookies = self.base_cookies + extra_cookies
            content_type = "application/x-www-form-urlencoded"
            if data is None and json is not None:
                content_type = "application/json"

            post_headers = {
                **self.base_headers,
                **{
                    "Content-Type": content_type,
                    "Cookie": cookies,
                }
            }
            return requests.post(url,
                                 data=data,
                                 json=json,
                                 headers=post_headers,
                                 timeout=self.common_cfg.http_timeout)

        res = try_request(request_fn, self.common_cfg.retry, check_fn)
        logger.debug(f"{data}")
        return process_result(ctx, res, pretty, print_res, is_jsonp,
                              is_normal_jsonp, need_unquote)
Example #47
0
 def parse_status(self, response):
     try:
         logger.debug("RbC %s: Received: %s", hex(self._address),
                      binascii.hexlify(response))
         if binascii.hexlify(response[0:3]) == self.prefix():
             response = response[2:]
             return {
                 "current1": h.decode(response[13:16]),  # 13, 14, 15
                 "velocity1": h.decode(response[16:24]),
                 "position1": h.decode(response[24:32]),
                 "current2": h.decode(response[32:35]),
                 "velocity2": h.decode(response[35:43]),
                 "position2": h.decode(response[43:51]),
                 "current3": h.decode(response[51:54]),
                 "velocity3": h.decode(response[54:62]),
                 "position3": h.decode(response[62:70]),
                 "current4": h.decode(response[70:73]),
                 "velocity4": h.decode(response[73:81]),
                 "position4": h.decode(response[81:89]),
             }
         else:
             return False
     except IndexError as e:
         logger.error("RbC %s: %s", hex(self._address), e)
Example #48
0
def index():
    """Method used when user goes to the homepage"""
    # All possible variables that can be given to the Jinja template.
    keys = [
        'color_animate', 'duration_animate', 'ease_animate', 'color_toggle',
        'status_toggle'
    ]
    color_animate, duration_animate, ease_animate, color_toggle, status_toggle = [
        session.get(key, "") for key in keys
    ]
    # If someone without a previous session goes to /, there is no status_toggle
    status_toggle = status_toggle or 'Click'
    logger.debug("Request data: {ca}, {da}, {ea}, {ct}, {st}".format(
        ca=color_animate,
        da=duration_animate,
        ea=ease_animate,
        ct=color_toggle,
        st=status_toggle))
    return render_template('index.html',
                           color_animate=color_animate,
                           duration_animate=duration_animate,
                           ease_animate=ease_animate,
                           color_toggle=color_toggle,
                           status_toggle=status_toggle)
Example #49
0
def get(id, db_session, username):
    logger.info(LogMsg.START, username)
    user = check_user(username, db_session)
    per_data = {}
    if is_group_member(user.person_id, id, db_session):
        per_data.update({Permissions.IS_OWNER.value: True})

    logger.debug(LogMsg.PERMISSION_CHECK, username)
    validate_permissions_and_access(username, db_session, 'DISCUSSION_GROUP',
                                    per_data)
    logger.debug(LogMsg.PERMISSION_VERIFIED, username)

    logger.debug(LogMsg.MODEL_GETTING)
    model_instance = db_session.query(DiscussionGroup).filter(
        DiscussionGroup.id == id).first()
    if model_instance:
        result = discuss_group_to_dict(model_instance, db_session)
        logger.debug(LogMsg.GET_SUCCESS, result)
    else:
        logger.debug(LogMsg.MODEL_GETTING_FAILED, {"discuss_group_id": id})
        raise Http_error(404, Message.NOT_FOUND)
    logger.info(LogMsg.END)

    return result
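is_group_member is shared by several of these handlers but not shown; a plausible SQLAlchemy sketch, assuming a GroupUser association model with person_id and group_id columns (the actual model is an assumption):

def is_group_member(person_id, group_id, db_session):
    # hypothetical: membership exists if an association row is present
    return db_session.query(GroupUser).filter(
        GroupUser.person_id == person_id,
        GroupUser.group_id == group_id).first() is not None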
Example #50
0
def get(id, db_session, username, **kwargs):
    logger.info(LogMsg.START, username)
    result = None
    user = check_user(username, db_session)

    try:
        logger.debug(LogMsg.MODEL_GETTING, id)
        model_instance = db_session.query(ChatMessage).filter(
            ChatMessage.id == id).first()
        if model_instance.group_id is not None:
            if not is_group_member(user.person_id, model_instance.group_id,
                                   db_session):
                logger.error(LogMsg.CHAT_PERSON_NOT_IN_GROUP, username)
                raise Http_error(403, Message.PERSON_CANT_DELETE_MESSAGE)

        permission_data = {}
        if (model_instance.sender_id == user.person_id
                or model_instance.receptor_id == user.person_id
                or (model_instance.group_id is not None
                    and is_group_member(user.person_id,
                                        model_instance.group_id,
                                        db_session))):
            permission_data.update({Permissions.IS_OWNER.value: True})

        permissions, presses = get_user_permissions(username, db_session)

        has_permission([Permissions.CHAT_DELETE_PREMIUM], permissions, None,
                       permission_data)

        logger.debug(LogMsg.PERMISSION_VERIFIED, username)
        update_last_seen(model_instance, user.person_id, db_session)
        # return the fetched message once the permission checks have passed
        result = model_instance

    except:
        logger.exception(LogMsg.GET_FAILED, exc_info=True)
        raise Http_error(404, Message.NOT_FOUND)

    logger.info(LogMsg.END)
    return result
Example #51
0
def train(x, y, net, optimizer, loss_func, device, batch_size=64, n_epochs=50, verbose=True):
    x = torch.tensor(x, dtype=torch.float32).to(device)
    y = torch.tensor(y, dtype=torch.int64).to(device)
    train_dataset = Data.TensorDataset(x, y)
    train_loader = Data.DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=0
    )

    for epoch in tqdm(range(n_epochs)):
        epoch_loss = 0.0
        n_batches: int = 0
        epoch_start_time = time.time()
        for step, (batch_x, batch_y) in enumerate(train_loader):
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)
            optimizer.zero_grad()
            # the network is assumed to return a (logits, extras) tuple
            out = net(batch_x)[0]
            loss = loss_func(out, batch_y)
            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()

            n_batches += 1
            # cap each epoch at roughly the first 1000 batches
            if step == 1000:
                break

        epoch_train_time = time.time() - epoch_start_time
        if (epoch+1) % 5 == 0:
            if verbose:
                logger.debug(f'| Epoch: {epoch + 1:03}/{n_epochs:03} | Train Time: {epoch_train_time:.3f}s'
                             f'| Train Loss: {epoch_loss / n_batches:.6f} | Batch Size({batch_size})')
    return net
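A minimal invocation sketch for the loop above, assuming the model returns a (logits, extras) tuple as the [0] indexing implies; x_train and y_train stand in for NumPy arrays prepared elsewhere:

import torch
import torch.nn as nn

class TinyNet(nn.Module):
    # hypothetical model matching the tuple-returning forward used above
    def __init__(self, n_in, n_classes):
        super().__init__()
        self.fc = nn.Linear(n_in, n_classes)

    def forward(self, x):
        return self.fc(x), None

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = TinyNet(n_in=20, n_classes=2).to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
net = train(x_train, y_train, net, optimizer, nn.CrossEntropyLoss(),
            device, batch_size=64, n_epochs=10)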
Example #52
0
def process_result(ctx, res, pretty=False, print_res=True, is_jsonp=False, is_normal_jsonp=False, need_unquote=True) -> dict:
    if res is not None:
        fix_encoding(res)
        set_last_response_info(res.status_code, res.reason, res.text)

    if is_jsonp:
        data = jsonp2json(res.text, is_normal_jsonp, need_unquote)
    else:
        data = res.json()

    success = is_request_ok(data)

    if print_res:
        logFunc = logger.info
        if not success:
            logFunc = logger.error
    else:
        # when not printing, fall back to debug level instead of suppressing
        # file output entirely, which makes troubleshooting easier
        logFunc = logger.debug

    # prepend the actual call site to the log context
    ctx = get_meaningful_call_point_for_log() + ctx

    processed_data = pre_process_data(data)
    if processed_data is None:
        logFunc(f"{ctx}\t{pretty_json(data, pretty)}")
    else:
        # if the data needed adjusting, log the adjusted version and also
        # log the raw data at debug level
        logFunc(f"{ctx}\t{pretty_json(processed_data, pretty)}")
        logger.debug(f"{ctx}(raw data)\t{pretty_json(data, pretty)}")

    global last_process_result
    last_process_result = data

    return data
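jsonp2json is referenced above but lives elsewhere; a minimal sketch assuming the conventional callback(...) wrapping, with need_unquote applying URL-decoding first (the is_normal_jsonp flag is ignored in this sketch):

import json
import re
from urllib.parse import unquote

def jsonp2json(text, is_normal_jsonp=False, need_unquote=True):
    # hypothetical: strip a callback(...) wrapper and parse the JSON payload
    if need_unquote:
        text = unquote(text)
    match = re.search(r'^[^(]*\((.*)\)\s*;?\s*$', text, re.S)
    payload = match.group(1) if match else text
    return json.loads(payload)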
Example #53
0
def search_by_title(data, db_session):
    logger.info(LogMsg.START)

    search_phrase = data.get('search_phrase')
    skip = data.get('skip', 0)
    limit = data.get('limit', 20)

    logger.debug(LogMsg.SEARCH_BOOK_BY_TITLE, search_phrase)

    try:
        result = []
        books = db_session.query(Book).filter(
            Book.title.like('%{}%'.format(search_phrase))).order_by(
                Book.creation_date.desc()).slice(skip, skip + limit)
        for book in books:
            result.append(book_to_dict(db_session, book))
        logger.debug(LogMsg.GET_SUCCESS)
    except:
        logger.exception(LogMsg.GET_FAILED, exc_info=True)
        raise Http_error(404, Message.NOT_FOUND)

    logger.info(LogMsg.END)

    return result
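Note that % and _ inside search_phrase act as SQL wildcards in the LIKE pattern above; a hedged variant that escapes them, using SQLAlchemy's escape parameter (same query otherwise):

escaped = search_phrase.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_')
books = db_session.query(Book).filter(
    Book.title.like('%{}%'.format(escaped), escape='\\')).order_by(
        Book.creation_date.desc()).slice(skip, skip + limit)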
Example #54
0
 def init_view(self, path):
     self.current_selection = None
     self.current_path = path
     logger.debug("Current Path: %s" % self.current_path)
     utils.async_call(cloudapi.list_path,
                      self.current_path,
                      500,
                      settings.DRY,
                      self.bdstoken,
                      callback=self.populate_view)
     self.spinn = SpinnerDialog(self)
     self.spinn.show()
     file_list = []
     pix_list = self.get_pix_list(file_list)
     self.fill_liststore(pix_list)
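utils.async_call is not shown; a minimal thread-based sketch consistent with the call above (positional args followed by a callback keyword). In a GTK application the callback would normally be marshalled back to the main loop with GLib.idle_add; this sketch skips that detail:

import threading

def async_call(func, *args, callback=None):
    # hypothetical: run func(*args) off the UI thread and pass the result
    # to callback when it completes
    def worker():
        result = func(*args)
        if callback is not None:
            callback(result)
    threading.Thread(target=worker, daemon=True).start()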
Example #55
0
def delete(id, db_session, username=None):
    logger.info(LogMsg.START, username)

    price = get_by_id(id, db_session)
    if price is None:
        logger.error(LogMsg.NOT_FOUND, {'book_price_id': id})
        raise Http_error(404, Message.NOT_FOUND)

    book = get_book(price.book_id, db_session)

    if username is not None:
        per_data = {}
        permissions, presses = get_user_permissions(username, db_session)
        if book.creator == username:
            per_data.update({Permissions.IS_OWNER.value: True})
        has_permit = has_permission_or_not([Permissions.PRICE_DELETE_PREMIUM],
                                           permissions, None, per_data)
        if not has_permit:
            if book.press in presses:
                has_permission([Permissions.PRICE_DELETE_PRESS], permissions)
            else:
                logger.error(LogMsg.PERMISSION_DENIED, username)
                raise Http_error(403, Message.ACCESS_DENIED)
        logger.debug(LogMsg.PERMISSION_VERIFIED)

    try:
        db_session.delete(price)
        logger.debug(LogMsg.DELETE_SUCCESS, {'book_price_id': id})
    except:
        logger.exception(LogMsg.DELETE_FAILED, exc_info=True)
        raise Http_error(404, Message.DELETE_FAILED)

    logger.info(LogMsg.END)

    return Http_response(204, True)
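has_permission_or_not presumably mirrors has_permission but reports the outcome instead of raising; a sketch of that reading (an assumption, not the project's code):

def has_permission_or_not(required, permissions, press=None, extra_data=None):
    # hypothetical non-raising wrapper around has_permission
    try:
        has_permission(required, permissions, press, extra_data)
        return True
    except Http_error:
        return False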
Example #56
0
def add_book_to_collections(data, db_session, username):
    logger.info(LogMsg.START, username)

    check_schema(['book_ids', 'collections'], data.keys())
    if 'person_id' in data:
        person_id = data.get('person_id')
    else:
        user = check_user(username, db_session)
        if user is None:
            raise Http_error(404, Message.INVALID_USER)
        if user.person_id is None:
            logger.error(LogMsg.USER_HAS_NO_PERSON, username)
            raise Http_error(404, Message.INVALID_USER)
        person_id = user.person_id

    books_ids = data.get('book_ids')
    logger.debug(LogMsg.LIBRARY_CHECK_BOOK_EXISTANCE, books_ids)
    if not books_are_in_lib(person_id, books_ids, db_session):
        raise Http_error(404, Message.BOOK_NOT_IN_LIB)

    logger.debug(LogMsg.COLLECTION_ADD_BOOK_TO_MULTIPLE_COLLECTIONS, data)
    for collection_title in data.get('collections'):
        if not collection_exists(collection_title, person_id, db_session):
            logger.error(LogMsg.NOT_FOUND, {
                'collection_title': collection_title,
                'person_id': person_id
            })
        addition_data = {
            'book_ids': books_ids,
            'title': collection_title,
            'person_id': person_id
        }
        add(addition_data, db_session, 'internal')
    logger.info(LogMsg.END)

    return data
Example #57
0
def get_all(data, db_session, username, **kwargs):
    logger.info(LogMsg.START, username)

    if data.get('sort') is None:
        data['sort'] = ['creation_date-']

    logger.debug(LogMsg.PERMISSION_CHECK, username)
    validate_permissions_and_access(username, db_session, 'COMMENT_GET')
    logger.debug(LogMsg.PERMISSION_VERIFIED, username)

    try:
        res = Comment.mongoquery(
            db_session.query(Comment)).query(**data).end().all()
        result = []
        for item in res:
            comment = comment_to_dict(db_session, item, username)
            book = get_book(item.book_id, db_session)
            comment['book'] = book
            result.append(comment)
    except:
        logger.exception(LogMsg.GET_FAILED, exc_info=True)
        raise Http_error(404, Message.NOT_FOUND)
    logger.info(LogMsg.END)
    return result
Example #58
0
    def edit(self, id, db_session, data, username, permission_checked=False):
        logger.info(LogMsg.START, username)

        logger.debug(LogMsg.EDIT_REQUST, {'store_id': id, 'data': data})
        model_instance = super(StoreController, self).get(id, db_session)
        if 'store_code' in data:
            del data['store_code']
        if 'id' in data:
            del data['id']
        if 'name' in data:
            data['store_code'] = '{}-{}'.format(data['name'], randint(1000000, 9999999))
        try:
            super(StoreController, self).edit(id, data, db_session, username,
                                              permission_checked)

            logger.debug(LogMsg.MODEL_ALTERED, model_instance.to_dict())

            # TODO change def unique
        except:
            logger.exception(LogMsg.EDIT_FAILED, exc_info=True)
            raise Http_error(500, Message.EDIT_FAILED)

        logger.info(LogMsg.END)
        return model_instance.to_dict()
Example #59
0
    def configure_secret_token(self):
        token_file_path = os.path.join(self.options.chorus_path, "shared")
        token_file = os.path.join(token_file_path, 'secret.token')

        logger.debug("Configuring secret token...")
        if not os.path.exists(token_file):
            with open(token_file, 'w') as f:
                f.write(os.urandom(64).encode('hex'))
        else:
            logger.debug(token_file + " already exists, skipping")
        logger.debug("Secure " + token_file)
        os.chmod(token_file, 0600)
        symbolic = os.path.join(self.release_path, "config/secret.token")
        logger.debug("Create symbolic to " + symbolic)
        if os.path.lexists(symbolic):
            os.remove(symbolic)
        os.symlink(token_file, symbolic)
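The method above is Python 2 only (the 0600 literal and str-style .encode('hex')); a Python 3 equivalent of the token write, offered as a sketch rather than part of the original project:

import os

token = os.urandom(64).hex()  # hex-encode 64 random bytes
with open(token_file, 'w') as f:
    f.write(token)
os.chmod(token_file, 0o600)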
Example #60
0
    def resetSerialConnection(self, comm_port, baud):
        """
        Reset the Serial Port device to read in serial data
        """
        self.comm_port = comm_port.strip()
        self.baud = baud
        logger.debug("comm_port: " + self.comm_port)
        logger.debug("Baud: " + str(self.baud))

        logger.debug("Disconnect serial port")
        self.factory.serial_port.loseConnection()

        self.factory.serial_port = SerialPort(SerialDevice(self, self),
                                              self.comm_port,
                                              reactor,
                                              baudrate=self.baud)
        logger.debug('Serial Port Restarted')
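For reference, the Twisted pieces this method relies on would be imported roughly as follows; SerialDevice is the project's own protocol class, assumed to be defined elsewhere:

from twisted.internet import reactor
from twisted.internet.serialport import SerialPort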