Example #1
    def test_ddGraph(self):
        """
        Graph is using dd to read a file and write to another. This is mainly
        to test that the separatorString parameter is working correctly.
        """
        sessionId = "lalo"
        ddGraph = "graphs/ddTest.graph"
        with pkg_resources.resource_stream("test",
                                           ddGraph) as f:  # @UndefinedVariable
            logger.debug(f"Loading graph: {f}")
            graphSpec = json.load(f)
        self.createSessionAndAddGraph(sessionId, graphSpec=graphSpec)

        # Deploy now and get OIDs
        bs = graphSpec[0]["applicationArgs"]["bs"]["value"]
        count = graphSpec[0]["applicationArgs"]["count"]["value"]
        self.dim.deploySession(sessionId)
        a, c = [
            self.dm._sessions[sessionId].drops[x]
            for x in ("2022-02-11T08:05:47_-5_0", "2022-02-11T08:05:47_-3_0")
        ]

        data = os.urandom(bs * count)
        logger.debug(f"Length of data produced: {len(data)}")
        with droputils.DROPWaiterCtx(self, c, 3):
            a.write(data)
            a.setCompleted()

        self.assertEqual(data, droputils.allDropContents(c))
Example #2
    def test_namedPorts(self):
        """
        Use a graph with named ports and check whether it is running
        """
        init_oid = "2022-03-20T04:33:27_-2_0"  # first drop in graph
        sessionId = "lalo"
        with pkg_resources.resource_stream(
                "test", "graphs/funcTestPG_namedPorts.graph"
        ) as f:  # @UndefinedVariable
            graphSpec = json.load(f)
        # dropSpecs = graph_loader.loadDropSpecs(graphSpec)
        self.createSessionAndAddGraph(sessionId, graphSpec=graphSpec)

        # Deploy now and get OIDs
        self.dim.deploySession(sessionId)
        fd = self.dm._sessions[sessionId].drops["2022-03-20T04:33:27_-1_0"]
        init_drop = self.dm._sessions[sessionId].drops[init_oid]
        a = InMemoryDROP("a", "a")
        init_drop.addInput(a)
        logger.debug(f"PyfuncAPPDrop: {dir(fd)}")
        for i in fd.parameters["inputs"]:
            logger.debug(f"PyfuncAPPDrop input names:{i}")

        with droputils.DROPWaiterCtx(self, init_drop, 3):
            a.setCompleted()
Example #3
    def DeleteNoLocal(self, _nodellist):
        try:
            if not _nodellist:
                return

            module_path = os.path.join(self.ApacheDocsPath, "modules")
            if os.path.exists(module_path):
                m_paths = getListFiles(module_path)
                for path in m_paths:
                    if path.lower() not in _nodellist:
                        os.remove(path)
                        logger.debug("Deleting file %s", path)

                m_dirs = getListDirs(module_path)
                pathstr = "C:\\thunder\\Apache\\htdocs\\modules\\moduledatatype"
                for dir in m_dirs:
                    logger.debug("Checking directory %s", dir)
                    if pathstr.lower() == dir.lower():
                        continue
                    if dir not in _nodellist and os.path.exists(dir):
                        # Only remove directories untouched for at least a
                        # month; compare timestamps directly instead of month
                        # numbers, which break across year boundaries.
                        if time.time() - os.path.getmtime(dir) >= 30 * 24 * 3600:
                            # os.remove() cannot delete a directory; use
                            # shutil.rmtree (requires "import shutil").
                            shutil.rmtree(dir, ignore_errors=True)
        except Exception:
            logger.error(traceback.format_exc())
Example #4
    def test_namedPorts_with_kwonlyargs(self):
        """
        Use a graph with named ports and check whether it is running
        """
        init_oids = [
            "2022-03-30T03:46:01_-2_0",
            "2022-03-30T03:46:01_-6_0",
        ]  # first drops in graph
        sessionId = "lalo"
        with pkg_resources.resource_stream(
                "test",
                "graphs/pyfunc_glob_testPG.graph") as f:  # @UndefinedVariable
            graphSpec = json.load(f)
        # dropSpecs = graph_loader.loadDropSpecs(graphSpec)
        self.createSessionAndAddGraph(sessionId, graphSpec=graphSpec)

        # Deploy now and get OIDs
        self.dim.deploySession(sessionId)
        fd = self.dm._sessions[sessionId].drops["2022-03-30T03:46:01_-1_0"]
        start_drops = [InMemoryDROP(x, x) for x in ("a", "b")]
        for oid, start_drop in zip(init_oids, start_drops):
            init_drop = self.dm._sessions[sessionId].drops[oid]
            init_drop.addInput(start_drop)
        logger.debug(f"PyfuncAPPDrop: {dir(fd)}")
        for i in fd.parameters["inputs"]:
            logger.debug(f"PyfuncAPPDrop input names:{i}")

        with droputils.DROPWaiterCtx(self, init_drop, 3):
            for drop in start_drops:
                drop.setCompleted()
Example #5
def nearest_ratings(user_id, count, address):
    """Get Nearest Recommended business to user's address """
    logger.debug("User %s rating requested for nearest address", user_id,
                 address)
    ratings = recommendationengine.get_nearest_businesses(
        user_id, count, address)
    return json.dumps(ratings)
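
The web framework is not shown in this snippet; render_template in a later example suggests Flask. Assuming that, the handler might be registered along these lines (the app object and URL rule are assumptions):

from flask import Flask

app = Flask(__name__)

# Hypothetical URL rule; the real route is not part of the example.
app.add_url_rule(
    "/ratings/<user_id>/nearest/<int:count>/<address>",
    view_func=nearest_ratings,
)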
Example #6
    def _do_waitpid(self, loop, expected_pid, callback, args):
        assert expected_pid > 0

        try:
            pid, status = os.waitpid(expected_pid, 0)
        except ChildProcessError:
            # The child process is already reaped
            # (may happen if waitpid() is called elsewhere).
            pid = expected_pid
            returncode = 255
            logger.warning(
                "Unknown child process pid %d, will report returncode 255", pid
            )
        else:
            returncode = _compute_returncode(status)
            if loop.get_debug():
                logger.debug(
                    "process %s exited with returncode %s", expected_pid, returncode
                )

        if loop.is_closed():
            logger.warning("Loop %r that handles pid %r is closed", loop, pid)
        else:
            loop.call_soon_threadsafe(callback, pid, returncode, *args)

        self._threads.pop(expected_pid)
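
_compute_returncode is defined elsewhere in the module; a sketch of the POSIX wait-status decoding it is assumed to perform:

import os


def _compute_returncode(status):
    # Assumed behaviour: turn an os.waitpid() status into a subprocess-style
    # return code (negative signal number if killed, else the exit status).
    if os.WIFSIGNALED(status):
        return -os.WTERMSIG(status)
    elif os.WIFEXITED(status):
        return os.WEXITSTATUS(status)
    # Neither exited nor signalled (e.g. stopped); pass the raw status through.
    return status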
Example #7
    def _on_handshake_complete(self, handshake_exc):
        self._in_handshake = False

        sslobj = self._sslpipe.ssl_object
        try:
            if handshake_exc is not None:
                raise handshake_exc

            peercert = sslobj.getpeercert()
            if not hasattr(self._sslcontext, "check_hostname"):
                # Verify hostname if requested, Python 3.4+ uses check_hostname
                # and checks the hostname in do_handshake()
                if (self._server_hostname
                        and self._sslcontext.verify_mode != ssl.CERT_NONE):
                    ssl.match_hostname(peercert, self._server_hostname)
        except BaseException as exc:
            if self._loop.get_debug():
                if isinstance(exc, ssl.CertificateError):
                    logger.warning(
                        "%r: SSL handshake failed "
                        "on verifying the certificate",
                        self,
                        exc_info=True,
                    )
                else:
                    logger.warning("%r: SSL handshake failed",
                                   self,
                                   exc_info=True)
            self._transport.close()
            if isinstance(exc, Exception):
                self._wakeup_waiter(exc)
                return
            else:
                raise

        if self._loop.get_debug():
            dt = self._loop.time() - self._handshake_start_time
            logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)

        # Add extra info that becomes available after handshake.
        self._extra.update(
            peercert=peercert,
            cipher=sslobj.cipher(),
            compression=sslobj.compression(),
            ssl_object=sslobj,
        )
        if self._call_connection_made:
            self._app_protocol.connection_made(self._app_transport)
        self._wakeup_waiter()
        self._session_established = True
        # In case transport.write() was already called. Don't call
        # immediately _process_write_backlog(), but schedule it:
        # _on_handshake_complete() can be called indirectly from
        # _process_write_backlog(), and _process_write_backlog() is not
        # reentrant.
        self._loop.call_soon(self._process_write_backlog)
Example #8
 def _start_handshake(self):
     if self._loop.get_debug():
         logger.debug("%r starts SSL handshake", self)
         self._handshake_start_time = self._loop.time()
     else:
         self._handshake_start_time = None
     self._in_handshake = True
     # (b'', 1) is a special value in _process_write_backlog() to do
     # the SSL handshake
     self._write_backlog.append((b"", 1))
     self._loop.call_soon(self._process_write_backlog)
Example #9
    def resume_writing(self):
        assert self._paused
        self._paused = False
        if self._loop.get_debug():
            logger.debug("%r resumes writing", self)

        waiter = self._drain_waiter
        if waiter is not None:
            self._drain_waiter = None
            if not waiter.done():
                waiter.set_result(None)
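
The _drain_waiter future completed here is what a flow-control drain() helper awaits while writing is paused; a sketch of the counterpart this resume_writing() is assumed to pair with:

    async def _drain_helper(self):
        # Assumed counterpart, called from drain(): park the writer on a
        # future that resume_writing() completes once the buffer drains.
        if not self._paused:
            return
        waiter = self._loop.create_future()
        self._drain_waiter = waiter
        await waiter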
Example #10
 def parse_query(self, parser):
     show, season, episode, title = parser.parse()
     show = os.path.basename(show)
     if show.islower():
         show = show.title()
     self.context['Name'], self.context['TV Season'] = title, season
     for tag in ['Artist', 'Album Artist', 'TV Show']:
         self.context[tag] = show
     self.context['TV Episode #'] = self.context['Track #'] = episode
     self.context['TV Episode ID'] = 'S{}E{}'.format(season, episode)
     self.context['Album'] = '{}, Season {}'.format(show, season)
     LOGGER.debug('Trakt Query: %s', show)
     return show
Example #11
 def _fatal_error(self, exc, message="Fatal error on transport"):
     # Should be called from exception handler only.
     if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
         if self._loop.get_debug():
             logger.debug("%r: %s", self, message, exc_info=True)
     else:
         self._loop.call_exception_handler({
             "message": message,
             "exception": exc,
             "transport": self._transport,
             "protocol": self,
         })
     if self._transport:
         self._transport._force_close(exc)
Example #12
    def _save_status_in_file(self, file_name: str = ''):
        """Print in file the status history."""
        if file_name == '':
            checkFolders(RESULTS_FOLDER)
            file_name = f'{RESULTS_FOLDER}/status_history_{"".join(self.name_.split())}.st'

        logger.debug(
            f'Storing status from node {self.name_} in file {file_name}')

        with open(file_name, 'w') as file:
            for time, status in self.dds_status_node_.status_history_:
                file.write(
                    f'TIME: {time}\n{AmlDdsStatusNode._str_status_data(status)}\n'
                )
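
checkFolders and RESULTS_FOLDER come from elsewhere in the project; a minimal sketch of the assumed helper, which simply makes sure the results directory exists:

import os

# Assumed constant; the real value is defined elsewhere in the project.
RESULTS_FOLDER = 'results'


def checkFolders(*folders):
    """Assumed helper: create each folder if it does not exist yet."""
    for folder in folders:
        os.makedirs(folder, exist_ok=True)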
Example #13
 def _read_ready(self):
     message = self._pubsub.get_message()
     if message:
         if isinstance(message['data'], bytes):
             self._protocol.data_received(message['data'])
     else:
         if self._loop.get_debug():
             logger.debug("%r received EOF", self)
         keep_open = self._protocol.eof_received()
         if keep_open:
             # We're keeping the connection open so the
             # protocol can write more, but we still can't
             # receive more, so remove the reader callback.
             self._loop.remove_reader(self._sock_fd)
         else:
             self.close()
Example #14
def _get_pi_solution(split_graph):
    """
        1. create H (admissable graph) based on Section 3
        http://fmdb.cs.ucla.edu/Treports/930014.pdf

        2. calculate the max flow f' on H using networkx

        3. construct Residual graph from f' on H based on
        https://www.topcoder.com/community/data-science/\
        data-science-tutorials/minimum-cost-flow-part-two-algorithms/

        4. calculate Pi based on Section 3 again
        """
    # Step 1
    H = nx.DiGraph()
    H.add_nodes_from(split_graph)
    for ed in split_graph.edges(data=True):
        Cxy = int(ed[2].get("capacity", sys.maxsize))
        Axy = int(ed[2]["weight"])
        logger.debug(f"Found capacity and weight: {Axy}, {Cxy}")
        if Axy == 0 and Cxy > 0:
            H.add_edge(ed[0], ed[1], capacity=Cxy, weight=Axy)

    # Step 2
    flow_value, flow_dict = nx.maximum_flow(H, "s", "t")

    # Step 3
    R = nx.DiGraph()
    R.add_nodes_from(H)
    for ed in H.edges(data=True):
        Xij = flow_dict[ed[0]][ed[1]]
        Uij = ed[2].get("capacity", sys.maxsize)
        Cij = ed[2]["weight"]
        if (Uij - Xij) > 0:
            R.add_edge(ed[0], ed[1], weight=Cij)
        if Xij > 0:
            R.add_edge(ed[1], ed[0], weight=-1 * Cij)

    # Step 4
    pai = dict()
    for n in R.nodes():
        if nx.has_path(R, "s", n):
            pai[n] = 0
        else:
            pai[n] = 1
    return pai
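
A small usage sketch, assuming the split graph follows the convention used above (a directed graph with source "s", sink "t", and capacity/weight edge attributes):

import networkx as nx

# Toy split graph: a zero-weight path s -> a -> t plus a weighted s -> t edge.
split_graph = nx.DiGraph()
split_graph.add_edge("s", "a", capacity=3, weight=0)
split_graph.add_edge("a", "t", capacity=3, weight=0)
split_graph.add_edge("s", "t", capacity=2, weight=1)

pai = _get_pi_solution(split_graph)
# Nodes still reachable from "s" in the residual graph get 0, the rest get 1,
# e.g. {'s': 0, 'a': 1, 't': 1} for this toy graph.
print(pai)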
Example #15
    def eof_received(self):
        """Called when the other end of the low-level stream
        is half-closed.
        If this returns a false value (including None), the transport
        will close itself.  If it returns a true value, closing the
        transport is up to the protocol.
        """
        try:
            if self._loop.get_debug():
                logger.debug("%r received EOF", self)

            self._wakeup_waiter(ConnectionResetError)

            if not self._in_handshake:
                keep_open = self._app_protocol.eof_received()
                if keep_open:
                    logger.warning("returning true from eof_received() "
                                   "has no effect when using ssl")
        finally:
            self._transport.close()
Example #16
 def has_artwork(self, url):
     """Attempts to download artwork from the provided URL and write it to a
     .jpg file named '.albumart.jpg' then return True as long as a 2xx HTTP
     response is recieved. If an error should occur, nothing is downloaded
     and False is returned
     """
     if not url:
         return False
     LOGGER.info('Downloading Album Artwork...')
     LOGGER.debug('URL: %s', url)
     req = requests.get(url)
     if 200 <= req.status_code < 300:
         file_name = '.albumart{}.jpg'.format(str(uuid4()))
         LOGGER.info('Writing artwork to %s', file_name)
         with open(file_name, 'wb') as f:
             f.write(req.content)
         return file_name.replace(' ', '\\ ')
     message = 'Album Art Not Downloaded: {}'.format(req.status_code)
     LOGGER.warning(message)
     return False
Example #17
    def DeleteVLocalModule(self, module_list):

        for item in module_list:
            fileName = os.path.basename(item.fileurl)
            fileName = os.path.join(self.ApacheDocsPath, "modules", fileName)
            if os.path.exists(fileName):
                # Permission settings: make a read-only file writable before
                # deleting it (constants require "import stat").
                if not os.stat(fileName).st_mode & stat.S_IWUSR:
                    os.chmod(fileName, stat.S_IRWXU)
                os.remove(fileName)
                logger.debug("Deleting file %s", fileName)
                # TODO: delete the version record
                bll = KtvModuleVerBll()
                bll.DeleteVer(item.id)

            dir = item.uppath
            # Only remove the directory if it has not been modified for at
            # least a month; os.remove() cannot delete a directory, so use
            # shutil.rmtree (requires "import shutil").
            if os.path.exists(dir) and time.time() - os.path.getmtime(dir) >= 30 * 24 * 3600:
                shutil.rmtree(dir, ignore_errors=True)
Example #18
    def test_pos_only_args(self):
        """
        Use a graph with compile function to test positional only arguments
        """
        sessionId = "lalo"
        with pkg_resources.resource_stream(
                "test", "graphs/compilePG.graph") as f:  # @UndefinedVariable
            graphSpec = json.load(f)
        # dropSpecs = graph_loader.loadDropSpecs(graphSpec)
        self.createSessionAndAddGraph(sessionId, graphSpec=graphSpec)

        # Deploy now and get OIDs
        self.dim.deploySession(sessionId)
        sd = self.dm._sessions[sessionId].drops["2022-05-06T08:43:26_-2_0"]
        fd = self.dm._sessions[sessionId].drops["2022-05-06T08:43:26_-1_0"]
        with droputils.DROPWaiterCtx(self, fd, 3):
            sd.setCompleted()

        #logger.debug(f'PyfuncAPPDrop signature: {dir(fd)}')
        logger.debug(f'PyfuncAPPDrop status: {fd.status}')
        self.assertEqual(2, fd.status)
Example #19
    def test_simple(self):
        """This creates the following graph

        1 --|
            |-- add --> the_sum --------|
        2 --|                           |                                       |--> part1 --|
                                        |--> divide --> division -->partition --|            |--> add -> final
        4 --|                           |                                       |--> part2 --|
            |-- subtract --> the_sub ---|
        3 --|
        """
        delayed = self.delayed
        compute = self.compute

        the_sum = delayed(add)(1.0, 2.0)
        the_sub = delayed(subtract)(4.0, 3.0)
        division = delayed(divide)(the_sum, the_sub)
        parts = delayed(partition, nout=2)(division)
        logger.debug(f"partitions: {type(parts)}")
        result = compute(delayed(add)(*parts))
        self.assertEqual(3.0, result)
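
add, subtract, divide and partition are helpers defined elsewhere in the test module; plausible definitions consistent with the graph in the docstring (the two-way split in partition is an assumption that matches nout=2):

def add(x, y):
    return x + y


def subtract(x, y):
    return x - y


def divide(x, y):
    return x / y


def partition(x):
    # Assumed: split a value into two parts that sum back to the original.
    half = x / 2.0
    return half, x - half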
Example #20
    def test_ArrayLoop(self):
        """
        Use a graph with an array loop construct and check that it runs to completion
        """
        sessionId = "lalo"
        start_drop = InMemoryDROP('a', 'a')
        with pkg_resources.resource_stream(
                "test", "graphs/ArrayLoopPG.graph") as f:  # @UndefinedVariable
            graphSpec = json.load(f)
        # dropSpecs = graph_loader.loadDropSpecs(graphSpec)
        self.createSessionAndAddGraph(sessionId, graphSpec=graphSpec)

        # Deploy now and get OIDs
        self.dim.deploySession(sessionId)
        sd = self.dm._sessions[sessionId].drops["2022-06-22T09:13:53_-1_0"]
        sd.addInput(start_drop)
        fd = self.dm._sessions[sessionId].drops["2022-06-22T09:13:53_-4_0/0/0"]
        with droputils.DROPWaiterCtx(self, fd, 3):
            start_drop.setCompleted()

        #logger.debug(f'PyfuncAPPDrop signature: {dir(fd)}')
        logger.debug(f'PyfuncAPPDrop status: {fd.status}')
        self.assertEqual(2, fd.status)
Example #21
 def tearDownClass(cls):
     logger.debug(f"Removing temp directory {cls._temp}")
     shutil.rmtree(cls._temp, True)
Example #22
def get_user_Ids():
    logger.debug("Users")
    userIds = recommendationengine.get_User_Ids()
    return json.dumps(userIds)
Example #23
def mainIndex():
    logger.debug("User %s rating requested for movie %s")
    return render_template('index.html')
Example #24
def business_ratings(user_id, business_id):
    logger.debug("User %s rating requested for business %s", user_id,
                 business_id)
    ratings = recommendationengine.get_ratings_for_business_ids(
        user_id, [business_id])
    return json.dumps(ratings)
Example #25
def ratings_within_category(user_id, count, category):
    """Get Top Recommendations within the category"""
    logger.debug("User %s rating requested for category %s", user_id, category)
    ratings = recommendationengine.get_business_in_categories(
        user_id, count, category)
    return json.dumps(ratings)
Example #26
def ratings_within_state(user_id, count, state):
    """Get Top Recommendations within the state """
    logger.debug("User %s rating requested for state %s", user_id, state)
    ratings = recommendationengine.get_business_in_state(user_id, count, state)
    data = json.dumps(ratings)
    return data
Example #27
 def pause_writing(self):
     assert not self._paused
     self._paused = True
     if self._loop.get_debug():
         logger.debug("%r pauses writing", self)
Example #28
    def test_fullRound(self):
        """
        A test that exercises most of the REST interface exposed on top of the
        DataIslandManager
        """

        sessionId = "lala"
        restPort = 8989  # don't interfere with EAGLE default port
        args = ["--port", str(restPort), "-N", hostname, "-qqq"]
        dimProcess = tool.start_process("dim", args)

        with testutils.terminating(dimProcess, timeout=10):

            # Wait until the REST server becomes alive
            self.assertTrue(
                utils.portIsOpen("localhost", restPort, timeout=10),
                "REST server didn't come up in time",
            )

            # The DIM is still empty
            sessions = testutils.get(self, "/sessions", restPort)
            self.assertEqual(0, len(sessions))
            dimStatus = testutils.get(self, "", restPort)
            self.assertEqual(1, len(dimStatus["hosts"]))
            self.assertEqual(hostname, dimStatus["hosts"][0])
            self.assertEqual(0, len(dimStatus["sessionIds"]))

            # Create a session and check it exists
            testutils.post(
                self, "/sessions", restPort, '{"sessionId":"%s"}' % (sessionId)
            )
            sessions = testutils.get(self, "/sessions", restPort)
            self.assertEqual(1, len(sessions))
            self.assertEqual(sessionId, sessions[0]["sessionId"])
            self.assertDictEqual(
                {hostname: SessionStates.PRISTINE}, sessions[0]["status"]
            )

            # Add this complex graph spec to the session
            # The UIDs of the two leaf nodes of this complex.js graph are T and S
            # Since the original complexGraph doesn't have node information
            # we need to add it manually before submitting -- otherwise it will
            # get rejected by the DIM.
            with pkg_resources.resource_stream(
                "test", "graphs/complex.js"
            ) as f:  # @UndefinedVariable
                complexGraphSpec = json.load(codecs.getreader("utf-8")(f))
                logger.debug(f"Loaded graph: {f}")
            for dropSpec in complexGraphSpec:
                dropSpec["node"] = hostname
            testutils.post(
                self,
                "/sessions/%s/graph/append" % (sessionId),
                restPort,
                json.dumps(complexGraphSpec),
            )
            self.assertEqual(
                {hostname: SessionStates.BUILDING},
                testutils.get(self, "/sessions/%s/status" % (sessionId), restPort),
            )

            # Now we deploy the graph...
            testutils.post(
                self,
                "/sessions/%s/deploy" % (sessionId),
                restPort,
                "completed=SL_A,SL_B,SL_C,SL_D,SL_K",
                mimeType="application/x-www-form-urlencoded",
            )
            self.assertEqual(
                {hostname: SessionStates.RUNNING},
                testutils.get(self, "/sessions/%s/status" % (sessionId), restPort),
            )

            # ...and write to all 5 root nodes that are listening in ports
            # starting at 1111
            msg = os.urandom(10)
            for i in range(5):
                self.assertTrue(
                    utils.write_to("localhost", 1111 + i, msg, 2),
                    "Couldn't write data to localhost:%d" % (1111 + i),
                )

            # Wait until the graph has finished its execution. We'll know
            # it finished by polling the status of the session
            while (
                SessionStates.RUNNING
                in testutils.get(
                    self, "/sessions/%s/status" % (sessionId), restPort
                ).values()
            ):
                time.sleep(0.2)

            self.assertEqual(
                {hostname: SessionStates.FINISHED},
                testutils.get(self, "/sessions/%s/status" % (sessionId), restPort),
            )
            testutils.delete(self, "/sessions/%s" % (sessionId), restPort)
            sessions = testutils.get(self, "/sessions", restPort)
            self.assertEqual(0, len(sessions))
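
testutils.get, post and delete are thin wrappers around the managers' REST interface; a sketch of what get is assumed to do (the "/api" URL prefix is an assumption):

import http.client
import json


def get(test, url, port, host="localhost"):
    # Assumed helper: GET http://<host>:<port>/api<url>, assert a 200 reply
    # and return the decoded JSON body.
    conn = http.client.HTTPConnection(host, port)
    try:
        conn.request("GET", "/api" + url)
        resp = conn.getresponse()
        test.assertEqual(200, resp.status)
        return json.loads(resp.read().decode("utf-8"))
    finally:
        conn.close()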