Example #1
def wikiFeed(inetToolkit, buildType, port):
    # Submit a build request to the build server and submit the job.
    schemaTicker = 'tuple<rstring dataId, rstring bot, rstring domain, int32 lenOld,int32 lenNew, rstring SSEdata, rstring SSEevent>'
    #
    #    Define the application
    #
    topo = Topology("WikiFeed")

    tk.add_toolkit(topo, inetToolkit)

    source = topo.source(wikiStream)
    # Only one type of event; verify that it is the expected one.
    event = source.filter(lambda t: t["SSEevent"] == "message",
                          name="eventFilter")
    event.print(name="eth")

    eventTuple = event.map(lambda t: t, schema=schemaTicker)
    eventWin = eventTuple.last(100).trigger(1)
    rawRequest = op.Sink("com.ibm.streamsx.inet.rest::HTTPTupleView",
                         stream=eventWin,
                         params={
                             'port': 8081,
                             'context': 'gdaxEth',
                             'contextResourceBase': '/base'
                         },
                         name="TupleView")

    #
    #   Compile & Submit the Topology to Streams instance
    #
    streams_conf = common.build_streams_config("StreamingTurbine",
                                               credential.serviceCredentials)
    context.submit(context.ContextTypes.STREAMING_ANALYTICS_SERVICE,
                   topo,
                   config=streams_conf)
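The wikiStream callable passed to topo.source() is not included in this snippet. A minimal stand-in is sketched below (purely hypothetical) that emits dicts matching schemaTicker, so the topology can be smoke-tested without the live Wikipedia SSE feed:

import itertools
import time

def wikiStream():
    # Hypothetical stand-in for the real SSE-backed source; field names mirror schemaTicker.
    for i in itertools.count():
        time.sleep(1.0)
        yield {'dataId': str(i), 'bot': 'false', 'domain': 'en.wikipedia.org',
               'lenOld': 100, 'lenNew': 120,
               'SSEdata': '{"title": "Example page"}', 'SSEevent': 'message'}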
Example #2
 def _build_only(self, name, topo):
     result = context.submit("TOOLKIT", topo.graph)  # creates tk* directory
     print(name + ' (TOOLKIT):' + str(result))
     assert (result.return_code == 0)
     result = context.submit("BUNDLE", topo.graph)  # creates sab file
     print(name + ' (BUNDLE):' + str(result))
     assert (result.return_code == 0)
Example #3
 def test_DifferentPassword(self):
     sc = rest.StreamsConnection('user1', 'pass1')
     self.test_config[ConfigParams.STREAMS_CONNECTION] = sc
     with self.assertRaises(RuntimeError):
         submit(self.test_ctxtype,
                self.topo,
                self.test_config,
                username='******',
                password='******')
Example #4
def smokePending(inetToolkit, buildType, port):
    # Specify the Schema going in and out of the Streams operator.
    schemaRequest = 'tuple<int64 key, rstring request, rstring method, rstring pathInfo >'
    schemaResponse = 'tuple<int64 key, rstring request, rstring method, rstring pathInfo, rstring response >'

    topo = Topology("SmokePending")
    # Add external SPL functions; HTTPRequestProcess() is what is being tested here.
    tk.add_toolkit(topo, inetToolkit)
    # 'pending_source' is the loopback point.
    pending_source = PendingStream(topo)

    # Convert to Streams' tuple
    rsp = pending_source.stream.map(lambda t: t, schema=schemaResponse)

    rspFormatted = rsp.map(lambda t: json.dumps(t)).as_string()
    rspFormatted.sink(webLog("Output response"))  ## log the responses we send back.

    rawRequest = op.Map("com.ibm.streamsx.inet.rest::HTTPRequestProcess",
                        stream=rsp,
                        schema = schemaRequest,
                        params={'port': 8080,
                                'webTimeout': 5.0,
                                #'responseJsonAttributeName': 'string',
                                'context':'myStreams',
                                'contextResourceBase': '/base'},
                        name="TupleRequest")
    # write out data
    rawRequest.stream.sink(webLog('Input request'))  ## log what we have received.

    # determine what to work on
    # Filter does not change the type
    upperDo = rawRequest.stream.filter(lambda t: t["pathInfo"] == "/upper", name="upperFilter")
    lowerDo = rawRequest.stream.filter(lambda t: t["pathInfo"] == "/lower", name="lowerFilter")

    # do some processing
    upperDone = upperDo.map(upperString, schema = schemaResponse, name="upperProcess")
    lowerDone = lowerDo.map(lowerString, schema = schemaResponse, name="lowerProcess")
    ##
    processingDone = upperDone.union({lowerDone})

    hack = processingDone.map(lambda t: t, schema=schemaResponse)
    pending_source.complete(hack)  # close the loop
    # Hack: routing through an identity map avoids the compile error below.
    """ 
    pending_source.complete(processingDone)  # close the loop

SEVERE: Streaming Analytics service (StreamingTurbine): The submitted archive tk5385353137138356122.zip failed to scripts with status failed.
Exception in thread "main" java.lang.IllegalStateException: Error submitting archive for compilation:
"tk5385353137138356122/SmokePending/SmokePending.spl:5:1: CDISP0011E ERROR: A syntax error exists at the '}' token in 'graph'."
    """

    #
    #   Compile & Submit the Topology to Streams instance
    #

    streams_conf = common.build_streams_config("StreamingTurbine", credential.serviceCredentials)
    context.submit(context.ContextTypes.STREAMING_ANALYTICS_SERVICE, topo, config=streams_conf)
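webLog, upperString and lowerString are defined elsewhere in the original project. A rough sketch of what they might look like, given the request/response schemas above (illustrative only, not the original implementations):

def webLog(label):
    # Return a callable usable with .sink(); prints each tuple with a label.
    def _log(t):
        print(label, ":", t)
    return _log

def upperString(t):
    # Echo the request fields and add an upper-cased response attribute.
    t['response'] = t['request'].upper()
    return t

def lowerString(t):
    t['response'] = t['request'].lower()
    return t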
Example #5
def build_ingest_app(config, streams_config):
    gerrit_stream = gerrit.GerritEvents(**config['gerrit'])

    ingest_topo = topology.Topology(INGEST_JOB_NAME)
    event_stream = ingest_topo.source(gerrit_stream)
    event_stream.publish('openstack_review_stream')

    # ship it off to service to build and run
    context.submit('ANALYTICS_SERVICE', ingest_topo, config=streams_config)
Example #6
def commonSubmit(_cfg,
                 streams_cloud_service,
                 _topo,
                 credential=None,
                 DEBUG_BUILD=False):
    """submit to either the cloud or CP4D
    Args:
        _cfg: configuration when submitting from CP4D; None when targeting the cloud.
        streams_cloud_service (str): when submitting to the cloud, the name of the service;
            credential.py must have the appropriate mapping.
        _topo (topology): topology to submit
        credential: module providing vcap_conf for cloud submissions.
        DEBUG_BUILD: build the archive only (do not submit) and print the archive location.
    Returns:
        None; the submission result is only reported locally.
    Raises:
        NameYourError: The ``Raises`` is a list of all the exceptions
    Hints:
        -  Credentials are from the credential module
        -  fetching the


    """
    if _cfg is not None:
        # Disable SSL certificate verification if necessary
        _cfg[context.ConfigParams.SSL_VERIFY] = False
        submission_result = context.submit("DISTRIBUTED", _topo, config=_cfg)
    else:
        cloud = {
            context.ConfigParams.VCAP_SERVICES: credential.vcap_conf,
            context.ConfigParams.SERVICE_NAME: streams_cloud_service,
            context.ContextTypes.STREAMING_ANALYTICS_SERVICE:
            "STREAMING_ANALYTIC",
            context.ConfigParams.FORCE_REMOTE_BUILD: True,
        }
        if not DEBUG_BUILD:
            submission_result = context.submit("STREAMING_ANALYTICS_SERVICE",
                                               _topo,
                                               config=cloud)
        else:
            submission_result = context.submit("BUILD_ARCHIVE",
                                               _topo,
                                               config=cloud)
            print("archive can be found:", submission_result)

    # The submission_result object contains information about the running application, or job
    if submission_result.job:
        report = "JobId:{} Name:{} ".format(submission_result['id'],
                                            submission_result['name'])
    else:
        report = "Somthing did work:{}".format(submission_result)
Example #7
def build_filter_app(config, streams_config):
    def comments_only(data):
        jdata = json.loads(data)
        print(jdata, file=sys.stderr)
        return jdata["type"] == "comment-added"

    # define our streams app
    filter_topo = topology.Topology(FILTER_JOB_NAME)
    event_stream2 = filter_topo.subscribe('openstack_review_stream')
    comments_stream = event_stream2.filter(comments_only)
    comments_stream.publish('comments')
    c_view = comments_stream.view(name=COMMENT_VIEW_NAME)

    # ship it off to service to build and run
    context.submit('ANALYTICS_SERVICE', filter_topo, config=streams_config)
Example #8
 def _launch_sample_job(self):
     # this job is monitored by test.jobs::TestJobStatusSource application
     # PE crash is forced by this application in order to trigger a notification
     topo = Topology("SampleCrashApp")
     self._add_toolkits(topo)
     # Call the crash composite
     test_op = op.Source(topo, "test.jobs::SampleCrashSource",
                         'tuple<boolean dummy>')
     # prepare config and submit the job to Streaming Analytics service
     config = {}
     sc = sr.StreamingAnalyticsConnection()
     config[context.ConfigParams.STREAMS_CONNECTION] = sc
     context.submit(context.ContextTypes.STREAMING_ANALYTICS_SERVICE,
                    topo,
                    config=config)
Example #9
def gdaxFeed(inetToolkit, buildType, port):
    # Submit a build request to the build server and submit the job.
    schemaTicker = 'tuple<rstring ttype, float32 price, float32 low_24h, float32 best_ask, rstring side, float32 best_bid, float32 open_24h, rstring product_id, int32 sequence, int32 trade_id, rstring time, float32 last_size, float32 volume_24h, float32 volume_30d, float32 high_24h>'
    #
    #    Define the application
    #
    topo = Topology("GdaxFeed")

    tk.add_toolkit(topo, inetToolkit)

    source = topo.source(gdaxData)
    # Split out the securities : ETH-USD, LTC-USD, BTC-USD
    eth = source.filter(lambda t: t["product_id"] == "ETH-USD",
                        name="ethFilter")
    ltc = source.filter(lambda t: t["product_id"] == "LTC-USD",
                        name="ltcFilter")
    btc = source.filter(lambda t: t["product_id"] == "BTC-USD",
                        name="btcFilter")
    eth.print(name="eth")
    ltc.print(name="ltc")
    btc.print(name="btc")

    ethTuple = eth.map(lambda t: t, schema=schemaTicker)
    #ethWin = ethTuple.last(datetime.timedelta(minutes=2))
    ethWin = ethTuple.last(100).trigger(1)
    rawRequest = op.Sink("com.ibm.streamsx.inet.rest::HTTPTupleView",
                         stream=ethWin,
                         params={
                             'port': 8080,
                             'context': 'gdaxEth',
                             'contextResourceBase': '/base'
                         },
                         name="TupleView")

    #
    #   Compile & Submit the Topology to Streams instance
    #
    streams_conf = common.build_streams_config("StreamingTurbine",
                                               credential.serviceCredentials)
    context.submit(context.ContextTypes.STREAMING_ANALYTICS_SERVICE,
                   topo,
                   config=streams_conf)

    def test():
        print("enter")
        tmp = gdaxData()
        while True:
            print(tmp.__next__())
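gdaxData is not shown in this snippet either. A throwaway stand-in (hypothetical) that cycles through the three product ids, so each of the filters above receives data:

import itertools
import time

def gdaxData():
    # Hypothetical stand-in for the real GDAX feed; field names mirror schemaTicker.
    products = ['ETH-USD', 'LTC-USD', 'BTC-USD']
    for i in itertools.count():
        time.sleep(0.5)
        yield {'ttype': 'ticker', 'product_id': products[i % 3],
               'price': 100.0 + i, 'best_ask': 100.5, 'best_bid': 99.5,
               'open_24h': 95.0, 'low_24h': 90.0, 'high_24h': 105.0,
               'volume_24h': 1000.0, 'volume_30d': 30000.0,
               'side': 'buy', 'sequence': i, 'trade_id': i,
               'time': '2018-01-01T00:00:00Z', 'last_size': 0.1}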
Example #10
 def _launch(self, topo):
     rc = context.submit('STREAMING_ANALYTICS_SERVICE', topo,
                         self.test_config)
     print(str(rc))
     if rc is not None:
         if (rc.return_code == 0):
             rc.job.cancel()
Example #11
    def test_image_name_image_tag(self):
        topo = Topology("test_image_name_image_tag")
        heartbeat = topo.source(lambda: itertools.count())
        heartbeat.print()

        image_name = 'py-tst'
        image_tag = 'v1.0'
        cfg = {ConfigParams.SSL_VERIFY: False}
        jc = JobConfig()
        jc.raw_overlay = {
            'edgeConfig': {
                'imageName': image_name,
                'imageTag': image_tag,
                'pipPackages': ['pandas', 'numpy'],
                'rpms': ['atlas-devel']
            }
        }
        jc.add(cfg)
        try:
            submission_result = submit(ContextTypes.EDGE, topo.graph, cfg)
            print(str(submission_result))
            self.assertTrue(submission_result is not None)
            self.assertTrue(self._is_not_blank(submission_result.image))
            self.assertTrue(self._is_not_blank(submission_result.imageDigest))
            self.assertTrue(image_name in submission_result.image)
            self.assertTrue(image_tag in submission_result.image)
        except RuntimeError as e:
            print(str(e))
            self.skipTest("Skip test, CPD does not support EDGE.")
Example #12
def submitToStreams(topology, streams_cfg, streams_instance):
    """Cancel if same name job is active and submit.
        Args:
            topology: streams application topology
            streams_cfg : connection information - from get_instance()
            streams_instance :  from get_instance()
        Notes:
            Create local copy of the streams config so this can be thread-safe
    """
    local_streams_cfg = dict(streams_cfg)

    # Cancel the job from the instance if it is already running...
    for job in streams_instance.get_jobs():
        if job.name == topology.name:
            print("Cancelling old job:", job.name)
            job.cancel()

    # Set the job config
    job_config = context.JobConfig(job_name=topology.name, tracing="debug")
    job_config.add(local_streams_cfg)

    # Actually submit the job
    print("Building and submitting new job:", topology.name)
    submission_result = context.submit('DISTRIBUTED', topology,
                                       local_streams_cfg)
    return submission_result
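The docstring above refers to a get_instance() helper that is not part of this snippet. Assuming it returns the (streams_cfg, streams_instance) pair described in the Args, a hypothetical caller looks like:

from streamsx.topology.topology import Topology

# get_instance() is the caller's own helper (not shown here).
streams_cfg, streams_instance = get_instance()

topo = Topology("SubmitDemo")
topo.source(lambda: range(5)).print()

result = submitToStreams(topo, streams_cfg, streams_instance)
print("Submission result:", result)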
Example #13
def cloudSubmit(instance, streams_cloud_service, topology, credential):
    """submit cloud streams instance
    Args:
        instance : when submitting from CP4D or Cloud
        streams_cloud_service : when submitting to the cloud, the name of the service; credential.py must have the appropriate mapping
        topology : topology to submit
        credential : streams instance cloud credential

    """
    for job in instance.get_jobs():
        if topology.name in job.name:
            print("Cancelling old job:", job.name)
            job.cancel()
    cloud = {
        context.ConfigParams.VCAP_SERVICES: credential.vcap_conf,
        context.ConfigParams.SERVICE_NAME: streams_cloud_service,
        context.ContextTypes.STREAMING_ANALYTICS_SERVICE: "STREAMING_ANALYTIC",
        context.ConfigParams.FORCE_REMOTE_BUILD: True,
    }
    submission_result = context.submit("STREAMING_ANALYTICS_SERVICE",
                                       topology,
                                       config=cloud)

    # The submission_result object contains information about the running application, or job
    if submission_result.job:
        report = "JobId:{} Name:{} ".format(submission_result['id'],
                                            submission_result['name'])
    else:
        report = "Somthing did work:{}".format(submission_result)
Example #14
 def test_KeepArtifacts(self):
     if ("TestBundleMethodsNew" in str(self)):
         if 'STREAMS_INSTALL' not in os.environ:
             self.skipTest('TestBundleMethodsNew requires STREAMS_INSTALL')
     self.test_config['topology.keepArtifacts'] = True
     self.result = submit(self.test_ctxtype, self.topo, self.test_config)
     verifyArtifacts(self)
Example #15
def submitProcess(topology=None,
                  streamsService=None,
                  serviceType=None,
                  buildType=None,
                  jobName=None,
                  cancel=False):
    #
    #   With the composed topology, process it: build a bundle and submit it to the
    #   build server, or (eventually) send it to ICP.
    #   - streamsService : where the application will execute
    #   - serviceType : buildArchive | streams application
    #   - send to the build server, then on to the Streams service
    #

    print("Submission parameters:")
    print("  - serviceType:%s" % serviceType)
    print("  - buildType:%s" % buildType)
    print("  - jobName:%s" % jobName)
    print("  - cancel:%s" % cancel)
    streams_conf = build_streams_config(
        streamsService, credential.StreamsServices[streamsService])
    #
    if cancel:
        cancel_job(streamsService,
                   streams_conf,
                   name=jobName,
                   namespace=jobName)
    # submit
    if (serviceType == "BUILD_ARCHIVE"):
        contextType = context.ContextTypes.BUILD_ARCHIVE
    else:
        contextType = context.ContextTypes.STREAMING_ANALYTICS_SERVICE
    submitStatus = context.submit(contextType, topology, config=streams_conf)
    return submitStatus
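An illustrative invocation of submitProcess with the parameters the comment block describes (all values hypothetical; streamsService must be a key in credential.StreamsServices):

status = submitProcess(topology=topo,
                       streamsService="StreamingTurbine",
                       serviceType="STREAMING_ANALYTICS_SERVICE",
                       buildType=None,
                       jobName="SmokeTest",
                       cancel=True)
print("Submission status:", status)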
Example #16
 def _standalone_test(self, config):
     """ Test using STANDALONE.
     Success is solely indicated by the process completing and returning zero.
     """
     sr = stc.submit(stc.ContextTypes.STANDALONE, self.topology, config)
     self.submission_result = sr
     self.result = {'passed': sr['return_code'], 'submission_result': sr}
     return sr['return_code'] == 0
Example #17
def _submit_build(cmd_args, topo):
     cfg = {}
     # Only supported for remote builds.
     cfg[ConfigParams.FORCE_REMOTE_BUILD] = True
     if cmd_args.disable_ssl_verify:
         cfg[ConfigParams.SSL_VERIFY] = False
         urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
     _sc_options(cmd_args, cfg)
     return submit('BUNDLE', topo, cfg)
Example #18
def _submit(cmd_args, app):
    cfg = app[1]
    if cmd_args.create_bundle:
        ctxtype = ctx.ContextTypes.BUNDLE
    elif cmd_args.service_name:
        cfg[ctx.ConfigParams.FORCE_REMOTE_BUILD] = True
        cfg[ctx.ConfigParams.SERVICE_NAME] = cmd_args.service_name
        ctxtype = ctx.ContextTypes.STREAMING_ANALYTICS_SERVICE
    sr = ctx.submit(ctxtype, app[0], cfg)
    return sr
Example #19
 def _distributed_test(self, config, username, password):
     sjr = stc.submit(stc.ContextTypes.DISTRIBUTED, self.topology, config)
     self.submission_result = sjr
     if sjr['return_code'] != 0:
         _logger.error("Failed to submit job to distributed instance.")
         return False
     self.streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
     if self.streams_connection is None:
         self.streams_connection = self.submission_result.job.rest_client._sc
     return self._distributed_wait_for_result(stc.ContextTypes.DISTRIBUTED, config)
Example #20
 def _streaming_analytics_test(self, ctxtype, config):
     sjr = stc.submit(ctxtype, self.topology, config)
     self.submission_result = sjr
     self.streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
     if self.streams_connection is None:
         self.streams_connection = Tester._get_sas_conn(config)
     if sjr['return_code'] != 0:
         _logger.error("Failed to submit job to Streaming Analytics instance")
         return False
     return self._distributed_wait_for_result(ctxtype, config)
Example #21
 def _streaming_analytics_test(self, ctxtype, config):
     sjr = stc.submit(ctxtype, self.topology, config)
     self.submission_result = sjr
     self.streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
     if self.streams_connection is None:
         self.streams_connection = Tester._get_sas_conn(config)
     if sjr['return_code'] != 0:
         _logger.error("Failed to submit job to Streaming Analytics instance")
         return False
     return self._distributed_wait_for_result(ctxtype, config)
Example #22
 def _distributed_test(self, config, username, password):
     sjr = stc.submit(stc.ContextTypes.DISTRIBUTED, self.topology, config)
     self.submission_result = sjr
     if sjr['return_code'] != 0:
         _logger.error("Failed to submit job to distributed instance.")
         return False
     self.streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
     if self.streams_connection is None:
         self.streams_connection = self.submission_result.job.rest_client._sc
     return self._distributed_wait_for_result(stc.ContextTypes.DISTRIBUTED, config)
Example #23
 def _standalone_test(self, config):
     """ Test using STANDALONE.
     Success is solely indicated by the process completing and returning zero.
     """
     if self._run_for:
         config = config.copy()
         config['topology.standaloneRunTime'] = self._run_for + 5.0
     sr = stc.submit(stc.ContextTypes.STANDALONE, self.topology, config)
     self.submission_result = sr
     self.result = {'passed': sr['return_code'], 'submission_result': sr}
     return sr['return_code'] == 0
Example #24
 def _streaming_analytics_test(self, ctxtype, config):
     sjr = stc.submit(ctxtype, self.topology, config)
     self.submission_result = sjr
     self.streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
     if self.streams_connection is None:
         vcap_services = config.get(ConfigParams.VCAP_SERVICES)
         service_name = config.get(ConfigParams.SERVICE_NAME)
         self.streams_connection = StreamingAnalyticsConnection(vcap_services, service_name)
     if sjr['return_code'] != 0:
         _logger.error("Failed to submit job to Streaming Analytics instance")
         return False
     return self._distributed_wait_for_result(ctxtype, config)
Example #25
 def _streaming_analytics_test(self, ctxtype, config):
     sjr = stc.submit(ctxtype, self.topology, config)
     self.submission_result = sjr
     self.streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
     if self.streams_connection is None:
         vcap_services = config.get(ConfigParams.VCAP_SERVICES)
         service_name = config.get(ConfigParams.SERVICE_NAME)
         self.streams_connection = StreamingAnalyticsConnection(vcap_services, service_name)
     if sjr['return_code'] != 0:
         _logger.error("Failed to submit job to Streaming Analytics instance")
         return False
     return self._distributed_wait_for_result(ctxtype, config)
Example #26
 def _launch_sample_job(self):
     # this job is monitored by test.jobs::TestJobStatusSource application
     # PE crash is forced by this application in order to trigger a notification
     topo = Topology("SampleCrashApp")
     self._add_toolkits(topo)
     # Call the crash composite
     test_op = op.Source(topo, "test.jobs::SampleCrashSource",
                         'tuple<boolean dummy>')
     config = {}
     config[context.ConfigParams.SSL_VERIFY] = False
     return context.submit(context.ContextTypes.DISTRIBUTED,
                           topo,
                           config=config)
Example #27
def _submit_topology(cmd_args, app):
    """Submit a Python topology to the service.
    This includes an SPL main composite wrapped in a Python topology.
    """
    cfg = app.cfg
    if cmd_args.create_bundle:
        ctxtype = ctx.ContextTypes.BUNDLE
    elif cmd_args.service_name:
        cfg[ctx.ConfigParams.FORCE_REMOTE_BUILD] = True
        cfg[ctx.ConfigParams.SERVICE_NAME] = cmd_args.service_name
        ctxtype = ctx.ContextTypes.STREAMING_ANALYTICS_SERVICE
    sr = ctx.submit(ctxtype, app.app, cfg)
    return sr
Example #28
 def _distributed_test(self, config, username, password):
     self.streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
     if self.streams_connection is None:
         # Supply a default StreamsConnection object with SSL verification disabled, because the default
         # streams server is not shipped with a valid SSL certificate
         self.streams_connection = StreamsConnection(username, password)
         self.streams_connection.session.verify = False
         config[ConfigParams.STREAMS_CONNECTION] = self.streams_connection
     sjr = stc.submit(stc.ContextTypes.DISTRIBUTED, self.topology, config)
     self.submission_result = sjr
     if sjr['return_code'] != 0:
         _logger.error("Failed to submit job to distributed instance.")
         return False
     return self._distributed_wait_for_result()
Example #29
 def _distributed_test(self, config, username, password):
     self.streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
     if self.streams_connection is None:
         # Supply a default StreamsConnection object with SSL verification disabled, because the default
         # streams server is not shipped with a valid SSL certificate
         self.streams_connection = StreamsConnection(username, password)
         self.streams_connection.session.verify = False
         config[ConfigParams.STREAMS_CONNECTION] = self.streams_connection
     sjr = stc.submit(stc.ContextTypes.DISTRIBUTED, self.topology, config)
     self.submission_result = sjr
     if sjr['return_code'] != 0:
         _logger.error("Failed to submit job to distributed instance.")
         return False
     return self._distributed_wait_for_result(stc.ContextTypes.DISTRIBUTED, config)
Example #30
    def test_spl_generation(self):
        name = 'test_spl_generation'
        topo = Topology(name)
        scriptname = os.path.basename(__file__)[:-3]
        service_documentation={
          'title': '__SERVICE__TITLE__',
          'description': '__SERVICE__DESC__',
        }
        endpoint_documentation = dict()
        endpoint_documentation['summary'] = '__ENDPOINT__SUMMARY__'
        endpoint_documentation['tags'] = ['__ENDPOINT__TAG1__', '__ENDPOINT__TAG2__']
        endpoint_documentation['description'] = '__ENDPOINT__DESC__'
        doc_attr = dict()
        descr = {'id': {'description': '__ENDPOINT__ATTR1__DESC__'}}
        doc_attr.update(descr)
        descr = {'num': {'description': '__ENDPOINT__ATTR2__DESC__'}}
        doc_attr.update(descr)
        endpoint_documentation['attributeDescriptions'] = doc_attr

        stream1 = topo.source(lambda : itertools.count()).as_string()
        stream1.for_each(EndpointSink(
            consuming_reads=True,
            service_documentation=service_documentation,
            endpoint_documentation=endpoint_documentation))

        submission_result = submit(ContextTypes.TOOLKIT, topo)
        # check generated SPL file
        splfile = submission_result['toolkitRoot']+'/'+scriptname+'/'+name+'.spl'
        with open(splfile, "r") as fileHandle:
            ep_annotation = [line.strip() for line in fileHandle if "@endpoint" in line]
        print(str(ep_annotation))
        self.assertTrue('__ENDPOINT__SUMMARY__' in str(ep_annotation), msg=ep_annotation)
        self.assertTrue('__ENDPOINT__TAG1__' in str(ep_annotation), msg=ep_annotation)
        self.assertTrue('__ENDPOINT__TAG2__' in str(ep_annotation), msg=ep_annotation)
        self.assertTrue('__ENDPOINT__DESC__' in str(ep_annotation), msg=ep_annotation)
        self.assertTrue('__ENDPOINT__ATTR1__DESC__' in str(ep_annotation), msg=ep_annotation)
        self.assertTrue('__ENDPOINT__ATTR2__DESC__' in str(ep_annotation), msg=ep_annotation)
        with open(splfile, "r") as fileHandle:
            service_annotation = [line.strip() for line in fileHandle if "@service" in line]
        print(str(service_annotation))
        self.assertTrue('__SERVICE__TITLE__' in str(service_annotation), msg=service_annotation)
        self.assertTrue('__SERVICE__DESC__' in str(service_annotation), msg=service_annotation)
        # test consumingReads operator parameter in generated SPL code
        with open(splfile, "r") as fileHandle:
            sink_invocation = [line.strip() for line in fileHandle if "spl.endpoint::EndpointSink" in line or 'param' in line or 'consumingReads' in line]
        print(str(sink_invocation))
        self.assertTrue('spl.endpoint::EndpointSink' in sink_invocation[-3]
                        and 'param' in sink_invocation[-2]
                        and 'consumingReads' in sink_invocation[-1] and 'true' in sink_invocation[-1],
                        msg=sink_invocation)
Example #31
    def test_submit_edge(self):
        topo = Topology("test_submit_edge")
        heartbeat = topo.source(lambda: itertools.count())
        heartbeat.print()

        cfg = {ConfigParams.SSL_VERIFY: False}
        try:
            submission_result = submit(ContextTypes.EDGE, topo.graph, cfg)
            print(str(submission_result))
            self.assertTrue(submission_result is not None)
            self.assertTrue(self._is_not_blank(submission_result.image))
            self.assertTrue(self._is_not_blank(submission_result.imageDigest))
        except RuntimeError as e:
            print(str(e))
            self.skipTest("Skip test, CPD does not support EDGE.")
Example #32
 def _submitjob(self, args, sab=None):
     args.insert(0, "--disable-ssl-verify")
     args.insert(1, "submitjob")
     if sab:
         args.insert(2, sab)
     else:
         topo = Topology()
         topo.source([1])
         cfg = {}
         cfg[ConfigParams.SSL_VERIFY] = False
         src = submit("BUNDLE", topo, cfg)
         sab_path = src["bundlePath"]
         args.insert(2, sab_path)
         self.files_to_remove.append(sab_path)
     rc, val = streamtool.run_cmd(args=args)
     return rc, val
Example #33
    def setUpClass(cls):
        # Create the job
        topo = Topology()
        s = topo.source([1])
        p = s.parallel(3, name='test1')
        p = p.filter(lambda x : x > 0)
        e = p.end_parallel()

        cfg = {}
        cfg[ConfigParams.SSL_VERIFY] = False
        src = submit("BUNDLE", topo, cfg)

        cls.sab_path = str(src['bundlePath'])
        if cpd_setup():
            cls.initial_config = str(src['jobConfigPath'])

            cls.files_to_remove = [src['bundlePath'], src['jobConfigPath']]
            cls.new_parallelRegionWidth = 'test*=2'
Example #34
    def test_from_topology(self):
        topo = Topology('SabTest', namespace='mynamespace')
        s = topo.source([1, 2])
        es = s.for_each(lambda x: None)
        cfg = {}
        jc = JobConfig(job_name='ABCD', job_group='XXG', preload=True)
        jc.add(cfg)
        bb = submit('BUNDLE', topo, cfg)
        self.assertIn('bundlePath', bb)
        self.assertIn('jobConfigPath', bb)

        with open(bb['jobConfigPath']) as json_data:
            jct = JobConfig.from_overlays(json.load(json_data))
            self.assertEqual(jc.job_name, jct.job_name)
            self.assertEqual(jc.job_group, jct.job_group)
            self.assertEqual(jc.preload, jct.preload)

        os.remove(bb['bundlePath'])
        os.remove(bb['jobConfigPath'])
Example #35
    def test_submitjob_submission_parameters_complex(self):
        paramList1, paramList2 = self.generateRandom()
        operator1 = "test1"
        operator2 = "test2"

        topo = Topology()
        lower = topo.create_submission_parameter(paramList1[0][0])
        upper = topo.create_submission_parameter(paramList1[1][0])

        s = topo.source([1])
        s.for_each(Test_metrics(lower), name=operator1)
        s.for_each(Test_metrics(upper), name=operator2)

        cfg = {}
        cfg[ConfigParams.SSL_VERIFY] = False
        src = submit("BUNDLE", topo, cfg)
        sab_path = src["bundlePath"]

        # Submit the job
        args = ["--jobname", str(self.name)]
        for prop in paramList2:
            args.extend(["-P", prop])
        rc, my_job = self._submitjob(args, sab=sab_path)
        self.files_to_remove.append(sab_path)
        self.jobs_to_cancel.extend([my_job])

        test1 = my_job.get_operators(operator1)[0]
        test2 = my_job.get_operators(operator2)[0]
        m1, m2 = None, None
        for _ in range(100):
            if m1 and m2:
                break
            time.sleep(1)
            m1 = test1.get_metrics(paramList1[0][1])
            m2 = test2.get_metrics(paramList1[1][1])

        self.assertEqual(rc, 0)

        if not (m1 and m2):
            self.fail("Submission parameters failed to be created")
Example #36
 def submit_to_service(self, topo, cfg):
     rc = submit("ANALYTICS_SERVICE", topo, cfg)
     self.assertEqual(0, rc['return_code'])
     rc.job.cancel()
     return rc
Example #37
 def _submit(self, topology):
     return context.submit(self._submission_context, topology, username = self.sws_username, password=self.sws_password)