def post(port=None):
    # NOTE: `port` is required in practice; the default of None would
    # fail the int() conversion below.
    channel = grpc.insecure_channel('localhost:%d' % int(port))
    stub = CoreStub(channel)
    resp = stub.StartSession(
        cpb.SessionRequest(user_agent='modsquad', version=version))
    return MessageToJson(resp)
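# --- Added context (sketch) ----------------------------------------------
# The snippet above assumes module-level imports and a `version` value that
# are not shown. A plausible reconstruction, inferred from the aliases used
# here and from the environment-variable variant below (which reads the
# protocol version off the proto descriptor):
import grpc
import core_pb2 as cpb                      # generated protobuf module
from core_pb2_grpc import CoreStub          # generated gRPC stub class
from google.protobuf.json_format import MessageToJson

# Assumption: the protocol version is baked into the .proto file as a
# file-level option, as the variant below demonstrates explicitly.
version = cpb.DESCRIPTOR.GetOptions().Extensions[cpb.protocol_version]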
def test_session(self):
    channel = grpc.insecure_channel('localhost:45042')
    stub = core_pb2_grpc.CoreStub(channel)
    msg = core_pb2.SessionRequest(user_agent="unittest", version="Foo")
    session = stub.StartSession(msg)
    self.assertEqual(session.response_info.status.code, core_pb2.OK)

    session_end_response = stub.EndSession(session.context)
    self.assertEqual(session_end_response.status.code, core_pb2.OK)

    # Try to end a session that does not exist
    fake_context = core_pb2.SessionContext(session_id="fake context")
    session_end_response = stub.EndSession(fake_context)
    self.assertEqual(
        session_end_response.status.code, core_pb2.SESSION_UNKNOWN)
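# --- Added context (sketch) ----------------------------------------------
# test_session above (and test_pipeline below) are methods of a
# unittest.TestCase subclass that is not shown. A minimal scaffold, with a
# hypothetical class name, assuming a TA2 server listening on
# localhost:45042:
import unittest

import grpc
import core_pb2
import core_pb2_grpc


class TA2SessionTests(unittest.TestCase):
    """Integration tests against a live TA2 server on localhost:45042."""
    # test_session and test_pipeline are defined here.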
def post(port=None):
    # get the protocol version
    version = core_pb2.DESCRIPTOR.GetOptions().Extensions[
        core_pb2.protocol_version]

    # Get the address from an environment variable. This must be set in the
    # executing shell. During automated evaluation runs, the environment
    # variable will be set by Kubernetes.
    server_channel_address = os.environ.get('TA2_SERVER_CONN')

    # complain in the return if we didn't get an address to connect to
    if server_channel_address is None:
        tangelo.http_status(500)
        return {'error': 'TA2_SERVER_CONN environment variable is not set!'}

    #channel = grpc.insecure_channel('localhost:%d' % (int(port)))
    channel = grpc.insecure_channel(server_channel_address)
    stub = core_pb2_grpc.CoreStub(channel)
    resp = stub.StartSession(
        core_pb2.SessionRequest(user_agent='modsquad', version=version))
    return MessageToJson(resp)
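# --- Added usage sketch ---------------------------------------------------
# Because the endpoint reads the TA2 address from TA2_SERVER_CONN, a quick
# local smoke test can set the variable in-process before calling it. In
# real runs the executing shell (or Kubernetes) sets it instead.
import os

os.environ['TA2_SERVER_CONN'] = 'localhost:45042'  # assumed local server
print(post())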
def test_pipeline(self):
    """Tries setting up a new pipeline."""
    channel = grpc.insecure_channel('localhost:45042')
    stub = core_pb2_grpc.CoreStub(channel)
    msg = core_pb2.SessionRequest(user_agent="unittest", version="Foo")
    session = stub.StartSession(msg)
    self.assertEqual(session.response_info.status.code, core_pb2.OK)

    pipeline_request = core_pb2.PipelineCreateRequest(
        context=session.context,
        dataset_uri="file:///home/sheath/projects/D3M/cmu-ta3/test-data/185_baseball/TRAIN/dataset_TRAIN/datasetDoc.json",
        task=core_pb2.TASK_TYPE_UNDEFINED,
        task_subtype=core_pb2.TASK_SUBTYPE_UNDEFINED,
        task_description="",
        output=core_pb2.OUTPUT_TYPE_UNDEFINED,
        metrics=[],
        target_features=[],
        predict_features=[],
        max_pipelines=10)
    p = stub.CreatePipelines(pipeline_request)
    for response in p:
        self.assertEqual(response.response_info.status.code, core_pb2.OK)
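# --- Added usage sketch ---------------------------------------------------
# With the scaffold above, the suite runs under the standard unittest
# runner (requires a live TA2 server on localhost:45042):
if __name__ == '__main__':
    unittest.main()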
def run():
    channel = grpc.insecure_channel('localhost:45042')
    stub = crpc.CoreStub(channel)
    dstub = drpc.DataExtStub(channel)        # unused in this walkthrough
    dfstub = dfrpc.DataflowExtStub(channel)

    # Start a session
    session_response = stub.StartSession(
        core.SessionRequest(user_agent="xxx", version="1.0"))
    session_context = session_response.context
    print("Session started (%s)" % str(session_context.session_id))

    # Build the pipeline creation request
    dataset_uri = "file:///tmp/data/185_baseball/185_baseball_dataset/datasetDoc.json"
    some_features = [
        core.Feature(resource_id="0", feature_name="d3mIndex"),
        core.Feature(resource_id="0", feature_name="Games_played"),
        core.Feature(resource_id="0", feature_name="Runs"),
        core.Feature(resource_id="0", feature_name="Hits"),
        core.Feature(resource_id="0", feature_name="Home_runs")
    ]
    target_features = [
        core.Feature(resource_id="0", feature_name="Hall_of_Fame")
    ]
    task = core.TaskType.Value('CLASSIFICATION')
    task_subtype = core.TaskSubtype.Value('MULTICLASS')
    task_description = "Classify Hall of Fame"
    output = core.OutputType.Value('OUTPUT_TYPE_UNDEFINED')
    metrics = [
        core.PerformanceMetric.Value('F1_MICRO'),
        core.PerformanceMetric.Value('F1_MACRO')
    ]
    max_pipelines = 10
    pipeline_ids = []

    print("Training with some features")
    pc_request = core.PipelineCreateRequest(
        context=session_context,
        dataset_uri=dataset_uri,
        predict_features=some_features,
        task=task,
        task_subtype=task_subtype,
        task_description=task_description,
        output=output,
        metrics=metrics,
        target_features=target_features,
        max_pipelines=max_pipelines)

    # Iterate over the streamed creation results
    result = stub.CreatePipelines(pc_request)
    for pcr in result:
        print(str(pcr))
        # Optionally stream dataflow results per pipeline:
        # for gdr in dfstub.GetDataflowResults(
        #         dfext.PipelineReference(context=session_context,
        #                                 pipeline_id=pcr.pipeline_id)):
        #     print(gdr)
        if len(pcr.pipeline_info.scores) > 0:
            pipeline_id = pcr.pipeline_id
            pipeline_ids.append(pipeline_id)
            dflow = dfstub.DescribeDataflow(
                dfext.PipelineReference(context=session_context,
                                        pipeline_id=pipeline_id))
            print(dflow)

            exres = stub.ExportPipeline(
                core.PipelineExportRequest(
                    context=session_context,
                    pipeline_id=pipeline_id,
                    pipeline_exec_uri="file:///tmp/{}".format(pipeline_id)))
            print(exres)
            # If predictions were produced during training, they could be
            # loaded here:
            # if pcr.pipeline_info.predict_result_uri:
            #     df = pandas.read_csv(pcr.pipeline_info.predict_result_uri,
            #                          index_col="d3mIndex")
            #     print(df)

    print("************** Executing/Testing Pipelines")
    # Execute each trained pipeline against the dataset
    for pipeline_id in pipeline_ids:
        print("Executing Pipeline %s" % pipeline_id)
        ep_request = core.PipelineExecuteRequest(context=session_context,
                                                 pipeline_id=pipeline_id,
                                                 dataset_uri=dataset_uri)
        for ecr in stub.ExecutePipeline(ep_request):
            print(str(ecr))
            # Proto string fields are never None; check for empty instead
            if ecr.result_uri:
                df = pandas.read_csv(ecr.result_uri, index_col="d3mIndex")
                print(df)

    list_request = core.PipelineListRequest(context=session_context)
    lrr = stub.ListPipelines(list_request)
    print(lrr.pipeline_ids)

    print("************** Cached pipeline create results")
    pcrr = core.PipelineCreateResultsRequest(context=session_context,
                                             pipeline_ids=lrr.pipeline_ids)
    for gcpr in stub.GetCreatePipelineResults(pcrr):
        print(str(gcpr))

    print("************** Cached pipeline execute results")
    perr = core.PipelineExecuteResultsRequest(context=session_context,
                                              pipeline_ids=lrr.pipeline_ids)
    for gepr in stub.GetExecutePipelineResults(perr):
        print(str(gepr))

    print("*********** Updating Metric to Accuracy.. Create pipelines again")
    metric = core.PerformanceMetric.Value('ACCURACY')
    ups_request = core.SetProblemDocRequest(
        context=session_context,
        updates=[
            core.SetProblemDocRequest.ReplaceProblemDocField(metric=metric)
        ])
    print(stub.SetProblemDoc(ups_request))

    print("********** Re-running pipeline creation")
    for pcr in stub.CreatePipelines(
            core.PipelineCreateRequest(context=session_context)):
        print(str(pcr))

    stub.EndSession(session_context)
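# --- Added context (sketch) ----------------------------------------------
# run() relies on several aliased modules that are not shown. The module
# names below are assumptions inferred from the aliases (generated gRPC
# modules for the D3M core, data-ext, and dataflow-ext protos), plus a
# conventional entry point:
import grpc
import pandas

import core_pb2 as core
import core_pb2_grpc as crpc
import data_ext_pb2_grpc as drpc           # hypothetical module name
import dataflow_ext_pb2 as dfext           # hypothetical module name
import dataflow_ext_pb2_grpc as dfrpc      # hypothetical module name

if __name__ == '__main__':
    run()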
def start_session(raven_json_str=None):
    """Start session command

    This command sends a UserAgent and the protocol version
    to the TA2 service.
    """
    if raven_json_str is None:
        err_msg = 'No data found. Please send a "user_agent"'
        return get_failed_precondition_sess_response(err_msg)

    # The UI has sent JSON in string format that contains the user_agent
    try:
        raven_dict = json.loads(raven_json_str)
    except json.decoder.JSONDecodeError as err_obj:
        err_msg = 'Failed to convert UI Str to JSON: %s' % (err_obj)
        return get_failed_precondition_sess_response(err_msg)

    # Check for a user_agent
    #
    if KEY_USER_AGENT_FROM_UI not in raven_dict:
        return get_failed_precondition_sess_response(ERR_MSG_NO_USER_AGENT)

    # The protocol version always comes from the latest
    # version we have in the repo (just copied in for now)
    #
    raven_dict['version'] = TA2Connection.get_protocol_version()

    # --------------------------------
    # Convert back to a string for the TA2 call
    # --------------------------------
    content = json.dumps(raven_dict)

    # --------------------------------
    # Convert the JSON string to a gRPC request
    # --------------------------------
    try:
        req = Parse(content, core_pb2.SessionRequest())
    except ParseError as err_obj:
        err_msg = 'Failed to convert JSON to gRPC: %s' % (err_obj)
        return get_failed_precondition_sess_response(err_msg)

    # In test mode, the incoming JSON is validated (in the line above)
    # and a canned response is returned
    #
    if settings.TA2_STATIC_TEST_MODE:
        rnd_session_id = random_info.get_alphanumeric_string(7)
        info_dict = dict(session_id=rnd_session_id,
                         api_version=TA3TA2Util.get_api_version())
        return get_grpc_test_json('test_responses/startsession_ok.json',
                                  info_dict)

    # --------------------------------
    # Get the connection; return an error if there are channel issues
    # --------------------------------
    core_stub, err_msg = TA2Connection.get_grpc_stub()
    if err_msg:
        return get_failed_precondition_sess_response(err_msg)
        #return dict(status=core_pb2.FAILED_PRECONDITION,
        #            details=err_msg)

    # --------------------------------
    # Send the gRPC request
    # --------------------------------
    try:
        reply = core_stub.StartSession(req)
    except Exception as ex:
        return get_failed_precondition_sess_response(str(ex))

    # --------------------------------
    # Convert the reply to JSON and send it back
    # --------------------------------
    return MessageToJson(reply)
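# --- Added context (sketch) ----------------------------------------------
# The error helper used throughout start_session() is defined elsewhere.
# A minimal hypothetical sketch, mirroring the commented-out fallback in
# the original source (dict(status=core_pb2.FAILED_PRECONDITION, ...)):
def get_failed_precondition_sess_response(err_msg):
    """Wrap an error message in a FAILED_PRECONDITION status dict."""
    return dict(status=core_pb2.FAILED_PRECONDITION,
                details=err_msg)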