def main(argv):
    """Decompose a workspace intents JSON into one .csv file per intent.

    Each output file contains one normalized (stripped, lowercased) example
    sentence per line; file names are derived via toIntentName().
    """
    parser = argparse.ArgumentParser(description='Decompose Bluemix conversation service intents in .json format to intent files in .csv format', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument('intents', help='file with intents in .json format')
    parser.add_argument('intentsDir', help='directory with intents files')
    # optional arguments
    parser.add_argument('-ni', '--common_intents_nameCheck', action='append', nargs=2, help="regex and replacement for intent name check, e.g. '-' '_' for to replace hyphens for underscores or '$special' '\\L' for lowercase")
    parser.add_argument('-s', '--soft', required=False, help='soft name policy - change intents and entities names without error.', action='store_true', default="")
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    NAME_POLICY = 'soft' if args.soft else 'hard'

    with openFile(args.intents, 'r') as sourceFile:
        allIntents = json.load(sourceFile)

    # one .csv per intent, one normalized example sentence per line
    for intentDef in allIntents:
        sentences = [exampleDef["text"].strip().lower() for exampleDef in intentDef["examples"]]
        targetPath = os.path.join(args.intentsDir, toIntentName(NAME_POLICY, args.common_intents_nameCheck, intentDef["intent"]) + ".csv")
        with openFile(targetPath, "w") as targetFile:
            for sentence in sentences:
                targetFile.write(sentence + "\n")

    logger.verbose("Intents from file '%s' were successfully extracted\n", args.intents)
def main(argv):
    """Insert a JSON fragment into a workspace JSON file, in place.

    Loads the workspace JSON and a second JSON file, replaces the value of
    the target key inside the workspace's 'dialog_nodes' with the fragment
    (via replaceValue) and writes the workspace back to the same file.
    """
    parser = argparse.ArgumentParser(description='This script takes a workspace JSON as one parameter and another JSON (i.e., piece of context data structure) and put the second one into desired place in the first one. This happens inplace.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # arguments
    parser.add_argument('-c', '--common_configFilePaths', help='configuaration file', action='append')
    parser.add_argument('-w', '--common_outputs_workspace', required=False, help='filename of the original workspace JSON')
    parser.add_argument('-d', '--common_outputs_directory', required=False, help='directory, where the workspace is located')
    parser.add_argument('-j', '--includejsondata_jsonfile', required=False, help='file with JSON you want to include')
    parser.add_argument('-t', '--includejsondata_targetkey', required=False, help='the key, where you want to add your JSON, i.e., "data_structure" : null; where you want to replace null, you would put "data_strucute" as this parameter')
    # optional arguments
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    # init the parameters
    args = parser.parse_args(argv)
    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)
    config = Cfg(args)
    logger.info('STARTING: ' + os.path.basename(__file__))

    # resolve required paths once instead of re-querying the config repeatedly
    workspacePath = os.path.join(getRequiredParameter(config, 'common_outputs_directory'), getRequiredParameter(config, 'common_outputs_workspace'))
    jsonIncludePath = getRequiredParameter(config, 'includejsondata_jsonfile')

    # workspace
    with codecs.open(workspacePath, 'r', encoding='utf8') as workspaceFile:
        try:
            workspaceInput = json.load(workspaceFile)
        except ValueError:
            # narrowed from a bare 'except:'; json parse errors are ValueError subclasses
            logger.error('Workspace JSON is not valid JSON: %s', workspacePath)
            exit(1)

    # json to add
    with codecs.open(jsonIncludePath, 'r', encoding='utf8') as jsonIncludeFile:
        try:
            jsonInclude = json.load(jsonIncludeFile)
        except ValueError:
            logger.error('JSON to include is not valid JSON: %s', jsonIncludePath)
            exit(1)

    # target element
    targetKey = getRequiredParameter(config, 'includejsondata_targetkey')

    # find the target key and add the json
    replacedValuesNumber = 0
    if 'dialog_nodes' in workspaceInput:
        workspaceInput['dialog_nodes'], replacedValuesNumber = replaceValue(workspaceInput['dialog_nodes'], targetKey, jsonInclude)
    else:
        logger.warning('Workspace does not contain \'dialog_nodes\'')

    # write the workspace back to the same file (inplace update)
    with codecs.open(workspacePath, 'w', encoding='utf8') as outfile:
        json.dump(workspaceInput, outfile, indent=4)

    if replacedValuesNumber == 0:
        logger.warning('Target key not found.')
    else:
        logger.info('Writing workspaces with added JSON successfull.')
    logger.info('FINISHING: ' + os.path.basename(__file__))
def main(argv):
    """Compare two dialog JSON files ignoring ordering; exit 0 iff they match."""
    parser = argparse.ArgumentParser(
        description='Compares dialog JSON before (input) and after (output) the conversion from JSON to WAW and back to JSON',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument('inputDialogFileName', help='file with original dialog JSON')
    parser.add_argument('outputDialogFileName', help='file with output dialog JSON run through WAW scripts')
    # optional arguments
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    beforePath = args.inputDialogFileName
    afterPath = args.outputDialogFileName

    # guard clauses: both files must exist before any parsing is attempted
    if not os.path.isfile(beforePath):
        logger.error("Input dialog json '%s' does not exist.", beforePath)
        exit(1)
    if not os.path.isfile(afterPath):
        logger.error("Output dialog json '%s' does not exist.", afterPath)
        exit(1)

    with open(beforePath) as beforeFile:
        beforeDialog = json.load(beforeFile)
    with open(afterPath) as afterFile:
        afterDialog = json.load(afterFile)

    # ignore_order=True makes reordered lists compare as equal
    diffAsJson = DeepDiff(beforeDialog, afterDialog, ignore_order=True).json
    logger.debug("result: %s", json.dumps(diffAsJson, indent=4))

    if diffAsJson == '{}':
        logger.info("Dialog JSONs are same.")
        exit(0)
    else:
        logger.info("Dialog JSONs differ.")
        exit(1)
def main(argv):
    """Delete matching Conversation workspaces and drop the workspace id from the config."""
    logger.info('STARTING: ' + os.path.basename(__file__))
    parser = argparse.ArgumentParser(
        description='Deletes Bluemix conversation service workspace and deletes workspace id from config file.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--common_configFilePaths', help='configuaration file', action='append')
    parser.add_argument('-oc', '--common_output_config', help='output configuration file')
    parser.add_argument('-cu', '--conversation_url', required=False, help='url of the conversation service API')
    parser.add_argument('-cv', '--conversation_version', required=False, help='version of the conversation service API')
    parser.add_argument('-cn', '--conversation_username', required=False, help='username of the conversation service instance')
    parser.add_argument('-cp', '--conversation_password', required=False, help='password of the conversation service instance')
    parser.add_argument('-cid', '--conversation_workspace_id', required=False, help='workspace_id of the application.')
    parser.add_argument('-wn', '--conversation_workspace_name', required=False, help='name of the workspace')
    parser.add_argument('-wnm', '--conversation_workspace_match_by_name', required=False, help='true if the workspace name should be matched by name (or pattern if defined)')
    parser.add_argument('-wnp', '--conversation_workspace_name_pattern', required=False, help='regex pattern specifying a name of workspaces to be deleted')
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    config = Cfg(args)

    # load credentials
    version = getRequiredParameter(config, 'conversation_version')
    workspacesUrl = getRequiredParameter(config, 'conversation_url')
    username = getRequiredParameter(config, 'conversation_username')
    password = getRequiredParameter(config, 'conversation_password')

    try:
        workspaces = filterWorkspaces(config, getWorkspaces(workspacesUrl, version, username, password))
    except SystemExit:
        logger.error("Failed to retrieve workspaces to delete.")
        sys.exit(1)

    deletedCount = 0
    for ws in workspaces:
        # delete one workspace via the REST API
        deleteUrl = workspacesUrl + '/' + ws['workspace_id'] + '?version=' + version
        response = requests.delete(deleteUrl, auth=(username, password), headers={'Accept': 'text/html'})
        # check errors during upload
        errorsInResponse(response.json())
        status = response.status_code
        if status == 200:
            deletedCount += 1
            logger.info("Workspace '%s' was successfully deleted", ws['name'])
            # delete workspaceId from config file
            if hasattr(config, 'conversation_workspace_id'):
                delattr(config, 'conversation_workspace_id')
        elif status == 400:
            logger.error("Error while deleting workspace '%s', status code '%s' (invalid request)", ws['name'], status)
            sys.exit(1)
        else:
            logger.error("Error while deleting workspace '%s', status code '%s'", ws['name'], status)
            sys.exit(1)

    if not deletedCount:
        logger.info("No workspace has been deleted")
    elif deletedCount == 1:
        logger.info("One workspace has been successfully deleted")
    else:
        logger.info(str(deletedCount) + " workspaces have been successfully deleted")

    outputConfigFile = getOptionalParameter(config, 'common_output_config')
    if outputConfigFile:
        config.saveConfiguration(outputConfigFile)
        logger.info("Configuration was saved to %s", outputConfigFile)
def main(argv):
    """Replace %%-prefixed codes in dialog XML text tags with sentences from a resource file.

    Loads the dialog XML, selects tags via the configured XPaths, and for each
    tag whose text contains a '%%code' token substitutes the translation found
    in the JSON resource file. Output goes to --output, in place, or stdout.
    """
    parser = argparse.ArgumentParser(
        description='Replaces codes in text tags with sentences specified in the resource file.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument('dialog', help='dialog nodes in xml format.')
    parser.add_argument('resource', help='file with translations from codes to sentences (JSON format - https://console-regional.stage1.ng.bluemix.net/docs/services/GlobalizationPipeline/bundles.html#globalizationpipeline_workingwithbundles)')
    # optional arguments
    parser.add_argument('-o', '--output', required=False, help='dialog nodes in xml format with all texts replaced by codes.')
    parser.add_argument('-t', '--tagsXPath', required=False, nargs='+', default=['//text[not(values)]', '//values'], help='Additional XPath of tags whose code should be replaced by text.')
    parser.add_argument('-i', '--inplace', required=False, help='replace input dialog by output.', action='store_true')
    parser.add_argument('-s', '--soft', required=False, help='soft name policy - change intents and entities names without error.', action='store_true', default="")
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    NAME_POLICY = 'soft' if args.soft else 'hard'

    # load dialog from XML
    dialogXML = LET.parse(args.dialog)

    # find all tags with codes to replace
    tagsToReplace = []
    for tagXPath in args.tagsXPath:
        tagsToReplace.extend(dialogXML.xpath(tagXPath))

    # LOAD RESOURCE FILE (TRANSLATIONS)
    with openFile(args.resource, 'r') as resourceFile:
        translations = json.load(resourceFile)

    # REPLACE ALL CODES WITH TEXTS
    for tagToReplace in tagsToReplace:
        if tagToReplace.text is None:
            continue
        logger.verbose("%s: code '%s'", tagToReplace.tag, tagToReplace.text)
        textParts = tagToReplace.text.split()
        for textPart in textParts:
            if not textPart.startswith('%%'):
                continue  # it is not a code
            code = toCode(NAME_POLICY, textPart[2:])
            # if this tag code is not in translations dictionary -> error
            if code not in translations:
                logger.error("code '%s' not in resource file!", code)
            else:
                # replace code (introduced with double %% and followed by white character or by the end)
                # with its translation.
                # BUGFIX: re.escape() the code so regex metacharacters in it cannot
                # corrupt the pattern, and use a callable replacement so backslashes
                # in the translation are inserted literally (not treated as group refs).
                translation = translations[code]
                pattern = r"%%" + re.escape(code) + r"(?=\s|$)"
                newText = re.sub(pattern, lambda match: translation, tagToReplace.text)
                tagToReplace.text = newText
                logger.verbose("-> translated as %s", tagToReplace.text)

    # OUTPUT NEW DIALOG
    if args.output is not None:
        with openFile(args.output, 'w') as outputFile:
            outputFile.write(LET.tostring(dialogXML, pretty_print=True, encoding='utf8'))
    elif args.inplace:
        with openFile(args.dialog, 'w') as outputFile:
            outputFile.write(LET.tostring(dialogXML, pretty_print=True, encoding='utf8'))
    else:
        sys.stdout.write(LET.tostring(dialogXML, pretty_print=True, encoding='utf8'))

    logger.verbose('Codes were successfully replaced with texts.')
def main(argv):
    """Concatenate generated intents, entities, dialog and counterexamples JSONs
    into a single Watson Conversation workspace JSON.

    Each part is optional; missing parts are logged and omitted. The result is
    written to common_outputs_workspace, or printed to stdout when that output
    file is not configured.
    """
    parser = argparse.ArgumentParser(description='Concatenate intents, entities and dialogue jsons to Watson Conversation Service workspace .json format', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--common_configFilePaths', help='configuaration file', action='append')
    parser.add_argument('-oc', '--common_output_config', help='output configuration file')
    parser.add_argument('-of', '--common_outputs_directory', required=False, help='directory where the otputs will be stored (outputs is default)')
    parser.add_argument('-oi', '--common_outputs_intents', required=False, help='json file with intents')
    parser.add_argument('-oe', '--common_outputs_entities', required=False, help='json file with entities')
    parser.add_argument('-od', '--common_outputs_dialogs', required=False, help='json file with dialogs')
    parser.add_argument('-ox', '--common_outputs_counterexamples', required=False, help='json file with counterexamples')
    parser.add_argument('-ow', '--common_outputs_workspace', required=False, help='json file with workspace')
    parser.add_argument('-wn', '--conversation_workspace_name', required=False, help='name of this workspace')
    parser.add_argument('-wl', '--conversation_language', required=False, help='language of generated workspace')
    parser.add_argument('-wd', '--conversation_description', required=False, help='description')
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)
    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)
    config = Cfg(args)
    logger.info('STARTING: ' + os.path.basename(__file__))

    # workspace metadata, with defaults (getattr default replaces hasattr/else pairs)
    workspace = {}
    workspace['name'] = getattr(config, 'conversation_workspace_name', 'default_workspace_name')
    workspace['language'] = getattr(config, 'conversation_language', 'en')
    workspace['description'] = getattr(config, 'conversation_description', '')

    if not hasattr(config, 'common_outputs_directory'):
        logger.info('outputs_directory is not defined!')
        exit(1)
    outputsDir = getattr(config, 'common_outputs_directory')

    # process intents
    if hasattr(config, 'common_outputs_intents'):
        with openFile(os.path.join(outputsDir, getattr(config, 'common_outputs_intents')), 'r', encoding='utf8') as intentsFile:
            workspace['intents'] = json.load(intentsFile)
    else:
        logger.info('output_intents not specified, omitting intents.')

    # process entities
    if hasattr(config, 'common_outputs_entities'):
        with openFile(os.path.join(outputsDir, getattr(config, 'common_outputs_entities')), 'r', encoding='utf8') as entitiesFile:
            workspace['entities'] = json.load(entitiesFile)
    else:
        logger.info('output_entities not specified, omitting entities.')

    # process dialog
    if hasattr(config, 'common_outputs_dialogs'):
        with openFile(os.path.join(outputsDir, getattr(config, 'common_outputs_dialogs')), 'r', encoding='utf8') as dialogFile:
            workspace['dialog_nodes'] = json.load(dialogFile)
    else:
        logger.info('outputs_dialogs not specified, omitting dialog.')

    # process counterexamples: flatten "intent format" into a simple list of
    # {"text": "example sentence"} entries
    if hasattr(config, 'common_outputs_counterexamples'):
        counterexamplesJSON = []
        with openFile(os.path.join(outputsDir, getattr(config, 'common_outputs_counterexamples')), 'r', encoding='utf8') as counterexamplesFile:
            intentExamplesJSON = json.load(counterexamplesFile)
        for intentExampleJSON in intentExamplesJSON:
            counterexamplesJSON.extend(intentExampleJSON['examples'])
        workspace['counterexamples'] = counterexamplesJSON
    else:
        logger.info('outputs_counterexamples not specified, omitting counterexamples.')

    if hasattr(config, 'common_outputs_workspace'):
        with openFile(os.path.join(outputsDir, getattr(config, 'common_outputs_workspace')), 'w', encoding='utf8') as outputFile:
            outputFile.write(json.dumps(workspace, indent=4, ensure_ascii=False))
    else:
        logger.info('output_workspace not specified, generating to console.')
        # BUGFIX: the original logged 'generating to console' but never emitted
        # anything; actually print the composed workspace to stdout.
        print(json.dumps(workspace, indent=4, ensure_ascii=False))
    logger.info('FINISHING: ' + os.path.basename(__file__))
def main(argv):
    """Deploy cloud functions (and optional sequences of them) to IBM Cloud Functions.

    Scans the configured functions directory, determines each file's runtime
    from its extension (or zip content), uploads every function into the
    configured package via the REST API, then deploys any configured sequences
    referencing those functions. Exits non-zero on the first failed upload.
    """
    # parse sequence names - because we need to get the name first and
    # then create corresponding arguments for the main parser
    sequenceSubparser = argparse.ArgumentParser()
    sequenceSubparser.add_argument('--cloudfunctions_sequences', nargs='+')
    # strip help flags so the pre-parse never triggers argparse's help/exit
    argvWithoutHelp = list(argv)
    if "--help" in argv:
        argvWithoutHelp.remove("--help")
    if "-h" in argv:
        argvWithoutHelp.remove("-h")
    sequenceNames = sequenceSubparser.parse_known_args(argvWithoutHelp)[0].cloudfunctions_sequences or []

    parser = argparse.ArgumentParser(
        description="Deploys the cloud functions",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('-c', '--common_configFilePaths', help="configuaration file", action='append')
    parser.add_argument('--common_functions', required=False, help="directory where the cloud functions are located")
    parser.add_argument('--cloudfunctions_namespace', required=False, help="cloud functions namespace")
    parser.add_argument('--cloudfunctions_apikey', required=False, help="cloud functions apikey")
    parser.add_argument('--cloudfunctions_username', required=False, help="cloud functions user name")
    parser.add_argument('--cloudfunctions_password', required=False, help="cloud functions password")
    parser.add_argument('--cloudfunctions_package', required=False, help="cloud functions package name")
    parser.add_argument('--cloudfunctions_url', required=False, help="url of cloud functions API")
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    parser.add_argument('--cloudfunctions_sequences', nargs='+', required=False, help="cloud functions sequence names")
    # one version flag per known runtime, e.g. --cloudfunctions_python_version
    for runtime in list(interpretedRuntimes.values()) + list(compiledRuntimes.values()):
        parser.add_argument('--cloudfunctions_' + runtime + '_version', required=False, help="cloud functions " + runtime + " version")
    # Add arguments for each sequence to be able to define the functions in the sequence
    for sequenceName in sequenceNames:
        try:
            parser.add_argument("--cloudfunctions_sequence_" + sequenceName, required=True, help="functions in sequence '" + sequenceName + "'")
        except argparse.ArgumentError as e:
            if "conflicting option" in str(e):
                # from None is needed in order to show only the custom exception and not the whole traceback
                # (It would read as 'During handling of the above exception, another exception has occurred', but we DID handle it)
                raise argparse.ArgumentError(None, "Duplicate sequence name: " + sequenceName) from None
            else:
                raise e
    args = parser.parse_args(argv)
    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)
    logger.info('STARTING: ' + os.path.basename(__file__))

    def handleResponse(response):
        """Get response code and show an error if it's not OK"""
        code = response.status_code
        if code != requests.codes.ok:
            if code == 401:
                logger.error("Authorization error. Check your credentials. (Error code " + str(code) + ")")
            elif code == 403:
                logger.error("Access is forbidden. Check your credentials and permissions. (Error code " + str(code) + ")")
            elif code == 404:
                logger.error("The resource could not be found. Check your cloudfunctions url and namespace. (Error code " + str(code) + ")")
            elif code == 408:
                logger.error("Request Timeout. (Error code " + str(code) + ")")
            elif code >= 500:
                logger.error("Internal server error. (Error code " + str(code) + ")")
            else:
                logger.error("Unexpected error code: " + str(code))
            errorsInResponse(response.json())
            return False
        return True

    config = Cfg(args)
    namespace = getRequiredParameter(config, 'cloudfunctions_namespace')
    # namespace may contain characters that must be percent-encoded in URLs
    urlNamespace = quote(namespace)
    auth = getParametersCombination(config, 'cloudfunctions_apikey', ['cloudfunctions_password', 'cloudfunctions_username'])
    package = getRequiredParameter(config, 'cloudfunctions_package')
    cloudFunctionsUrl = getRequiredParameter(config, 'cloudfunctions_url')
    functionDir = getRequiredParameter(config, 'common_functions')

    # If sequence names are already defined (from console), do nothing. Else look for them in the configuration.
    if not sequenceNames:
        sequenceNames = getOptionalParameter(config, 'cloudfunctions_sequences') or []
    # SequenceNames has to be a list
    if type(sequenceNames) is str:
        sequenceNames = [sequenceNames]
    # Create a dict of {<seqName>: [<functions 1>, <function2> ,...]}
    sequences = {seqName: getRequiredParameter(config, "cloudfunctions_sequence_" + seqName) for seqName in sequenceNames}

    # apikey wins over explicit username/password when both are available
    if 'cloudfunctions_apikey' in auth:
        username, password = convertApikeyToUsernameAndPassword(auth['cloudfunctions_apikey'])
    else:
        username = auth['cloudfunctions_username']
        password = auth['cloudfunctions_password']

    # map runtime name -> "<runtime>:<version>" label used in the upload payload;
    # falls back to the 'default' version tag when no version was configured
    runtimeVersions = {}
    for ext, runtime in list(interpretedRuntimes.items()) + list(compiledRuntimes.items()):
        runtimeVersions[runtime] = runtime + ':' + getattr(config, 'cloudfunctions_' + runtime + '_version', 'default')

    # requests are made with verify=False below, silence the TLS warnings
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

    packageUrl = cloudFunctionsUrl + '/' + urlNamespace + '/packages/' + package + '?overwrite=true'
    logger.info("Will create cloudfunctions package %s.", package)
    response = requests.put(packageUrl, auth=(username, password), headers={'Content-Type': 'application/json'}, data='{}')
    if not handleResponse(response):
        logger.critical("Cannot create cloud functions package %s.", package)
        sys.exit(1)
    else:
        logger.info('Cloud functions package successfully uploaded')

    filesAtPath = getFilesAtPath(functionDir, ['*' + ext for ext in (list(interpretedRuntimes) + list(compiledRuntimes) + compressedFiles)])
    logger.info("Will deploy functions at paths %s.", functionDir)
    for functionFilePath in filesAtPath:
        fileName = os.path.basename(functionFilePath)
        (funcName, ext) = os.path.splitext(fileName)
        runtime = None
        binary = False
        # if the file is zip, it's necessary to look inside
        if ext == '.zip':
            runtime = _getZipPackageType(functionFilePath)
            if not runtime:
                logger.warning("Cannot determine function type from zip file '%s'. Skipping!", functionFilePath)
                continue
            binary = True
        else:
            if ext in interpretedRuntimes:
                runtime = interpretedRuntimes[ext]
                binary = False
            elif ext in compiledRuntimes:
                runtime = compiledRuntimes[ext]
                binary = True
            else:
                logger.warning("Cannot determine function type of '%s'. Skipping!", functionFilePath)
                continue
        functionUrl = cloudFunctionsUrl + '/' + urlNamespace + '/actions/' + package + '/' + funcName + '?overwrite=true'
        # binary artifacts (zip, compiled) are uploaded base64-encoded
        if binary:
            content = base64.b64encode(open(functionFilePath, 'rb').read()).decode('utf-8')
        else:
            content = open(functionFilePath, 'r').read()
        payload = {'exec': {'kind': runtimeVersions[runtime], 'binary': binary, 'code': content}}
        logger.verbose("Deploying function %s", funcName)
        response = requests.put(functionUrl, auth=(username, password), headers={'Content-Type': 'application/json'}, data=json.dumps(payload), verify=False)
        if not handleResponse(response):
            logger.critical("Cannot deploy cloud function %s.", funcName)
            sys.exit(1)
        else:
            logger.verbose('Cloud function %s successfully deployed.', funcName)
    logger.info("Cloudfunctions successfully deployed.")

    if sequences:
        logger.info("Will deploy cloudfunction sequences.")
    for seqName in sequences:
        sequenceUrl = cloudFunctionsUrl + '/' + urlNamespace + '/actions/' + package + '/' + seqName + '?overwrite=true'
        functionNames = sequences[seqName]
        # sequence components must be fully qualified: <namespace>/<package>/<function>
        fullFunctionNames = [namespace + '/' + package + '/' + functionName for functionName in functionNames]
        payload = {'exec': {'kind': 'sequence', 'binary': False, 'components': fullFunctionNames}}
        logger.verbose("Deploying cloudfunctions sequence '%s': %s", seqName, functionNames)
        response = requests.put(sequenceUrl, auth=(username, password), headers={'Content-Type': 'application/json'}, data=json.dumps(payload), verify=False)
        if not handleResponse(response):
            logger.critical("Cannot deploy cloudfunctions sequence %s", seqName)
            sys.exit(1)
        else:
            logger.verbose("Sequence '%s' deployed.", seqName)
    if sequences:
        logger.info("Cloudfunction sequences successfully deployed.")
    logger.info('FINISHING: ' + os.path.basename(__file__))
def main(argv):
    """Decompose a workspace dialog JSON into a dialog XML file.

    Reads the dialog JSON from a file or stdin, converts it via
    convertDialog() and writes the XML either to <dialogDir>/dialog.xml
    or to standard output.
    """
    parser = argparse.ArgumentParser(
        description='Decompose Bluemix conversation service dialog in .json format to dialog files in .xml format',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument('dialog', nargs='?', type=argparse.FileType('r'), default=sys.stdin, help='file with dialog in .json format, if not specified, dialog is read from standard input')
    # optional arguments
    parser.add_argument('-d', '--dialogDir', required=False, help='directory with dialog files. If not specified, output is printed to standard output')
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    global STDOUT
    STDOUT = not args.dialogDir

    # XML namespaces
    global XSI_NAMESPACE
    global XSI
    global NSMAP
    XSI_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance"
    XSI = "{%s}" % XSI_NAMESPACE
    NSMAP = {"xsi": XSI_NAMESPACE}

    # load dialogs JSON
    # BUGFIX: the 'encoding' keyword of json.load() was removed in Python 3.9
    # (it raised TypeError there); json detects the input encoding itself.
    dialogsJSON = json.load(args.dialog)

    # convert dialogs
    dialogsXML = convertDialog(dialogsJSON)

    # return dialog XML
    if args.dialogDir:
        # print to file
        dialogFileName = os.path.join(args.dialogDir, "dialog.xml")
        with openFile(dialogFileName, "w") as dialogFile:
            dialogFile.write(LET.tostring(dialogsXML, pretty_print=True, encoding='unicode'))
    else:
        # print to standard output
        print(LET.tostring(dialogsXML, pretty_print=True, encoding='unicode'))
def main(argv):
    """Clean the generated dialog/intent/entity directories and the outputs directory."""
    parser = argparse.ArgumentParser(
        description='Clean generated directories.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--common_configFilePaths', help='configuaration file', action='append')
    parser.add_argument('-oc', '--common_output_config', help='output configuration file')
    parser.add_argument('-od', '--common_outputs_directory', required=False, help='directory where the otputs will be stored (outputs is default)')
    parser.add_argument('-oi', '--common_outputs_intents', help='file with output json with all the intents')
    parser.add_argument('-oe', '--common_outputs_entities', help='file with output json with all the entities')
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('-s', '--common_soft', required=False, help='soft name policy - change intents and entities names without error.', action='store_true', default="")
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    config = Cfg(args)
    logger.info('STARTING: ' + os.path.basename(__file__))

    def _removeDir(path):
        # Remove the directory tree if present and log what actually happened.
        # BUGFIX: the original copy-pasted branches logged '%s does not exist.'
        # even after a successful rmtree (plus a 'doess' typo and one message
        # missing the path placeholder entirely).
        if os.path.exists(path):
            shutil.rmtree(path)
            logger.verbose('%s has been removed.', path)
        else:
            logger.verbose('%s does not exist.', path)

    # common_generated_* values are lists; the first element is the directory
    _removeDir(config.common_generated_dialogs[0])
    _removeDir(config.common_generated_intents[0])
    _removeDir(config.common_generated_entities[0])
    _removeDir(config.common_outputs_directory)

    logger.info('FINISHING: ' + os.path.basename(__file__))
def main(argv):
    """Convert per-intent .csv files into a single NLU .tsv data file.

    Optionally tags entities in the sentences, generates a plain list of all
    intent names and a domain-to-intents map derived from the 'domain_intent'
    naming convention.
    """
    parser = argparse.ArgumentParser(description='Converts intents files to one file in NLU tsv format', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument('intentsDir', help='directory with intents files - all of them will be included in output file')
    parser.add_argument('output', help='file with output intents in NLU data .tsv format')
    # optional arguments
    parser.add_argument('-e', '--entityDir', required=False, help='directory with lists of entities in csv files (file names = entity names), used to tag those entities in output')
    parser.add_argument('-l', '--list', required=False, help='file with list of all intents (if it should be generated)')
    parser.add_argument('-m', '--map', required=False, help='file with domain to intents map (if it should be generated)')
    parser.add_argument('-p', '--prefix', required=False, help='prefix for all generated intents (if it should be added)')
    # NOTE: '\\L' (was '\L', an invalid escape sequence) - same string value
    parser.add_argument('-ni', '--common_intents_nameCheck', action='append', nargs=2, help="regex and replacement for intent name check, e.g. '-' '_' for to replace hyphens for underscores or '$special' '\\L' for lowercase")
    parser.add_argument('-ne', '--common_entities_nameCheck', action='append', nargs=2, help="regex and replacement for entity name check, e.g. '-' '_' for to replace hyphens for underscores or '$special' '\\L' for lowercase")
    parser.add_argument('-s', '--soft', required=False, help='soft name policy - change intents and entities names without error.', action='store_true', default="")
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true', default="")
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    NAME_POLICY = 'soft' if args.soft else 'hard'
    PREFIX = toIntentName(NAME_POLICY, args.common_intents_nameCheck, args.prefix)

    if args.entityDir:
        entities = getEntities(args.entityDir, args.common_entities_nameCheck, NAME_POLICY)

    with openFile(args.output, 'w') as outputFile:
        # process intents
        intentNames = []
        for intentFileName in os.listdir(args.intentsDir):
            intentName = toIntentName(NAME_POLICY, args.common_intents_nameCheck, PREFIX, os.path.splitext(intentFileName)[0])
            if intentName not in intentNames:
                intentNames.append(intentName)
            with open(os.path.join(args.intentsDir, intentFileName), "r") as intentFile:
                for line in intentFile.readlines():
                    # remove comments
                    line = line.split('#')[0]
                    if args.entityDir:
                        line = tagEntities(line, entities)
                    if line:
                        outputFile.write("1\t" + intentName + "\t" + line)
    logger.verbose("Intents file '%s' was successfully created", args.output)

    if args.list:
        with openFile(args.list, 'w') as intentsListFile:
            for intentName in intentNames:
                intentsListFile.write(intentName + "\n")
        logger.verbose("Intents list '%s' was successfully created", args.list)

    if args.map:
        # assumes intent names follow 'domain_intent' - a name without an
        # underscore would raise IndexError here (TODO confirm upstream naming)
        domIntMap = {}
        for intentName in intentNames:
            intentSplit = intentName.split("_", 1)
            domainPart = intentSplit[0]
            intentPart = intentSplit[1]
            if domainPart in domIntMap:
                domIntMap[domainPart] = domIntMap[domainPart] + ";" + intentPart
            else:
                domIntMap[domainPart] = ";" + intentPart
        with openFile(args.map, 'w') as intentsMapFile:
            for domainPart in domIntMap.keys():
                intentsMapFile.write(domainPart + domIntMap[domainPart] + "\n")
        # BUGFIX: the original logged args.output here instead of args.map
        logger.verbose("Domain-intent map '%s' was successfully created", args.map)
def main(argv):
    """Decompose a Bluemix Conversation entities .json file into per-entity .csv files.

    For every non-system entity a ``<entityName>.csv`` file is written into
    ``entitiesDir`` with one value per line (value and its synonyms joined by ';',
    pattern values prefixed with '~').  System entities (``sys-*``) are collected
    and written to ``system_entities.csv`` instead.
    """
    parser = argparse.ArgumentParser(
        description=
        'Decompose Bluemix conversation service entities in .json format to entity files in .csv format',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument('entities', help='file with entities in .json format')
    parser.add_argument('entitiesDir', help='directory with entities files')
    # optional arguments
    parser.add_argument(
        '-ne', '--common_entities_nameCheck', action='append', nargs=2,
        help=
        "regex and replacement for entity name check, e.g. '-' '_' for to replace hyphens for underscores or '$special' '\\L' for lowercase"
    )
    parser.add_argument(
        '-s', '--soft', required=False,
        help=
        'soft name policy - change intents and entities names without error.',
        action='store_true', default="")
    parser.add_argument('-v', '--verbose', required=False, help='verbosity',
                        action='store_true')
    parser.add_argument('--log', type=str.upper, default=None,
                        choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)
    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)
    NAME_POLICY = 'soft' if args.soft else 'hard'
    with openFile(args.entities, 'r') as entitiesFile:
        entitiesJSON = json.load(entitiesFile)
    systemEntities = []
    # process all entities
    for entityJSON in entitiesJSON:
        # process system entity
        if entityJSON["entity"].strip().lower().startswith("sys-"):
            # issue #82: make entity name check parameter-dependent
            #systemEntities.append(toEntityName(NAME_POLICY, entityJSON["entity"]))
            systemEntities.append(entityJSON["entity"])
        # process normal entity
        else:
            values = []
            # process all entity values
            for valueJSON in entityJSON["values"]:
                value = []
                # synonyms entities: first element is the value itself, synonyms follow
                if 'synonyms' in valueJSON:
                    value.append(valueJSON["value"].strip())
                    # add all synonyms
                    for synonym in valueJSON['synonyms']:
                        # empty-string synonyms are ignored when exported from WA json
                        if synonym.strip() != '':
                            value.append(synonym.strip())
                # for pattern entities add tilde to the value
                if 'patterns' in valueJSON:
                    value.append("~" + valueJSON["value"].strip())
                    # add all synonyms
                    for pattern in valueJSON["patterns"]:
                        value.append(pattern.strip())
                values.append(value)
            # new entity file; one line per value, fields ';'-separated
            entityFileName = os.path.join(
                args.entitiesDir,
                toEntityName(NAME_POLICY, args.common_entities_nameCheck,
                             entityJSON["entity"])) + ".csv"
            with openFile(entityFileName, "w") as entityFile:
                for value in values:
                    entityFile.write(';'.join(value) + "\n")
    # write file with system entities
    with openFile(os.path.join(args.entitiesDir, "system_entities.csv"),
                  'w') as systemEntitiesFile:
        systemEntitiesFile.write(
            "# a special list for the system entities - only one value at each line\n"
        )
        for systemEntity in systemEntities:
            systemEntitiesFile.write(systemEntity + "\n")
    logger.verbose("Entities from file '%s' were successfully extracted\n",
                   args.entities)
def main(argv):
    """Convert dialog nodes from .xml format to Bluemix Conversation workspace dialog .json.

    Loads the dialog XML (from --common_dialog_main or stdin), optionally
    validates it against an XML schema, expands imports, prunes comments and
    out-of-scope nodes, generates the standard nodes, and emits the resulting
    dialog-node array either to the configured output file or to stdout.
    """
    parser = argparse.ArgumentParser(
        description=
        'Converts dialog nodes from .xml format to Bluemix conversation service workspace .json format',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '-dm', '--common_dialog_main', required=False,
        help='main dialog file with dialogue nodes in xml format')
    parser.add_argument('-c', '--common_configFilePaths',
                        help='configuaration file', action='append')
    parser.add_argument('-oc', '--common_output_config',
                        help='output configuration file')
    parser.add_argument('-s', '--common_schema', required=False,
                        help='schema file')
    parser.add_argument('-sc', '--common_scope', required=False,
                        help='scope of dialog, e.g. type-local')
    parser.add_argument(
        '-of', '--common_outputs_directory', required=False,
        help='directory where the outputs will be stored (outputs is default)')
    parser.add_argument(
        '-od', '--common_outputs_dialogs', required=False,
        help='name of generated file (dialog.json is the default)')
    parser.add_argument('-v', '--verbose', required=False, help='verbosity',
                        action='store_true')
    parser.add_argument('--log', type=str.upper, default=None,
                        choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)
    global config
    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)
    config = Cfg(args)
    logger.info('STARTING: ' + os.path.basename(__file__))
    # XML namespaces (module-level globals consumed by the node helpers)
    global XSI_NAMESPACE
    global XSI
    global NSMAP
    XSI_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance"
    XSI = "{%s}" % XSI_NAMESPACE
    NSMAP = {"xsi": XSI_NAMESPACE}
    # load dialogue from XML
    if hasattr(config, 'common_dialog_main'):
        #TODO might need UTF-8
        dialogTree = LET.parse(getattr(config, 'common_dialog_main'))
    else:
        dialogTree = LET.parse(sys.stdin)
    # load schema (optional); the schema path is resolved relative to this script
    schemaParam = getOptionalParameter(config, 'common_schema')
    if schemaParam:
        schemaDirname = os.path.split(os.path.abspath(__file__))[0]
        schemaFile = os.path.join(schemaDirname, schemaParam)
        if not os.path.exists(schemaFile):
            logger.error('Schema file %s not found.', schemaFile)
            exit(1)
        #TODO might need UTF-8
        schemaTree = LET.parse(schemaFile)
        global schema
        schema = LET.XMLSchema(schemaTree)
        validate(dialogTree)
    # process dialog tree
    root = dialogTree.getroot()
    global rootGlobal
    rootGlobal = root
    importNodes(root, config)
    # remove all comments
    removeAllComments(dialogTree)
    # remove nodes which are out of specified scope
    removeOutOfScopeNodes(dialogTree)
    # find all node names
    global names
    names = findAllNodeNames(dialogTree)
    # NOTE: a child->parent map previously built here via the deprecated
    # Element.getiterator() was never read and has been removed.
    generateNodes(root, None, DEFAULT_ABORT, DEFAULT_AGAIN, DEFAULT_BACK,
                  DEFAULT_REPEAT, DEFAULT_GENERIC)
    # create dialog structure for JSON
    dialogNodes = []
    # convert XML tree to JSON structure
    printNodes(root, None, dialogNodes)
    if hasattr(config, 'common_outputs_directory') and hasattr(
            config, 'common_outputs_dialogs'):
        if not os.path.exists(getattr(config, 'common_outputs_directory')):
            os.makedirs(getattr(config, 'common_outputs_directory'))
            logger.info("Created new output directory %s",
                        getattr(config, 'common_outputs_directory'))
        with io.open(os.path.join(getattr(config, 'common_outputs_directory'),
                                  getattr(config, 'common_outputs_dialogs')),
                     'w', encoding='utf-8') as outputFile:
            outputFile.write(
                json.dumps(dialogNodes, indent=4, ensure_ascii=False))
        logger.info(
            "File %s created",
            os.path.join(getattr(config, 'common_outputs_directory'),
                         getattr(config, 'common_outputs_dialogs')))
    else:
        # no output file configured -> print to stdout
        print(json.dumps(dialogNodes, indent=4, ensure_ascii=False))
    if hasattr(config, 'common_output_config'):
        config.saveConfiguration(getattr(config, 'common_output_config'))
    logger.info('FINISHING: ' + os.path.basename(__file__))
def main(argv):
    '''
    Script takes an input json file that represents tests to be run against
    Cloud Functions and produces an output that extends the input json file by
    results from the CFs and their evaluation.

    Inputs and expected outputs can contain string values that start with '::'
    (e.g. "key": "::valueToBeReplaced1") which will be replaced by matching
    configuration parameters or by values specified by parameter 'replace'
    (format 'valueToBeReplaced1:replacement1,valueToBeReplaced2:replacement2').

    Input json file example:
    [
        {
            "name": "test example 1", # OPTIONAL
            "type": "EXACT_MATCH", # OPTIONAL (DEFAULT = EXACT_MATCH, OPTIONS = [EXACT_MATCH])
            "cf_package": "<CLOUD FUNCTIONS PACKAGE NAME>", # OPTIONAL (could be provided directly to script, at least one has to be specified, test level overrides global script one)
            "cf_function": "<CLOUD FUNCTIONS SPECIFIC FUNCTION TO BE TESTED>", # --||--
            "input": <OBJECT> | <@PATH/TO/FILE>, # payload to be send to CF (could be specified as a relative or absolute path to the file that contains json file, e.g. "input": "@inputs/test_example_1.json")
            "outputExpected": <OBJECT> | <@PATH/TO/FILE>, # expected payload to be return from CF (--||--)
        },
        {
            "name": "test example 2",
            ... rest of the test definition ...
        }
    ]

    Output json file example:
    [
        {
            "name": "test example 1",
            ... rest of the input test definition ...
            "outputReturned": <OBJECT>, # returned payload from CF
            "result": <0 - test passed, 1 - test failed>
            "diff": <OBJECT> # if test passed then "diff" is Null, else contains object that represents differences
        }
    ]
    '''
    parser = argparse.ArgumentParser(
        description='Tests all tests specified in given file against Cloud Functions and save test outputs to output file',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument('inputFileName', help='File with json array containing tests.')
    parser.add_argument('outputFileName', help='File where to store test outputs.')
    # optional arguments
    parser.add_argument('-c', '--common_configFilePaths', help='configuaration file', action='append')
    parser.add_argument('--cloudfunctions_url', required=False, help='url of cloud functions API')
    parser.add_argument('--cloudfunctions_namespace', required=False, help='cloud functions namespace')
    parser.add_argument('--cloudfunctions_package', required=False, help='cloud functions package name')
    parser.add_argument('--cloudfunctions_function', required=False, help='cloud functions specific function to be tested')
    parser.add_argument('--cloudfunctions_apikey', required=False, help="cloud functions apikey")
    parser.add_argument('--cloudfunctions_username', required=False, help='cloud functions user name')
    parser.add_argument('--cloudfunctions_password', required=False, help='cloud functions password')
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None,
                        choices=list(logging._levelToName.values()))
    parser.add_argument('--replace', required=False,
                        help='string values to be replaced in input and expected output json (format \'valueToBeReplaced1:replacement1,valueToBeReplaced2:replacement2\')')
    args = parser.parse_args(argv)
    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)
    config = Cfg(args)
    logger.info('STARTING: ' + os.path.basename(__file__))
    # required / optional connection parameters
    url = getRequiredParameter(config, 'cloudfunctions_url')
    namespace = getRequiredParameter(config, 'cloudfunctions_namespace')
    auth = getParametersCombination(config, 'cloudfunctions_apikey',
                                    ['cloudfunctions_password', 'cloudfunctions_username'])
    package = getOptionalParameter(config, 'cloudfunctions_package')
    function = getOptionalParameter(config, 'cloudfunctions_function')
    # apikey takes precedence; otherwise explicit username/password pair
    if 'cloudfunctions_apikey' in auth:
        username, password = convertApikeyToUsernameAndPassword(auth['cloudfunctions_apikey'])
    else:
        username = auth['cloudfunctions_username']
        password = auth['cloudfunctions_password']
    try:
        inputFile = open(args.inputFileName, 'r')
    except IOError:
        logger.critical('Cannot open test input file %s', args.inputFileName)
        sys.exit(1)
    try:
        outputFile = open(args.outputFileName, 'w')
    except IOError:
        logger.critical('Cannot open test output file %s', args.outputFileName)
        sys.exit(1)
    try:
        inputJson = json.load(inputFile)
    except ValueError as e:
        logger.critical('Cannot decode json from test input file %s, error: %s',
                        args.inputFileName, str(e))
        sys.exit(1)
    # close the test-definition file now; the name is reused for payload files below
    inputFile.close()
    if not isinstance(inputJson, list):
        logger.critical('Input test json is not array!')
        sys.exit(1)
    # build the replacement dictionary: all config attributes plus explicit --replace pairs
    replaceDict = {}
    for attr in dir(config):
        if not attr.startswith("__"):
            if attr == 'replace':
                # format 'valueToBeReplaced1:replacement1,valueToBeReplaced2:replacement2'
                replacementsString = getattr(config, attr)
                for replacementString in replacementsString.split(','):
                    replacementStringSplit = replacementString.split(':')
                    if len(replacementStringSplit) != 2 or not replacementStringSplit[0] or not replacementStringSplit[1]:
                        logger.critical('Invalid format of \'replace\' parameter, valid format is \'valueToBeReplaced1:replacement1,valueToBeReplaced2:replacement2\'')
                        sys.exit(1)
                    replaceDict[replacementStringSplit[0]] = replacementStringSplit[1]
            else:
                replaceDict[attr] = getattr(config, attr)
    # run tests
    # FIX: enumerate keeps the test numbering correct even when a test is
    # skipped via 'continue' (previously the counter was only incremented at
    # the very end of the loop body).
    for testCounter, test in enumerate(inputJson):
        if not isinstance(test, dict):
            logger.error('Input test array element %d is not dictionary. Each test has to be dictionary, please see doc!', testCounter)
            continue
        logger.info('Test number: %d, name: %s', testCounter,
                    (test['name'] if 'name' in test else '-'))
        # test-level cf_package/cf_function override the script-level ones
        testUrl = \
            url + ('' if url.endswith('/') else '/') + \
            namespace + '/actions/' + (test['cf_package'] if 'cf_package' in test else package) + '/' + \
            (test['cf_function'] if 'cf_function' in test else function) + \
            '?blocking=true&result=true'
        logger.info('Tested function url: %s', testUrl)
        # load test input payload json (inline object, or '@path' file reference)
        testInputJson = test['input']
        testInputPath = None
        try:
            if testInputJson.startswith('@'):
                testInputPath = os.path.join(os.path.dirname(args.inputFileName), testInputJson[1:])
                logger.debug('Loading input payload from file: %s', testInputPath)
                try:
                    inputFile = open(testInputPath, 'r')
                except IOError:
                    logger.error('Cannot open input payload from file: %s', testInputPath)
                    continue
                try:
                    testInputJson = json.load(inputFile)
                except ValueError as e:
                    logger.error('Cannot decode json from input payload from file %s, error: %s', testInputPath, str(e))
                    continue
        except AttributeError:
            # inline payload is not a string -> no '@file' indirection
            pass
        if not testInputPath:
            logger.debug('Input payload provided inside the test')
        # load test expected output payload json (same '@path' convention)
        testOutputExpectedJson = test['outputExpected']
        testOutputExpectedPath = None
        try:
            if testOutputExpectedJson.startswith('@'):
                testOutputExpectedPath = os.path.join(os.path.dirname(args.inputFileName), testOutputExpectedJson[1:])
                logger.debug('Loading expected output payload from file: %s', testOutputExpectedPath)
                try:
                    outputExpectedFile = open(testOutputExpectedPath, 'r')
                except IOError:
                    logger.error('Cannot open expected output payload from file: %s', testOutputExpectedPath)
                    continue
                try:
                    testOutputExpectedJson = json.load(outputExpectedFile)
                except ValueError as e:
                    logger.error('Cannot decode json from expected output payload from file %s, error: %s', testOutputExpectedPath, str(e))
                    continue
        except AttributeError:
            pass
        if not testOutputExpectedPath:
            logger.debug('Expected output payload provided inside the test')
        logger.debug('Replacing values in input and expected output jsons by configuration parameters.')
        for target, value in replaceDict.items():
            testInputJson, replacementNumber = replaceValue(testInputJson, '::' + target, value, False)
            if replacementNumber > 0:
                logger.debug('Replaced configuration parameter \'%s\' in input json, number of occurences: %d.', target, replacementNumber)
            testOutputExpectedJson, replacementNumber = replaceValue(testOutputExpectedJson, '::' + target, value, False)
            if replacementNumber > 0:
                logger.debug('Replaced configuration parameter \'%s\' in expected output json, number of occurences: %d.', target, replacementNumber)
        # call CF
        logger.debug('Sending input json: %s', json.dumps(testInputJson, ensure_ascii=False).encode('utf8'))
        response = requests.post(
            testUrl,
            auth=(username, password),
            headers={'Content-Type': 'application/json'},
            data=json.dumps(testInputJson, ensure_ascii=False).encode('utf8'))
        responseContentType = response.headers.get('content-type')
        if responseContentType != 'application/json':
            logger.error('Response content type is not json, content type: %s, response:\n%s', responseContentType, response.text)
            continue
        # check status
        if response.status_code == 200:
            testOutputReturnedJson = response.json()
            logger.debug('Received output json: %s', json.dumps(testOutputReturnedJson, ensure_ascii=False).encode('utf8'))
            test['outputReturned'] = testOutputReturnedJson
            # evaluate test
            if 'type' not in test or test['type'] == 'EXACT_MATCH':
                testResultString = DeepDiff(testOutputExpectedJson, testOutputReturnedJson, ignore_order=True).json
                testResultJson = json.loads(testResultString)
                if testResultJson == {}:
                    test['result'] = 0
                else:
                    test['result'] = 1
                    test['diff'] = testResultJson
            else:
                logger.error('Unknown test type: %s', test['type'])
        elif response.status_code in [202, 403, 404, 408, 502]:
            # 202 Accepted activation request (should not happen while sending 'blocking=true&result=true')
            # 403 Forbidden (could be just for specific package or function)
            # 404 Not Found (action or package could be incorrectly specified for given test)
            # 408 Request Timeout (could happen e.g. for CF that calls some REST APIs, e.g. Discovery service)
            # 502 Bad Gateway (when the CF raises exception, e.g. bad params where provided)
            # => Could be issue just for given test, so we don't want to stop whole testing.
            # FIX: 502 was documented here but missing from the list, so a single
            # failing CF aborted the whole run.
            logger.error('Unexpected response status: %d, response: %s', response.status_code, json.dumps(response.json(), ensure_ascii=False).encode('utf8'))
        else:
            # 401 Unauthorized (while we use same credentials for all tests then we want to end after the first test returns bad authentification)
            # 500 Internal Server Error (could happen that IBM Cloud has several issue and is not able to handle incoming requests, then it would be probably same for all tests)
            # => We don't want to continue with testing.
            logger.critical('Unexpected response status (cannot continue with testing): %d, response: %s', response.status_code, json.dumps(response.json(), ensure_ascii=False).encode('utf8'))
            sys.exit(1)
    outputFile.write(json.dumps(inputJson, indent=4, ensure_ascii=False) + '\n')
    outputFile.close()
    logger.info('FINISHING: ' + os.path.basename(__file__))
def main(argv):
    """Convert NLU tsv files into domain-entity and intent-entity mappings.

    Optionally reads a tagged-sentence .tsv (intent in column 2, tagged text in
    column 3), derives which entities occur per domain and per intent, and
    writes the requested mapping/list csv files.
    """
    parser = argparse.ArgumentParser(
        description=
        'convert NLU tsv files into domain-entity and intent-entity mappings.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument(
        'entitiesDir',
        help=
        'directory with entities files - all of them will be included in output list if specified'
    )
    # optional arguments
    parser.add_argument(
        '-is', '--sentences',
        help=
        '.tsv file in NLU format with tagged entities in example sentences in third column and intent names in second column'
    )
    parser.add_argument(
        '-l', '--list', required=False,
        help='output file with list of all entities (if it should be generated)'
    )
    parser.add_argument(
        '-d', '--domEnt', required=False,
        help=
        'output file with domain-entity mapping (if it should be generated)')
    parser.add_argument(
        '-i', '--intEnt', required=False,
        help=
        'output file with intent-entity mapping (if it should be generated)')
    parser.add_argument(
        '-ni', '--common_intents_nameCheck', action='append', nargs=2,
        help=
        "regex and replacement for intent name check, e.g. '-' '_' for to replace hyphens for underscores or '$special' '\\L' for lowercase"
    )
    parser.add_argument(
        '-ne', '--common_entities_nameCheck', action='append', nargs=2,
        help=
        "regex and replacement for entity name check, e.g. '-' '_' for to replace hyphens for underscores or '$special' '\\L' for lowercase"
    )
    parser.add_argument(
        '-s', '--soft', required=False,
        help=
        'soft name policy - change intents and entities names without error.',
        action='store_true', default="")
    parser.add_argument('-v', '--verbose', required=False, help='verbosity',
                        action='store_true')
    parser.add_argument('--log', type=str.upper, default=None,
                        choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)
    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)
    NAME_POLICY = 'soft' if args.soft else 'hard'
    domEntMap = defaultdict(dict)
    intEntMap = defaultdict(dict)
    if args.sentences:
        with openFile(args.sentences, "r") as sentencesFile:
            for line in sentencesFile.readlines():
                line = line.rstrip()
                if not line:
                    continue
                # intent name is in the second column, tagged sentence in the third
                intentName = toIntentName(NAME_POLICY,
                                          args.common_intents_nameCheck,
                                          line.split("\t")[1])
                intentText = line.split("\t")[2]
                # intent name convention: <domain>_<intent>
                intentSplit = intentName.split("_", 1)
                domainPart = intentSplit[0]
                intentPart = intentSplit[1]
                # record every entity tagged as <entity>value</entity>
                for entity in re.findall(r'<([^>]+)>[^<]+</[^>]+>', intentText):
                    domEntMap[domainPart][entity] = 1
                    intEntMap[intentPart][entity] = 1
    if args.domEnt:
        with openFile(args.domEnt, 'w') as domEntFile:
            for domain in sorted(domEntMap.keys()):
                entities = "NONE;"
                for entity in sorted(domEntMap[domain].keys()):
                    entities += entity + ";"
                domEntFile.write(domain + ";" + entities + "\n")
        logger.debug("Domain-entity map '%s' was successfully created",
                     args.domEnt)
    # FIX: this section was guarded by (and logged) args.domEnt, so --intEnt
    # alone produced nothing and --domEnt alone crashed on openFile(None)
    if args.intEnt:
        with openFile(args.intEnt, 'w') as intEntFile:
            for intent in sorted(intEntMap.keys()):
                entities = "NONE;"
                for entity in sorted(intEntMap[intent].keys()):
                    entities += entity + ";"
                intEntFile.write(intent + ";" + entities + "\n")
        logger.debug("Intent-entity map '%s' was successfully created",
                     args.intEnt)
    if args.list:
        with openFile(args.list, 'w') as listFile:
            # process entities: one (deduplicated) name per file in entitiesDir
            entityNames = []
            for entityFileName in os.listdir(args.entitiesDir):
                entityName = toEntityName(NAME_POLICY,
                                          args.common_entities_nameCheck,
                                          os.path.splitext(entityFileName)[0])
                if entityName not in entityNames:
                    entityNames.append(entityName)
            for entityName in entityNames:
                listFile.write(entityName + ";\n")
        logger.debug("Entities list '%s' was successfully created", args.list)
def main(argv):
    """Test all dialog flows from the input file against a Conversation workspace.

    Resolves exactly one workspace by name, waits for it to finish training,
    then replays every input line (carrying conversation context across turns
    of the same dialog_id) and writes one service response per output line.
    """
    parser = argparse.ArgumentParser(
        description=
        'Tests all dialog flows from given file and save received responses to output file',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument(
        'inputFileName',
        help=
        'file with test jsons to be sent to conversation service. (One at each line at key \'input\'.)'
    )
    parser.add_argument(
        'outputFileName',
        help=
        'file where to store received data from conversation service. (One response at each line.)'
    )
    # optional arguments
    parser.add_argument('-c', '--common_configFilePaths',
                        help='configuaration file', action='append')
    parser.add_argument('-v', '--verbose', required=False, help='verbosity',
                        action='store_true')
    parser.add_argument('--log', type=str.upper, default=None,
                        choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)
    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)
    config = Cfg(args)
    workspacesUrl = getRequiredParameter(config, 'conversation_url')
    version = getRequiredParameter(config, 'conversation_version')
    username = getRequiredParameter(config, 'conversation_username')
    password = getRequiredParameter(config, 'conversation_password')
    workspaces = filterWorkspaces(
        config, getWorkspaces(workspacesUrl, version, username, password))
    if len(workspaces) > 1:
        # if there is more than one workspace with the same name -> error
        logger.error(
            'There are more than one workspace with this name, do not know which one to test.'
        )
        exit(1)
    elif len(workspaces) == 1:
        workspaceId = workspaces[0]['workspace_id']
    else:
        logger.error('There is no workspace with this name, cannot test it.')
        exit(1)
    # wait until workspace is done with training
    checkWorkspaceTime = 0
    requestUrl = workspacesUrl + '/' + workspaceId + '?version=' + version
    while True:
        logger.verbose("requestUrl: %s", requestUrl)
        response = requests.get(requestUrl, auth=(username, password))
        if response.status_code == 200:
            responseJson = response.json()
            if errorsInResponse(responseJson):
                sys.exit(1)
            logger.verbose("response: %s", responseJson)
            status = responseJson['status']
            logger.info('WCS WORKSPACE STATUS: %s', status)
            if status == 'Available':
                break
            else:
                # sleep some time and check messages again
                # FIX: the timeout log previously referenced
                # CHECK_MESSAGES_TIME_MAX instead of the constant actually
                # used for the comparison.
                if checkWorkspaceTime > CHECK_WORKSPACE_TIME_MAX:
                    logger.error(
                        'Workspace have not become available before timeout, timeout: %d, response: %s',
                        CHECK_WORKSPACE_TIME_MAX,
                        json.dumps(responseJson, indent=4, sort_keys=True,
                                   ensure_ascii=False).encode('utf8'))
                    sys.exit(1)
                time.sleep(CHECK_WORKSPACE_TIME_DELAY)
                checkWorkspaceTime = checkWorkspaceTime + CHECK_WORKSPACE_TIME_DELAY
        elif response.status_code == 400:
            logger.error('WA not available.')
            sys.exit(1)
        else:
            # FIX: previously this branch neither slept nor exited, so an
            # unexpected status code caused a tight infinite polling loop;
            # exit like the analogous branch in the test loop below.
            logger.error('Unknown status code:%s.', response.status_code)
            sys.exit(1)
    # run tests
    url = workspacesUrl + '/' + workspaceId + '/message?version=' + version
    receivedOutputJson = []
    try:
        with openFile(args.inputFileName, "r") as inputFile:
            try:
                with openFile(args.outputFileName, "w") as outputFile:
                    first = True
                    dialogId = ""
                    # for every input line
                    for inputLine in inputFile:
                        loadedJson = json.loads(inputLine)
                        inputJson = loadedJson['input_message']  # input json for tests
                        if dialogId and dialogId == loadedJson['dialog_id']:
                            if receivedOutputJson and 'context' in receivedOutputJson and receivedOutputJson['context']:
                                # use context from last dialog turn
                                inputJson['context'] = receivedOutputJson['context']
                        dialogId = loadedJson['dialog_id']
                        logger.verbose("url: %s", url)
                        response = requests.post(
                            url,
                            auth=(username, password),
                            headers={'Content-Type': 'application/json'},
                            data=json.dumps(inputJson, indent=4,
                                            ensure_ascii=False).encode('utf8'))
                        if response.status_code == 200:
                            receivedOutputJson = response.json()
                            if not first:
                                outputFile.write("\n")
                            outputFile.write(
                                json.dumps(receivedOutputJson,
                                           ensure_ascii=False))
                            first = False
                        elif response.status_code == 400:
                            logger.error('Error while testing.')
                            errorsInResponse(response.json())
                            sys.exit(1)
                        else:
                            logger.error('Unknown status code:%s.',
                                         response.status_code)
                            sys.exit(1)
            except IOError:
                logger.error('Cannot open test output file %s',
                             args.outputFileName)
                sys.exit(1)
    except IOError:
        logger.error('Cannot open test input file %s', args.inputFileName)
        sys.exit(1)
    logger.info('FINISHING: ' + os.path.basename(__file__))
def main(argv):
    """Create dialog nodes (with intents/entities) from MS Excel workbooks.

    Collects .xlsx files from the configured paths (files or directories),
    parses them into data blocks, converts the blocks into dialog data,
    resolves cross references and saves everything to the file system.
    """
    parser = argparse.ArgumentParser(
        description='Creates dialog nodes with answers to intents .',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # optional arguments
    parser.add_argument('-x', '--common_xls', required=False,
                        help='file with MSExcel formated dialog',
                        action='append')
    parser.add_argument('-gd', '--common_generated_dialogs', nargs='?',
                        help='generated dialog file')
    parser.add_argument('-gi', '--common_generated_intents', nargs='?',
                        help='directory for generated intents')
    parser.add_argument('-ge', '--common_generated_entities', nargs='?',
                        help='directory for generated entities')
    parser.add_argument('-c', '--common_configFilePaths',
                        help='configuaration file', action='append')
    parser.add_argument('-oc', '--common_output_config',
                        help='output configuration file')
    parser.add_argument('-v', '--verbose', required=False, help='verbosity',
                        action='store_true')
    parser.add_argument('--log', type=str.upper, default=None,
                        choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)
    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)
    config = Cfg(args)
    logger.info('STARTING: ' + os.path.basename(__file__))
    # NOTE: removed dead locals: 'name_policy' (assigned, never read) and
    # 'allDataBlocks' (superseded by XLSXHandler's internal state).
    if not hasattr(config, 'common_xls'):
        logger.error('xls is not defined')
        exit(1)
    if not hasattr(config, 'common_generated_dialogs'):
        logger.verbose('generated_dialogs parameter is not defined')
    if not hasattr(config, 'common_generated_intents'):
        logger.verbose('generated_intents parameter is not defined')
    if not hasattr(config, 'common_generated_entities'):
        logger.verbose('generated_entities parameter is not defined')
    xlsxHandler = XLSXHandler(config)
    logger.info(getattr(config, 'common_xls'))
    for fileOrFolder in getattr(config, 'common_xls'):
        logger.verbose('Searching in path: %s', fileOrFolder)
        if os.path.isdir(fileOrFolder):
            xlsDirList = os.listdir(fileOrFolder)
            for xlsFile in xlsDirList:
                # accept only regular .xlsx files that are not Excel lock
                # files (~ prefix) or hidden files (. prefix)
                if os.path.isfile(os.path.join(fileOrFolder, xlsFile)) and xlsFile.endswith('.xlsx') and \
                        not (xlsFile.startswith('~')) and not (xlsFile.startswith('.')):
                    # consistency fix: use os.path.join instead of manual '/' concatenation
                    xlsxHandler.parseXLSXIntoDataBlocks(
                        os.path.join(fileOrFolder, xlsFile))
                else:
                    logger.warning(
                        'The file %s skipped due to failing file selection policy check. '
                        'It should be .xlsx file not starting with ~ or .(dot).',
                        os.path.join(fileOrFolder, xlsFile))
        elif os.path.exists(fileOrFolder):
            xlsxHandler.parseXLSXIntoDataBlocks(fileOrFolder)
        else:
            # FIX: nonexistent paths were previously ignored silently
            logger.warning('The path %s does not exist and was skipped.',
                           fileOrFolder)
    xlsxHandler.convertBlocksToDialogData()  # Blocks-> DialogData
    xlsxHandler.updateReferences()  # Resolving cross references
    saveDialogDataToFileSystem(xlsxHandler.getDialogData(), XMLHandler(),
                               config)
    logger.info('FINISHING: ' + os.path.basename(__file__))
def main(argv):
    """Replace dialog text-tag contents with generated codes and emit a resource file.

    Every matched tag text is replaced by '%%<CODE>'; the code->text mapping is
    written to the resource file (optionally joined with / appended to an
    existing one).
    """
    parser = argparse.ArgumentParser(
        description=
        'Replaces sentences in text tags with codes and creates resource file with translations from codes to sentences.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument('dialog', help='dialog nodes in xml format.')
    parser.add_argument(
        'resource',
        help=
        'file with generated translations from codes to sentences (JSON format - https://console-regional.stage1.ng.bluemix.net/docs/services/GlobalizationPipeline/bundles.html#globalizationpipeline_workingwithbundles)'
    )
    # optional arguments
    parser.add_argument(
        '-o', '--output', required=False,
        help='dialog nodes in xml format with all texts replaced by codes.')
    parser.add_argument(
        '-p', '--prefix', required=False, default='TXT',
        help='the prefix for generated codes (alphanumeric upercase only).')
    parser.add_argument(
        '-t', '--tagsXPath', required=False, nargs='+',
        default=['//text[not(values)]', '//values'],
        help='XPath of tags whose text should be replaced by code.')
    parser.add_argument(
        '-a', '--append', required=False,
        help=
        'append translations to the existing resource file as new ones. (Duplicate codes will be overwritten by new ones.)',
        action='store_true')
    parser.add_argument(
        '-j', '--join', required=False,
        help=
        'use translations from the existing resource file and append new ones.',
        action='store_true')
    parser.add_argument('-i', '--inplace', required=False,
                        help='replace input dialog by output.',
                        action='store_true')
    parser.add_argument(
        '-s', '--soft', required=False,
        help=
        'soft name policy - change intents and entities names without error.',
        action='store_true', default="")
    parser.add_argument('-v', '--verbose', required=False, help='verbosity',
                        action='store_true')
    parser.add_argument('--log', type=str.upper, default=None,
                        choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)
    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)
    NAME_POLICY = 'soft' if args.soft else 'hard'
    PREFIX = toCode(NAME_POLICY, args.prefix)
    # load dialog from XML
    # TODO might need UTF-8
    dialogsXML = LET.parse(args.dialog)
    # find all tags with texts to replace
    tagsToReplace = []
    for tagXPath in args.tagsXPath:
        tagsToReplace.extend(dialogsXML.xpath(tagXPath))
    # LOAD EXISTING RESOURCE FILE (TRANSLATIONS)
    if args.join:
        with openFile(args.resource, 'r') as resourceFile:
            translations = json.load(resourceFile)
    else:
        translations = {}
    counter = 0
    # FIX: the original looked codes up via
    # translations.keys()[translations.values().index(text)], which fails on
    # Python 3 (dict views are not subscriptable) and is O(n) per tag.
    # Maintain a text->code reverse map instead; keep first-key-wins semantics
    # for texts that already have several codes in a joined resource file.
    codesByText = {}
    for existingCode, existingText in translations.items():
        if existingText not in codesByText:
            codesByText[existingText] = existingCode
    # REPLACE ALL TEXTS WITH CODES
    for tagToReplace in tagsToReplace:
        text = tagToReplace.text
        logger.verbose("%s: %s", tagToReplace.tag, tagToReplace.text)
        # if this tag text is not in translations dictionary (it has not a code),
        # create new code for it and add it to dictionary
        if text not in codesByText:
            # NOTE(review): a generated code could collide with a code already
            # present in a joined resource file and overwrite it — preserved
            # from the original behavior, confirm whether that is intended.
            newCode = toCode(NAME_POLICY, PREFIX + str(counter))
            translations[newCode] = text
            codesByText[text] = newCode
            counter += 1
        # replace tag text by its code
        code = codesByText[text]
        tagToReplace.text = '%%' + code
        logger.verbose("-> encoded as %s", code)
    # OUTPUT NEW DIALOG
    if args.output is not None:
        with openFile(args.output, 'w') as outputFile:
            outputFile.write(
                LET.tostring(dialogsXML, pretty_print=True, encoding='utf8'))
    elif args.inplace:
        with openFile(args.dialog, 'w') as outputFile:
            outputFile.write(
                LET.tostring(dialogsXML, pretty_print=True, encoding='utf8'))
    else:
        sys.stdout.write(
            LET.tostring(dialogsXML, pretty_print=True, encoding='utf8'))
    # EXTEND RESOURCE FILE
    if args.append:
        with openFile(args.resource, 'r') as resourceFile:
            resourceJSON = json.load(resourceFile)
            # add new translations to existing ones (Duplicate codes will be
            # overwritten by new ones.)
            resourceJSON.update(translations)
            translations = resourceJSON
    # CREATE RESOURCE FILE
    with openFile(args.resource, 'w') as resourceFile:
        resourceFile.write(
            json.dumps(translations, indent=4, ensure_ascii=False))
    logger.verbose('Texts were successfully replaced with codes.')
def main(argv):
    """Decompose a Watson Conversation workspace JSON into separate intents,
    entities, dialog and counterexamples JSON files.

    Each section is extracted only when its target file name is given on the
    command line; counterexamples are wrapped as a single pseudo-intent named
    "IRRELEVANT" so they share the shape of an intents file.
    """
    parser = argparse.ArgumentParser(
        description='Decompose Bluemix conversation service workspace in .json format to intents json, entities json and dialog json',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument('workspace', help='workspace in .json format')
    # optional arguments
    parser.add_argument('-i', '--intents', required=False,
                        help='file with intents in .json format (not extracted if not specified)')
    parser.add_argument('-e', '--entities', required=False,
                        help='file with entities in .json format (not extracted if not specified)')
    parser.add_argument('-d', '--dialog', required=False,
                        help='file with dialog in .json format (not extracted if not specified)')
    parser.add_argument('-c', '--counterexamples', required=False,
                        help='file with counterexamples in .json format (not extracted if not specified)')
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None,
                        choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    # BUGFIX: removed 'workspace_file = json.loads(openFile(args.workspace).read())'
    # - the variable was never used, the file was parsed twice, and the handle
    # opened outside a 'with' was never closed.
    with openFile(args.workspace, 'r') as workspaceFile:
        workspaceJSON = json.load(workspaceFile)

    if args.intents:
        with openFile(args.intents, 'w') as intentsFile:
            intentsFile.write(json.dumps(workspaceJSON['intents'], indent=4, ensure_ascii=False))
    if args.entities:
        with openFile(args.entities, 'w') as entitiesFile:
            entitiesFile.write(json.dumps(workspaceJSON['entities'], indent=4, ensure_ascii=False))
    if args.dialog:
        with openFile(args.dialog, 'w') as dialogFile:
            dialogFile.write(json.dumps(workspaceJSON['dialog_nodes'], indent=4, ensure_ascii=False))
    if args.counterexamples:
        # Wrap the counterexamples list as one intent so the output file can
        # be consumed by the same tooling as a regular intents JSON.
        with openFile(args.counterexamples, 'w') as counterexamplesFile:
            counterexamplesJSON = []
            counterexampleIntentJSON = {}
            counterexampleIntentJSON['intent'] = "IRRELEVANT"
            counterexampleIntentJSON['examples'] = workspaceJSON['counterexamples']
            counterexamplesJSON.append(counterexampleIntentJSON)
            counterexamplesFile.write(json.dumps(counterexamplesJSON, indent=4, ensure_ascii=False))

    logger.verbose("Workspace %s was successfully decomposed", args.workspace)
def main(argv):
    """Execute the whole build-and-deploy pipeline step by step.

    Assembles a '-c <configfile>' parameter list (from --config arguments, or
    from the defaults 'shared.cfg'/'private.cfg' when none are given) and runs
    each pipeline script's main() with it. Exits with code 1 when an explicit
    config file is missing or no config file at all can be found.
    """
    scriptsPath = os.path.dirname(__file__)
    defaultParamList = ['shared.cfg', 'private.cfg']
    parser = argparse.ArgumentParser(
        description='This script executes all the steps needed for building and deployment of the WeatherFrog application.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config', help='configuaration file', action='append')
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None,
                        choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    logger.info('STARTING: ' + os.path.basename(__file__))
    logger.info('Using WAW directory: ' + os.path.dirname(__file__))
    logger.verbose('THIS IS VERBOSE')

    # Assemble command line parameters out of parameters or defaults
    paramsAll = []
    if hasattr(args, 'config') and args.config != None:
        # if config files provided - ignore defaults
        for strParamsItem in args.config:
            if os.path.isfile(strParamsItem):
                paramsAll += ['-c', strParamsItem]
            else:
                logger.error('Configuration file %s not found.', strParamsItem)
                exit(1)
    else:
        # create list of default config files
        for strParamsItem in defaultParamList:
            if os.path.isfile(strParamsItem):
                paramsAll += ['-c', strParamsItem]
            else:
                logger.warning('Default configuration file %s was not found, ignoring.', strParamsItem)
    if len(paramsAll) == 0:
        logger.error('Please provide at least one configuration file.')
        exit(1)

    # Execute all steps
    logger.verbose('python clean_generated.py ' + ' '.join(paramsAll))
    clean_generated.main(paramsAll)
    logger.verbose('python dialog_xls2xml.py ' + ' '.join(paramsAll))
    dialog_xls2xml.main(paramsAll)
    logger.verbose('python dialog_xml2json.py ' + ' '.join(paramsAll))
    dialog_xml2json.main(paramsAll)
    logger.verbose('python entities_csv2json.py ' + ' '.join(paramsAll))
    entities_csv2json.main(paramsAll)
    logger.verbose('python intents_csv2json.py ' + ' '.join(paramsAll))
    intents_csv2json.main(paramsAll)
    # BUGFIX: the log line here claimed 'clean_generated.py' while the call
    # below is dialog_xml2json.main - the message now matches the call.
    # NOTE(review): this second dialog_xml2json invocation looks like a
    # copy-paste remnant; confirm it is intentional before removing it.
    logger.verbose('python dialog_xml2json.py ' + ' '.join(paramsAll))
    dialog_xml2json.main(paramsAll)
    logger.verbose('python workspace_compose.py ' + ' '.join(paramsAll))
    workspace_compose.main(paramsAll)
    logger.verbose('python workspace_addjson.py ' + ' '.join(paramsAll))
    workspace_addjson.main(paramsAll)
    logger.verbose('python workspace_deploy.py ' + ' '.join(paramsAll))
    workspace_deploy.main(paramsAll)
    logger.verbose('python functions_deploy.py ' + ' '.join(paramsAll))
    functions_deploy.main(paramsAll)
    logger.info('FINISHING: ' + os.path.basename(__file__))
def main(argv):
    """Convert intent .csv files into the Watson Conversation Service intents JSON.

    Collects csv files from the 'common_intents' and 'common_generated_intents'
    directories, derives each intent name from the file name (subject to the
    name policy and -ni regex replacements), and writes the combined intents
    list to the configured output file, or to stdout when no output is set.
    """
    parser = argparse.ArgumentParser(
        description='Converts intent csv files to .json format of Watson Conversation Service',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--common_configFilePaths', help='configuaration file', action='append')
    parser.add_argument('-oc', '--common_output_config', help='output configuration file')
    parser.add_argument('-ii', '--common_intents',
                        help='directory with intent csv files to be processed (all of them will be included in output json)',
                        action='append')
    # -gi is functionsally equivalent to -ii
    parser.add_argument('-gi', '--common_generated_intents',
                        help='directory with generated intent csv files to be processed (all of them will be included in output json)',
                        action='append')
    parser.add_argument('-od', '--common_outputs_directory', required=False,
                        help='directory where the otputs will be stored (outputs is default)')
    parser.add_argument('-oi', '--common_outputs_intents',
                        help='file with output json with all the intents')
    parser.add_argument('-ni', '--common_intents_nameCheck', action='append', nargs=2,
                        help="regex and replacement for intent name check, e.g. '-' '_' for to replace hyphens for underscores or '$special' '\\L' for lowercase")
    parser.add_argument('-s', '--soft', required=False,
                        help='soft name policy - change intents and entities names without error.',
                        action='store_true', default="")
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None,
                        choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    config = Cfg(args)
    NAME_POLICY = 'soft' if args.soft else 'hard'
    logger.info('STARTING: ' + os.path.basename(__file__))

    if not hasattr(config, 'common_intents'):
        logger.info('intents parameter is not defined.')
    if not hasattr(config, 'common_generated_intents'):
        logger.info('generated_intents parameter is not defined, ignoring')
    if not hasattr(config, 'common_outputs_intents'):
        logger.info('Outputs_intents parameter is not defined, output will be generated to console.')

    intents = []
    # BUGFIX: the original called getattr(config, 'common_intents') with no
    # default, raising AttributeError right after merely logging that the
    # parameter is not defined; default to an empty list so processing
    # continues (possibly with generated intents only).
    pathList = getattr(config, 'common_intents', [])
    if hasattr(config, 'common_generated_intents'):
        pathList = pathList + getattr(config, 'common_generated_intents')
    filesAtPath = getFilesAtPath(pathList)
    for intentFileName in sorted(filesAtPath):
        # intent name = csv file name (without extension), normalized
        intentName = toIntentName(NAME_POLICY, args.common_intents_nameCheck,
                                  os.path.splitext(os.path.basename(intentFileName))[0])
        with openFile(intentFileName, 'r', encoding='utf8') as intentFile:
            intent = {}
            intent['intent'] = intentName
            examples = []
            for line in intentFile:
                # remove comments
                line = line.split('#')[0]
                line = line.rstrip().lower()
                #non-ascii characters fix
                #line = line.encode('utf-8')
                if line:
                    example = processExample(line, intentName, examples)
                    #adding to the list
                    if example:
                        examples.append(example)
            intent['examples'] = examples
            intents.append(intent)

    if hasattr(config, 'common_outputs_directory') and hasattr(config, 'common_outputs_intents'):
        if not os.path.exists(getattr(config, 'common_outputs_directory')):
            os.makedirs(getattr(config, 'common_outputs_directory'))
            logger.info('Created new output directory ' + getattr(config, 'common_outputs_directory'))
        with codecs.open(os.path.join(getattr(config, 'common_outputs_directory'),
                                      getattr(config, 'common_outputs_intents')),
                         'w', encoding='utf8') as outputFile:
            outputFile.write(json.dumps(intents, indent=4, ensure_ascii=False))
    else:
        print(json.dumps(intents, indent=4, ensure_ascii=False))
    logger.info('FINISHING: ' + os.path.basename(__file__))
def main(argv):
    """Compare expected vs. received dialog-flow JSON files line by line and
    emit a JUnit-style XML report.

    Both input files contain one JSON document per line; each expected line
    carries the payload under key 'output_message' plus a 'dialog_id' that
    groups consecutive lines into dialogs (one <testsuite> element each).
    Exits with status 1 when the received file runs out of lines early, and
    raises NameError('FailedTestDetected') at the end when -e was given and
    at least one dialog failed.
    """
    parser = argparse.ArgumentParser(
        description='Compares all dialog flows from given files and generate xml report',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument('expectedFileName', help='file with expected JSONs (One at each line at key \'output_message\')')
    parser.add_argument('receivedFileName', help='file with received JSONs')
    # optional arguments
    parser.add_argument('-o', '--output', required=False, help='name of generated xml file', default='test.junit.xml')
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    parser.add_argument('-e', '--exception_if_fail', required=False, help='script throws exception if any test fails', action='store_true')
    args = parser.parse_args(argv)
    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)
    # test name = expected file name stripped of its extension
    testName = re.sub(r"\.[^\.]*$", "", os.path.basename(args.expectedFileName))
    # expected JSON
    with openFile(args.expectedFileName, "r") as expectedJsonFile:
        # received JSON
        with openFile(args.receivedFileName, "r") as receivedJsonFile:
            # init whole test
            nDialogs = 0
            nDialogsFailed = 0
            firstFailedLine = None  # line number of the first failing comparison, for the summary
            timeStart = time.time()
            # XML (whole test)
            outputXml = LET.Element('testsuites')
            # print (whole test)
            logger.info('--------------------------------------------------------------------------------')
            logger.info('-- TEST: ' + testName)
            logger.info('--------------------------------------------------------------------------------')
            # XML (new dialouge)
            dialogXml = LET.Element('testsuite')
            outputXml.append(dialogXml)
            expectedJsonLine = expectedJsonFile.readline()
            receivedJsonLine = receivedJsonFile.readline()
            line = 0
            dialogId = 0
            nTestsinDialog = 0
            nFailuresInDialog = 0
            timeDialogStart = time.time()
            # for every line
            while expectedJsonLine:
                line += 1
                if not receivedJsonLine:  # no more received line
                    logger.error('Missing output JSON in file %s, line %d', args.receivedFileName, line)
                    sys.exit(1)
                expectedData = json.loads(expectedJsonLine)
                expectedJson = expectedData['output_message']
                receivedJson = json.loads(receivedJsonLine)
                # a change in dialog_id marks a dialog boundary: close the
                # previous dialog's bookkeeping and start a fresh testsuite
                if (dialogId == 0 or dialogId != expectedData['dialog_id']):
                    if nDialogs > 0:
                        # end previous dialog
                        logger.info('--------------------------------------------------------------------------------')
                        if nFailuresInDialog:  # at least one failure in this dialog
                            logger.info(
                                '-- TEST RESULT: FAILED, TOTAL FAILURES: %d, LINE OF THE FIRST FAILURE: %d',
                                nFailuresInDialog, firstFailedLine)
                            nDialogsFailed += 1
                        else:
                            logger.info('-- TEST RESULT: OK')
                        logger.info('--------------------------------------------------------------------------------')
                        # XML previous dialog
                        dialogXml.attrib['name'] = 'dialog ' + str(dialogId)
                        dialogXml.attrib['tests'] = str(nTestsinDialog)
                        dialogXml.attrib['failures'] = str(nFailuresInDialog)
                        dialogXml.attrib['time'] = str(time.time() - timeDialogStart)
                        # XML (new dialouge)
                        dialogXml = LET.Element('testsuite')
                        outputXml.append(dialogXml)
                    # init new dialog
                    nDialogs += 1
                    nTestsinDialog = 0
                    nFailuresInDialog = 0
                    timeDialogStart = time.time()
                    dialogId = expectedData['dialog_id']
                nTestsinDialog += 1
                timeLineStart = time.time()
                # NOTE(review): checkMessagesTime is initialized but never
                # updated, so the per-line TIME logged below is always 0.00 -
                # confirm whether areSame() was meant to be timed here.
                checkMessagesTime = 0
                failureData = {'expectedElement': "", 'receivedElement': ""}
                # XML
                lineXml = LET.Element('testcase')
                dialogXml.append(lineXml)
                lineXml.attrib['name'] = 'line ' + str(line)
                # NOTE(review): the testcase 'time' attribute is computed
                # before the comparison runs, so it is effectively zero.
                lineXml.attrib['time'] = str(time.time() - timeLineStart)
                if not areSame(expectedJson, receivedJson, failureData, "root"):
                    # line failure
                    lineXml.append(createLineFailureXML(failureData))
                    nFailuresInDialog += 1  # in this file
                    if firstFailedLine is None:
                        firstFailedLine = line
                    logger.info('EXPECTED OUTPUT: ' + json.dumps(expectedJson, indent=4, ensure_ascii=False))
                    logger.info('RECEIVED OUTPUT: ' + json.dumps(receivedJson, indent=4, ensure_ascii=False))
                    resultText = 'FAILED'
                else:
                    resultText = 'OK'
                logger.info(' LINE: %d, RESULT: %s, TIME: %.2f sec', line, resultText, checkMessagesTime)
                expectedJsonLine = expectedJsonFile.readline()
                receivedJsonLine = receivedJsonFile.readline()
            # end for each line
            # end previous dialog (same bookkeeping as inside the loop, for
            # the last dialog which has no following boundary to trigger it)
            logger.info('--------------------------------------------------------------------------------')
            if nFailuresInDialog:  # at least one failure in this dialog
                logger.info(
                    '-- TEST RESULT: FAILED, TOTAL FAILURES: %d, LINE OF THE FIRST FAILURE: %d',
                    nFailuresInDialog, firstFailedLine)
                nDialogsFailed += 1
            else:
                logger.info('-- TEST RESULT: OK')
            logger.info('--------------------------------------------------------------------------------')
            # XML previous dialog
            dialogXml.attrib['name'] = 'dialog ' + str(dialogId)
            dialogXml.attrib['tests'] = str(nTestsinDialog)
            dialogXml.attrib['failures'] = str(nFailuresInDialog)
            dialogXml.attrib['time'] = str(time.time() - timeDialogStart)
            if receivedJsonLine:
                # extra lines in the received file are reported but do not abort
                logger.error('More than expected lines in file %s, line %d', args.receivedFileName, line)
    # close files
    logger.info('-------------------------------------------------------------------------------')
    logger.info('--------------------------------------------------------------------------------')
    if nDialogsFailed:
        logger.info('-- SUMMARY - DIALOUGES: %s, RESULT: FAILED, FAILED DIALOGS: %d', nDialogs, nDialogsFailed)
    else:
        logger.info('-- SUMMARY - DIALOUGES: %s, RESULT: OK', nDialogs)
    logger.info('--------------------------------------------------------------------------------')
    # fill in the top-level testsuites attributes and write the report
    outputXml.attrib['name'] = testName
    outputXml.attrib['tests'] = str(nDialogs)
    outputXml.attrib['failures'] = str(nDialogsFailed)
    outputXml.attrib['timestamp'] = '{0:%Y-%b-%d %H:%M:%S}'.format(datetime.datetime.now())
    outputXml.attrib['time'] = str(time.time() - timeStart)
    with openFile(args.output, "w") as outputFile:
        outputFile.write(LET.tostring(outputXml, pretty_print=True, encoding='unicode'))
    #as last step of our script, we raise an exception in case user required such behavior and any test failure was detected
    if args.exception_if_fail and nDialogsFailed:
        raise NameError('FailedTestDetected')
def main(argv):
    """Deletes the cloudfunctions package specified in the configuration file or as CLI argument.

    Fetches the package's action list, deletes every action (sequences first),
    then deletes the package itself. Exits with status 1 on the first failed
    REST call.
    """
    parser = argparse.ArgumentParser(
        description="Deletes cloud functions package.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('-c', '--common_configFilePaths', help="configuration file", action='append')
    parser.add_argument('--common_functions', required=False, help="directory where the cloud functions are located")
    parser.add_argument('--cloudfunctions_namespace', required=False, help="cloud functions namespace")
    parser.add_argument('--cloudfunctions_apikey', required=False, help="cloud functions apikey")
    parser.add_argument('--cloudfunctions_username', required=False, help="cloud functions user name")
    parser.add_argument('--cloudfunctions_password', required=False, help="cloud functions password")
    parser.add_argument('--cloudfunctions_package', required=False, help="cloud functions package name")
    parser.add_argument('--cloudfunctions_url', required=False, help="url of cloud functions API")
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    def handleResponse(response):
        """Get response code and show an error if it's not OK"""
        code = response.status_code
        if code != requests.codes.ok:
            if code == 401:
                logger.error("Authorization error. Check your credentials. (Error code " + str(code) + ")")
            elif code == 403:
                logger.error("Access is forbidden. Check your credentials and permissions. (Error code " + str(code) + ")")
            elif code == 404:
                logger.error("The resource could not be found. Check your cloudfunctions url and namespace. (Error code " + str(code) + ")")
            elif code >= 500:
                logger.error("Internal server error. (Error code " + str(code) + ")")
            else:
                logger.error("Unexpected error code: " + str(code))
            errorsInResponse(response.json())
            return False
        return True

    def isActionSequence(action):
        # a sequence action carries the annotation {'key': 'exec', 'value': 'sequence'}
        for annotation in action['annotations']:
            if 'key' in annotation and annotation['key'] == 'exec':
                if 'value' in annotation and annotation['value'] == 'sequence':
                    return True
        return False

    config = Cfg(args)
    logger.info('STARTING: ' + os.path.basename(__file__))
    namespace = getRequiredParameter(config, 'cloudfunctions_namespace')
    urlNamespace = quote(namespace)
    auth = getParametersCombination(config, 'cloudfunctions_apikey',
                                    ['cloudfunctions_password', 'cloudfunctions_username'])
    package = getRequiredParameter(config, 'cloudfunctions_package')
    cloudfunctionsUrl = getRequiredParameter(config, 'cloudfunctions_url')
    # validated here even though unused below - keeps config requirements consistent
    functionDir = getRequiredParameter(config, 'common_functions')
    if 'cloudfunctions_apikey' in auth:
        username, password = convertApikeyToUsernameAndPassword(auth['cloudfunctions_apikey'])
    else:
        username = auth['cloudfunctions_username']
        password = auth['cloudfunctions_password']

    logger.info("Will delete cloud functions in package '" + package + "'.")
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    packageUrl = cloudfunctionsUrl + '/' + urlNamespace + '/packages/' + package
    response = requests.get(packageUrl, auth=(username, password),
                            headers={'Content-Type': 'application/json'})
    if not handleResponse(response):
        logger.critical("Unable to get information about package '" + package + "'.")
        sys.exit(1)
    actions = response.json()['actions']
    # put the sequences at the beginning
    # BUGFIX: ascending sort on isActionSequence put sequences LAST
    # (False sorts before True); sort descending so sequences are deleted
    # before the actions they are composed of.
    actions.sort(key=isActionSequence, reverse=True)
    for action in actions:
        name = action['name']
        actionUrl = cloudfunctionsUrl + '/' + urlNamespace + '/actions/' + package + '/' + name
        logger.verbose("Deleting action '" + name + "' at " + actionUrl)
        response = requests.delete(actionUrl, auth=(username, password),
                                   headers={'Content-Type': 'application/json'})
        if not handleResponse(response):
            # BUGFIX: message was missing the opening quote around the action name
            logger.critical("Unable to delete action '" + name + "' at " + actionUrl)
            sys.exit(1)
        logger.verbose("Action deleted.")
    logger.verbose("Deleting package '" + package + "' at " + packageUrl)
    response = requests.delete(packageUrl, auth=(username, password),
                               headers={'Content-Type': 'application/json'})
    if not handleResponse(response):
        logger.critical("Unable to delete package '" + package + "' at " + packageUrl)
        sys.exit(1)
    logger.verbose("Package deleted.")
    logger.info("Cloud functions in package successfully deleted.")
def main(argv):
    """Deploy a workspace JSON to the Watson Conversation Service.

    Looks up workspaces matching the configuration: updates the single match,
    creates a new workspace when there is none, and errors out when the name
    is ambiguous. Optionally saves the resulting configuration and generates a
    redirect HTML file pointing at the test client.
    """
    parser = argparse.ArgumentParser(
        description="Deploys a workspace in json format to the Watson Conversation Service. "
                    "If there is no 'conversation_workspace_id' provided and the "
                    "'conversation_workspace_name_unique' is set to 'true', it uploads "
                    "a workspace to the place specified by the 'conversation_workspace_name'",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-of', '--common_outputs_directory', required=False, help='directory where the otputs are stored')
    parser.add_argument('-ow', '--common_outputs_workspace', required=False, help='name of the json file with workspace')
    parser.add_argument('-c', '--common_configFilePaths', help='configuaration file', action='append')
    parser.add_argument('-oc', '--common_output_config', help='output configuration file')
    parser.add_argument('-cu', '--conversation_url', required=False, help='url of the conversation service API')
    parser.add_argument('-cv', '--conversation_version', required=False, help='version of the conversation service API')
    parser.add_argument('-cn', '--conversation_username', required=False, help='username of the conversation service instance')
    parser.add_argument('-cp', '--conversation_password', required=False, help='password of the conversation service instance')
    parser.add_argument('-cid', '--conversation_workspace_id', required=False,
                        help='workspace_id of the application. If a workspace id is provided, previous workspace content is overwritten, otherwise a new workspace is created ')
    parser.add_argument('-wn', '--conversation_workspace_name', required=False, help='name of the workspace')
    parser.add_argument('-wnu', '--conversation_workspace_name_unique', required=False,
                        help='true if the workspace name should be unique across apecified assistant')
    parser.add_argument('-v', '--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    config = Cfg(args)
    logger.info('STARTING: ' + os.path.basename(__file__))

    # workspace info
    try:
        workspaceFilePath = os.path.join(
            getRequiredParameter(config, 'common_outputs_directory'),
            getRequiredParameter(config, 'common_outputs_workspace'))
        with openFile(workspaceFilePath, 'r') as workspaceFile:
            workspace = json.load(workspaceFile)
    except IOError:
        logger.error('Cannot load workspace file %s', workspaceFilePath)
        sys.exit(1)

    # workspace name
    workspaceName = getOptionalParameter(config, 'conversation_workspace_name')
    if workspaceName:
        workspace['name'] = workspaceName
    # workspace language
    workspaceLanguage = getOptionalParameter(config, 'conversation_language')
    if workspaceLanguage:
        workspace['language'] = workspaceLanguage

    # credentials (required)
    username = getRequiredParameter(config, 'conversation_username')
    password = getRequiredParameter(config, 'conversation_password')
    # url (required)
    workspacesUrl = getRequiredParameter(config, 'conversation_url')
    # version (required)
    version = getRequiredParameter(config, 'conversation_version')

    # workspace id
    workspaces = filterWorkspaces(config, getWorkspaces(workspacesUrl, version, username, password))
    if len(workspaces) > 1:
        # if there is more than one workspace with the same name -> error
        logger.error('There are more than one workspace with this name, do not know which one to update.')
        exit(1)
    elif len(workspaces) == 1:
        workspaceId = workspaces[0]['workspace_id']
        logger.info("Updating existing workspace.")
    else:
        workspaceId = ""
        logger.info("Creating new workspace.")

    requestUrl = workspacesUrl + '/' + workspaceId + '?version=' + version

    # create/update workspace
    response = requests.post(requestUrl, auth=(username, password),
                             headers={'Content-Type': 'application/json'},
                             data=json.dumps(workspace, indent=4))
    responseJson = response.json()
    logger.verbose("response: %s", responseJson)
    if not errorsInResponse(responseJson):
        logger.info('Workspace successfully uploaded.')
    else:
        logger.error('Cannot upload workspace.')
        sys.exit(1)

    if not getOptionalParameter(config, 'conversation_workspace_id'):
        setattr(config, 'conversation_workspace_id', responseJson['workspace_id'])
    logger.info('WCS WORKSPACE_ID: %s', responseJson['workspace_id'])

    outputConfigFile = getOptionalParameter(config, 'common_output_config')
    if outputConfigFile:
        config.saveConfiguration(outputConfigFile)

    clientName = getOptionalParameter(config, 'context_client_name')
    if clientName:
        # Assembling uri of the client
        clientv2URL = 'https://clientv2-latest.mybluemix.net/#defaultMinMode=true'
        clientv2URL += '&prefered_workspace_id=' + getattr(config, 'conversation_workspace_id')
        clientv2URL += '&prefered_workspace_name=' + getattr(config, 'conversation_workspace_name')
        clientv2URL += '&shared_examples_service=&url=http://zito.mybluemix.net'
        # BUGFIX: the credential expressions had been scrubbed to "******",
        # leaving invalid syntax; restored reading them from the configuration.
        clientv2URL += '&username=' + getattr(config, 'conversation_username')
        clientv2URL += '&custom_ui.title=' + getattr(config, 'conversation_workspace_name')
        clientv2URL += '&password=' + getattr(config, 'conversation_password')
        clientv2URL += '&custom_ui.machine_img='
        clientv2URL += '&custom_ui.user_img='
        clientv2URL += '&context.user_name=' + getattr(config, 'context_client_name')
        # BUGFIX: 'unicode' is Python 2 only; str() is the Python 3 equivalent
        clientv2URL += '&context.link_build_date=' + str(datetime.datetime.now().strftime("%y-%m-%d-%H-%M"))
        clientv2URL += '&prefered_tts=none'
        clientv2URL += '&bluemix_tts.username=xx'
        clientv2URL += '&bluemix_tts.password=xx'
        clientv2URL += '&compact_mode=true'
        clientv2URL += '&compact_switch_enabled=true'
        # BUGFIX: this parameter was missing its '&' separator and would have
        # been glued onto the previous query value
        clientv2URL += '&developer_switch_enabled=false'
        logger.info('clientv2URL=%s', clientv2URL)
        # create file with automatic redirect
        clientFileName = getOptionalParameter(config, 'common_outputs_client')
        if clientFileName:
            clientFilePath = os.path.join(
                getRequiredParameter(config, 'common_outputs_directory'), clientFileName)
            try:
                with openFile(clientFilePath, "w") as clientFile:
                    clientFile.write('<meta http-equiv="refresh" content="0; url=' + clientv2URL + '" />')
                    clientFile.write('<p><a href="' + clientv2URL + '">Redirect</a></p>')
                    # redundant explicit close() removed - 'with' closes the file
            except IOError:
                logger.error('Cannot write to %s', clientFilePath)
                sys.exit(1)

    logger.info('FINISHING: ' + os.path.basename(__file__))