def main(argv):
    parser = argparse.ArgumentParser(description='This script takes a workspace JSON as one parameter and another JSON (i.e., a piece of a context data structure) and puts the second one into the desired place in the first one. This happens in place.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # arguments
    parser.add_argument('-c', '--common_configFilePaths', help='configuration file', action='append')
    parser.add_argument('-w','--common_outputs_workspace', required=False, help='filename of the original workspace JSON')
    parser.add_argument('-d','--common_outputs_directory', required=False, help='directory, where the workspace is located')
    parser.add_argument('-j','--includejsondata_jsonfile', required=False, help='file with JSON you want to include')
    parser.add_argument('-t','--includejsondata_targetkey', required=False, help='the key under which you want to add your JSON, e.g., for "data_structure": null, where you want to replace null, you would pass "data_structure" as this parameter')
    # optional arguments
    parser.add_argument('-v','--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    #init the parameters
    args = parser.parse_args(argv)
    
    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    config = Cfg(args)

    logger.info('STARTING: ' + os.path.basename(__file__))

    # get required parameters
    # workspace
    with codecs.open(os.path.join(getRequiredParameter(config, 'common_outputs_directory'), getRequiredParameter(config, 'common_outputs_workspace')), 'r', encoding='utf8') as inputpath:
        try:
            workspaceInput = json.load(inputpath)
        except ValueError:
            logger.error('Workspace JSON is not valid JSON: %s', os.path.join(getRequiredParameter(config, 'common_outputs_directory'), getRequiredParameter(config, 'common_outputs_workspace')))
            exit(1)
    # json to add
    with codecs.open(getRequiredParameter(config, 'includejsondata_jsonfile'), 'r', encoding='utf8') as jsonincludepath:
        try:
            jsonInclude = json.load(jsonincludepath)
        except ValueError:
            logger.error('JSON to include is not valid JSON: %s', getRequiredParameter(config, 'includejsondata_jsonfile'))
            exit(1)
    # target element
    targetKey = getRequiredParameter(config, 'includejsondata_targetkey')

    # find the target key and add the json
    replacedValuesNumber = 0
    if 'dialog_nodes' in workspaceInput:
        workspaceInput['dialog_nodes'], replacedValuesNumber = replaceValue(workspaceInput['dialog_nodes'], targetKey, jsonInclude)
    else:
        logger.warning('Workspace does not contain \'dialog_nodes\'')

    # writing the file
    with codecs.open(os.path.join(getattr(config, 'common_outputs_directory'), getattr(config, 'common_outputs_workspace')), 'w', encoding='utf8')  as outfile:
        json.dump(workspaceInput, outfile, indent=4)

    if replacedValuesNumber == 0:
        logger.warning('Target key not found.')
    else:
        logger.info('Writing workspace with added JSON was successful.')

    logger.info('FINISHING: ' + os.path.basename(__file__))
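
# The script above relies on a replaceValue(data, targetKey, newValue) helper defined
# elsewhere in this project. A minimal sketch of what a key-based variant of such a
# helper could look like is given below; the name, signature and behavior are
# assumptions for illustration only (the project's real replaceValue is also used
# later with a '::<value>' target and an extra flag, which this sketch does not cover).
def _replaceValueSketch(data, targetKey, newValue):
    """Recursively replace the value stored under targetKey; return (data, count)."""
    replaced = 0
    if isinstance(data, dict):
        for key, value in data.items():
            if key == targetKey:
                data[key] = newValue
                replaced += 1
            else:
                data[key], count = _replaceValueSketch(value, targetKey, newValue)
                replaced += count
    elif isinstance(data, list):
        for index, item in enumerate(data):
            data[index], count = _replaceValueSketch(item, targetKey, newValue)
            replaced += count
    return data, replaced
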
def main(argv):
    # parse sequence names - because we need to get the name first and
    # then create corresponding arguments for the main parser
    sequenceSubparser = argparse.ArgumentParser()
    sequenceSubparser.add_argument('--cloudfunctions_sequences', nargs='+')
    argvWithoutHelp = list(argv)
    if "--help" in argv: argvWithoutHelp.remove("--help")
    if "-h" in argv: argvWithoutHelp.remove("-h")
    sequenceNames = sequenceSubparser.parse_known_args(
        argvWithoutHelp)[0].cloudfunctions_sequences or []

    parser = argparse.ArgumentParser(
        description="Deploys the cloud functions",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-v',
                        '--verbose',
                        required=False,
                        help='verbosity',
                        action='store_true')
    parser.add_argument('-c',
                        '--common_configFilePaths',
                        help="configuaration file",
                        action='append')
    parser.add_argument('--common_functions',
                        required=False,
                        help="directory where the cloud functions are located")
    parser.add_argument('--cloudfunctions_namespace',
                        required=False,
                        help="cloud functions namespace")
    parser.add_argument('--cloudfunctions_apikey',
                        required=False,
                        help="cloud functions apikey")
    parser.add_argument('--cloudfunctions_username',
                        required=False,
                        help="cloud functions user name")
    parser.add_argument('--cloudfunctions_password',
                        required=False,
                        help="cloud functions password")
    parser.add_argument('--cloudfunctions_package',
                        required=False,
                        help="cloud functions package name")
    parser.add_argument('--cloudfunctions_url',
                        required=False,
                        help="url of cloud functions API")
    parser.add_argument('--log',
                        type=str.upper,
                        default=None,
                        choices=list(logging._levelToName.values()))
    parser.add_argument('--cloudfunctions_sequences',
                        nargs='+',
                        required=False,
                        help="cloud functions sequence names")

    for runtime in list(interpretedRuntimes.values()) + list(
            compiledRuntimes.values()):
        parser.add_argument('--cloudfunctions_' + runtime + '_version',
                            required=False,
                            help="cloud functions " + runtime + " version")

    # Add arguments for each sequence to be able to define the functions in the sequence
    for sequenceName in sequenceNames:
        try:
            parser.add_argument("--cloudfunctions_sequence_" + sequenceName,
                                required=True,
                                help="functions in sequence '" + sequenceName +
                                "'")
        except argparse.ArgumentError as e:
            if "conflicting option" in str(e):
                # from None is needed in order to show only the custom exception and not the whole traceback
                # (It would read as 'During handling of the above exception, another exception has occurred', but we DID handle it)
                raise argparse.ArgumentError(
                    None, "Duplicate sequence name: " + sequenceName) from None
            else:
                raise e

    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    logger.info('STARTING: ' + os.path.basename(__file__))

    def handleResponse(response):
        """Get response code and show an error if it's not OK"""
        code = response.status_code
        if code != requests.codes.ok:
            if code == 401:
                logger.error(
                    "Authorization error. Check your credentials. (Error code "
                    + str(code) + ")")
            elif code == 403:
                logger.error(
                    "Access is forbidden. Check your credentials and permissions. (Error code "
                    + str(code) + ")")
            elif code == 404:
                logger.error(
                    "The resource could not be found. Check your cloudfunctions url and namespace. (Error code "
                    + str(code) + ")")
            elif code == 408:
                logger.error("Request Timeout. (Error code " + str(code) + ")")
            elif code >= 500:
                logger.error("Internal server error. (Error code " +
                             str(code) + ")")
            else:
                logger.error("Unexpected error code: " + str(code))

            errorsInResponse(response.json())
            return False
        return True

    config = Cfg(args)

    namespace = getRequiredParameter(config, 'cloudfunctions_namespace')
    urlNamespace = quote(namespace)
    auth = getParametersCombination(
        config, 'cloudfunctions_apikey',
        ['cloudfunctions_password', 'cloudfunctions_username'])
    package = getRequiredParameter(config, 'cloudfunctions_package')
    cloudFunctionsUrl = getRequiredParameter(config, 'cloudfunctions_url')
    functionDir = getRequiredParameter(config, 'common_functions')
    # If sequence names are already defined (from console), do nothing. Else look for them in the configuration.
    if not sequenceNames:
        sequenceNames = getOptionalParameter(config,
                                             'cloudfunctions_sequences') or []
    # SequenceNames has to be a list
    if type(sequenceNames) is str:
        sequenceNames = [sequenceNames]
    # Create a dict of {<seqName>: [<functions 1>, <function2> ,...]}
    sequences = {
        seqName: getRequiredParameter(config,
                                      "cloudfunctions_sequence_" + seqName)
        for seqName in sequenceNames
    }

    if 'cloudfunctions_apikey' in auth:
        username, password = convertApikeyToUsernameAndPassword(
            auth['cloudfunctions_apikey'])
    else:
        username = auth['cloudfunctions_username']
        password = auth['cloudfunctions_password']

    runtimeVersions = {}
    for ext, runtime in list(interpretedRuntimes.items()) + list(
            compiledRuntimes.items()):
        runtimeVersions[runtime] = runtime + ':' + getattr(
            config, 'cloudfunctions_' + runtime + '_version', 'default')
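    # Example of the resulting mapping (values illustrative): a configured
    # cloudfunctions_python_version of '3.7' yields {'python': 'python:3.7'};
    # runtimes without a configured version fall back to '<runtime>:default'.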

    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    packageUrl = cloudFunctionsUrl + '/' + urlNamespace + '/packages/' + package + '?overwrite=true'
    logger.info("Will create cloudfunctions package %s.", package)
    response = requests.put(packageUrl,
                            auth=(username, password),
                            headers={'Content-Type': 'application/json'},
                            data='{}')
    if not handleResponse(response):
        logger.critical("Cannot create cloud functions package %s.", package)
        sys.exit(1)
    else:
        logger.info('Cloud functions package successfully uploaded')

    filesAtPath = getFilesAtPath(functionDir, [
        '*' + ext for ext in (list(interpretedRuntimes) +
                              list(compiledRuntimes) + compressedFiles)
    ])

    logger.info("Will deploy functions at paths %s.", functionDir)

    for functionFilePath in filesAtPath:
        fileName = os.path.basename(functionFilePath)
        (funcName, ext) = os.path.splitext(fileName)

        runtime = None
        binary = False
        # if the file is zip, it's necessary to look inside
        if ext == '.zip':
            runtime = _getZipPackageType(functionFilePath)
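            # _getZipPackageType presumably inspects the archive contents to decide the
            # runtime (e.g. by looking for a Python or Node.js entry point inside the zip);
            # the exact detection rules live in that helper, not here.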
            if not runtime:
                logger.warning(
                    "Cannot determine function type from zip file '%s'. Skipping!",
                    functionFilePath)
                continue
            binary = True
        else:
            if ext in interpretedRuntimes:
                runtime = interpretedRuntimes[ext]
                binary = False
            elif ext in compiledRuntimes:
                runtime = compiledRuntimes[ext]
                binary = True
            else:
                logger.warning(
                    "Cannot determine function type of '%s'. Skipping!",
                    functionFilePath)
                continue

        functionUrl = cloudFunctionsUrl + '/' + urlNamespace + '/actions/' + package + '/' + funcName + '?overwrite=true'

        if binary:
            with open(functionFilePath, 'rb') as functionFile:
                content = base64.b64encode(functionFile.read()).decode('utf-8')
        else:
            with open(functionFilePath, 'r') as functionFile:
                content = functionFile.read()
        payload = {
            'exec': {
                'kind': runtimeVersions[runtime],
                'binary': binary,
                'code': content
            }
        }

        logger.verbose("Deploying function %s", funcName)
        response = requests.put(functionUrl,
                                auth=(username, password),
                                headers={'Content-Type': 'application/json'},
                                data=json.dumps(payload),
                                verify=False)
        if not handleResponse(response):
            logger.critical("Cannot deploy cloud function %s.", funcName)
            sys.exit(1)
        else:
            logger.verbose('Cloud function %s successfully deployed.',
                           funcName)
    logger.info("Cloudfunctions successfully deployed.")

    if sequences:
        logger.info("Will deploy cloudfunction sequences.")

    for seqName in sequences:
        sequenceUrl = cloudFunctionsUrl + '/' + urlNamespace + '/actions/' + package + '/' + seqName + '?overwrite=true'
        functionNames = sequences[seqName]
        fullFunctionNames = [
            namespace + '/' + package + '/' + functionName
            for functionName in functionNames
        ]
        payload = {
            'exec': {
                'kind': 'sequence',
                'binary': False,
                'components': fullFunctionNames
            }
        }
        logger.verbose("Deploying cloudfunctions sequence '%s': %s", seqName,
                       functionNames)
        response = requests.put(sequenceUrl,
                                auth=(username, password),
                                headers={'Content-Type': 'application/json'},
                                data=json.dumps(payload),
                                verify=False)
        if not handleResponse(response):
            logger.critical("Cannot deploy cloudfunctions sequence %s",
                            seqName)
            sys.exit(1)
        else:
            logger.verbose("Sequence '%s' deployed.", seqName)
    if sequences:
        logger.info("Cloudfunction sequences successfully deployed.")
    logger.info('FINISHING: ' + os.path.basename(__file__))
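
# convertApikeyToUsernameAndPassword is imported from elsewhere in this project.
# IBM Cloud Functions API keys have the form '<uuid>:<key>', so a plausible sketch of
# such a helper is a simple split on the first colon; the real implementation may differ.
def _convertApikeyToUsernameAndPasswordSketch(apikey):
    username, password = apikey.split(':', 1)
    return username, password
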
def main(argv):
    logger.info('STARTING: ' + os.path.basename(__file__))
    parser = argparse.ArgumentParser(
        description=
        'Deletes Bluemix conversation service workspace and deletes workspace id from config file.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c',
                        '--common_configFilePaths',
                        help='configuration file',
                        action='append')
    parser.add_argument('-oc',
                        '--common_output_config',
                        help='output configuration file')
    parser.add_argument('-cu',
                        '--conversation_url',
                        required=False,
                        help='url of the conversation service API')
    parser.add_argument('-cv',
                        '--conversation_version',
                        required=False,
                        help='version of the conversation service API')
    parser.add_argument('-cn',
                        '--conversation_username',
                        required=False,
                        help='username of the conversation service instance')
    parser.add_argument('-cp',
                        '--conversation_password',
                        required=False,
                        help='password of the conversation service instance')
    parser.add_argument('-cid',
                        '--conversation_workspace_id',
                        required=False,
                        help='workspace_id of the application.')
    parser.add_argument('-wn',
                        '--conversation_workspace_name',
                        required=False,
                        help='name of the workspace')
    parser.add_argument(
        '-wnm',
        '--conversation_workspace_match_by_name',
        required=False,
        help=
        'true if the workspace should be matched by name (or by pattern if defined)'
    )
    parser.add_argument(
        '-wnp',
        '--conversation_workspace_name_pattern',
        required=False,
        help='regex pattern specifying a name of workspaces to be deleted')
    parser.add_argument('-v',
                        '--verbose',
                        required=False,
                        help='verbosity',
                        action='store_true')
    parser.add_argument('--log',
                        type=str.upper,
                        default=None,
                        choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    config = Cfg(args)

    # load credentials
    version = getRequiredParameter(config, 'conversation_version')
    workspacesUrl = getRequiredParameter(config, 'conversation_url')
    username = getRequiredParameter(config, 'conversation_username')
    password = getRequiredParameter(config, 'conversation_password')
    try:
        workspaces = filterWorkspaces(
            config, getWorkspaces(workspacesUrl, version, username, password))
    except SystemExit as e:
        logger.error("Failed to retrieve workspaces to delete.")
        sys.exit(1)

    nWorkspacesDeleted = 0
    for workspace in workspaces:
        # delete workspace
        requestUrl = workspacesUrl + '/' + workspace[
            'workspace_id'] + '?version=' + version
        response = requests.delete(requestUrl,
                                   auth=(username, password),
                                   headers={'Accept': 'text/html'})
        responseJson = response.json()
        # check errors during upload
        errorsInResponse(responseJson)

        if response.status_code == 200:
            nWorkspacesDeleted += 1
            logger.info("Workspace '%s' was successfully deleted",
                        workspace['name'])
            # delete workspaceId from config file
            if hasattr(config, 'conversation_workspace_id'):
                delattr(config, 'conversation_workspace_id')
        elif response.status_code == 400:
            logger.error(
                "Error while deleting workspace  '%s', status code '%s' (invalid request)",
                workspace['name'], response.status_code)
            sys.exit(1)
        else:
            logger.error(
                "Error while deleting workspace  '%s', status code '%s'",
                workspace['name'], response.status_code)
            sys.exit(1)

    if not nWorkspacesDeleted:
        logger.info("No workspace has been deleted")
    elif nWorkspacesDeleted == 1:
        logger.info("One workspace has been successfully deleted")
    else:
        logger.info(
            str(nWorkspacesDeleted) +
            " workspaces have been successfully deleted")

    outputConfigFile = getOptionalParameter(config, 'common_output_config')
    if outputConfigFile:
        config.saveConfiguration(outputConfigFile)
        logger.info("Configuration was saved to %s", outputConfigFile)
def main(argv):
    """Deletes the cloudfunctions package specified in the configuration file or as CLI argument."""
    parser = argparse.ArgumentParser(
        description="Deletes cloud functions package.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-v',
                        '--verbose',
                        required=False,
                        help='verbosity',
                        action='store_true')
    parser.add_argument('-c',
                        '--common_configFilePaths',
                        help="configuration file",
                        action='append')
    parser.add_argument('--common_functions',
                        required=False,
                        help="directory where the cloud functions are located")
    parser.add_argument('--cloudfunctions_namespace',
                        required=False,
                        help="cloud functions namespace")
    parser.add_argument('--cloudfunctions_apikey',
                        required=False,
                        help="cloud functions apikey")
    parser.add_argument('--cloudfunctions_username',
                        required=False,
                        help="cloud functions user name")
    parser.add_argument('--cloudfunctions_password',
                        required=False,
                        help="cloud functions password")
    parser.add_argument('--cloudfunctions_package',
                        required=False,
                        help="cloud functions package name")
    parser.add_argument('--cloudfunctions_url',
                        required=False,
                        help="url of cloud functions API")
    parser.add_argument('--log',
                        type=str.upper,
                        default=None,
                        choices=list(logging._levelToName.values()))

    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    def handleResponse(response):
        """Get response code and show an error if it's not OK"""
        code = response.status_code
        if code != requests.codes.ok:
            if code == 401:
                logger.error(
                    "Authorization error. Check your credentials. (Error code "
                    + str(code) + ")")
            elif code == 403:
                logger.error(
                    "Access is forbidden. Check your credentials and permissions. (Error code "
                    + str(code) + ")")
            elif code == 404:
                logger.error(
                    "The resource could not be found. Check your cloudfunctions url and namespace. (Error code "
                    + str(code) + ")")
            elif code >= 500:
                logger.error("Internal server error. (Error code " +
                             str(code) + ")")
            else:
                logger.error("Unexpected error code: " + str(code))

            errorsInResponse(response.json())
            return False
        return True

    def isActionSequence(action):
        for annotation in action['annotations']:
            if 'key' in annotation and annotation['key'] == 'exec':
                if 'value' in annotation and annotation['value'] == 'sequence':
                    return True
        return False
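    # Illustrative shape of the action metadata checked above (fields trimmed):
    #   {'name': 'mySequence', 'annotations': [{'key': 'exec', 'value': 'sequence'}, ...]}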

    config = Cfg(args)
    logger.info('STARTING: ' + os.path.basename(__file__))

    namespace = getRequiredParameter(config, 'cloudfunctions_namespace')
    urlNamespace = quote(namespace)
    auth = getParametersCombination(
        config, 'cloudfunctions_apikey',
        ['cloudfunctions_password', 'cloudfunctions_username'])
    package = getRequiredParameter(config, 'cloudfunctions_package')
    cloudfunctionsUrl = getRequiredParameter(config, 'cloudfunctions_url')
    functionDir = getRequiredParameter(config, 'common_functions')

    if 'cloudfunctions_apikey' in auth:
        username, password = convertApikeyToUsernameAndPassword(
            auth['cloudfunctions_apikey'])
    else:
        username = auth['cloudfunctions_username']
        password = auth['cloudfunctions_password']

    logger.info("Will delete cloud functions in package '" + package + "'.")

    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    packageUrl = cloudfunctionsUrl + '/' + urlNamespace + '/packages/' + package
    response = requests.get(packageUrl,
                            auth=(username, password),
                            headers={'Content-Type': 'application/json'})
    if not handleResponse(response):
        logger.critical("Unable to get information about package '" + package +
                        "'.")
        sys.exit(1)

    actions = response.json()['actions']
    # put the sequences at the beginning
    actions.sort(key=lambda action: isActionSequence(action), reverse=True)

    for action in actions:
        name = action['name']
        actionUrl = cloudfunctionsUrl + '/' + urlNamespace + '/actions/' + package + '/' + name
        logger.verbose("Deleting action '" + name + "' at " + actionUrl)
        response = requests.delete(
            actionUrl,
            auth=(username, password),
            headers={'Content-Type': 'application/json'})
        if not handleResponse(response):
            logger.critical("Unable to delete action " + name + "' at " +
                            actionUrl)
            sys.exit(1)
        logger.verbose("Action deleted.")

    logger.verbose("Deleting package '" + package + "' at " + packageUrl)
    response = requests.delete(packageUrl,
                               auth=(username, password),
                               headers={'Content-Type': 'application/json'})
    if not handleResponse(response):
        logger.critical("Unable to delete package '" + package + "' at " +
                        packageUrl)
        sys.exit(1)
    logger.verbose("Package deleted.")
    logger.info("Cloud functions in package successfully deleted.")
def main(argv):
    '''
    Script takes an input json file that represents tests that should be run against
    Cloud Functions and produces an output that extends the input json file by the results
    returned from the CFs and by their evaluation.

    Inputs and expected outputs can contain string values that start with '::'
    (e.g. "key": "::valueToBeReplaced1") which will be replaced by matching
    configuration parameters or by values specified by the parameter 'replace'
    (format 'valueToBeReplaced1:replacement1,valueToBeReplaced2:replacement2').

    Input json file example:
    [
        {
            "name": "test example 1", # OPTIONAL
            "type": "EXACT_MATCH", # OPTIONAL (DEFAULT = EXACT_MATCH, OPTIONS = [EXACT_MATCH])
            "cf_package": "<CLOUD FUNCTIONS PACKAGE NAME>", # OPTIONAL (could be provided directly to script, at least one has to be specified, test level overrides global script one)
            "cf_function": "<CLOUD FUNCTIONS SPECIFIC FUNCTION TO BE TESTED>", # --||--
            "input": <OBJECT> | <@PATH/TO/FILE>, # payload to be send to CF (could be specified as a relative or absolute path to the file that contains json file, e.g. "input": "@inputs/test_example_1.json")
            "outputExpected": <OBJECT> | <@PATH/TO/FILE>, # expected payload to be return from CF (--||--)
        },
        {
            "name": "test example 2",
            ...
              rest of the test definition
            ...
        }
    ]

    Output json file example:
    [
        {
            "name": "test example 1",
            ...
              rest of the input test definition
            ...
            "outputReturned": <OBJECT>, # returned payload from CF
            "result": <0 - test passed, 1 - test failed>
            "diff": <OBJECT> # if test passed then "diff" is Null, else contains object that represents differences
        }
    ]
    '''
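    # Illustrative example of the '::' replacement described above: with
    # --replace 'myValue:abc' (a hypothetical value), an input of {"key": "::myValue"}
    # is sent to the cloud function as {"key": "abc"}.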
    parser = argparse.ArgumentParser(description='Runs all tests specified in the given file against Cloud Functions and saves the test outputs to the output file', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument('inputFileName', help='File with json array containing tests.')
    parser.add_argument('outputFileName', help='File where to store test outputs.')
    # optional arguments
    parser.add_argument('-c', '--common_configFilePaths', help='configuration file', action='append')
    parser.add_argument('--cloudfunctions_url', required=False, help='url of cloud functions API')
    parser.add_argument('--cloudfunctions_namespace', required=False, help='cloud functions namespace')
    parser.add_argument('--cloudfunctions_package', required=False, help='cloud functions package name')
    parser.add_argument('--cloudfunctions_function', required=False, help='cloud functions specific function to be tested')
    parser.add_argument('--cloudfunctions_apikey', required=False, help="cloud functions apikey")
    parser.add_argument('--cloudfunctions_username', required=False, help='cloud functions user name')
    parser.add_argument('--cloudfunctions_password', required=False, help='cloud functions password')
    parser.add_argument('-v','--verbose', required=False, help='verbosity', action='store_true')
    parser.add_argument('--log', type=str.upper, default=None, choices=list(logging._levelToName.values()))
    parser.add_argument('--replace', required=False, help='string values to be replaced in input and expected output json (format \'valueToBeReplaced1:replacement1,valueToBeReplaced2:replacement2\')')
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    config = Cfg(args)

    logger.info('STARTING: '+ os.path.basename(__file__))

    url = getRequiredParameter(config, 'cloudfunctions_url')
    namespace = getRequiredParameter(config, 'cloudfunctions_namespace')
    auth = getParametersCombination(config, 'cloudfunctions_apikey', ['cloudfunctions_password', 'cloudfunctions_username'])
    package = getOptionalParameter(config, 'cloudfunctions_package')
    function = getOptionalParameter(config, 'cloudfunctions_function')

    if 'cloudfunctions_apikey' in auth:
        username, password = convertApikeyToUsernameAndPassword(auth['cloudfunctions_apikey'])
    else:
        username = auth['cloudfunctions_username']
        password = auth['cloudfunctions_password']

    try:
        inputFile = open(args.inputFileName, 'r')
    except IOError:
        logger.critical('Cannot open test input file %s', args.inputFileName)
        sys.exit(1)

    try:
        outputFile = open(args.outputFileName, 'w')
    except IOError:
        logger.critical('Cannot open test output file %s', args.outputFileName)
        sys.exit(1)

    try:
        inputJson = json.load(inputFile)
    except ValueError as e:
        logger.critical('Cannot decode json from test input file %s, error: %s', args.inputFileName, str(e))
        sys.exit(1)

    if not isinstance(inputJson, list):
        logger.critical('Input test json is not an array!')
        sys.exit(1)

    replaceDict = {}
    for attr in dir(config):
        if not attr.startswith("__"):
            if attr == 'replace':
                # format \'valueToBeReplaced1:replacement1,valueToBeReplaced2:replacement2\'
                replacementsString = getattr(config, attr)
                for replacementString in replacementsString.split(','):
                    replacementStringSplit = replacementString.split(':')
                    if len(replacementStringSplit) != 2 or not replacementStringSplit[0] or not replacementStringSplit[1]:
                        logger.critical('Invalid format of \'replace\' parameter, valid format is \'valueToBeReplaced1:replacement1,valueToBeReplaced2:replacement2\'')
                        sys.exit(1)
                    replaceDict[replacementStringSplit[0]] = replacementStringSplit[1]
            else:
                replaceDict[attr] = getattr(config, attr)

    # run tests
    testCounter = 0
    for test in inputJson:
        if not isinstance(test, dict):
            logger.error('Input test array element %d is not a dictionary. Each test has to be a dictionary, please see the doc!', testCounter)
            continue
        logger.info('Test number: %d, name: %s', testCounter, (test['name'] if 'name' in test else '-'))
        testUrl = \
            url + ('' if url.endswith('/') else '/') + \
            namespace + '/actions/' + (test['cf_package'] if 'cf_package' in test else package) + '/' + \
            (test['cf_function'] if 'cf_function' in test else function) + \
            '?blocking=true&result=true'
        logger.info('Tested function url: %s', testUrl)

        # load test input payload json
        testInputJson = test['input']
        testInputPath = None
        try:
            if testInputJson.startswith('@'): 
                testInputPath = os.path.join(os.path.dirname(args.inputFileName), testInputJson[1:])
                logger.debug('Loading input payload from file: %s', testInputPath)
                try:
                    inputFile = open(testInputPath, 'r')
                except IOError:
                    logger.error('Cannot open input payload from file: %s', testInputPath)
                    continue
                try:
                    testInputJson = json.load(inputFile)
                except ValueError as e:
                    logger.error('Cannot decode json from input payload from file %s, error: %s', testInputPath, str(e))
                    continue
        except:
            pass

        if not testInputPath:
            logger.debug('Input payload provided inside the test')

        # load test expected output payload json
        testOutputExpectedJson = test['outputExpected']
        testOutputExpectedPath = None
        try:
            if testOutputExpectedJson.startswith('@'):
                testOutputExpectedPath = os.path.join(os.path.dirname(args.inputFileName), testOutputExpectedJson[1:])
                logger.debug('Loading expected output payload from file: %s', testOutputExpectedPath)
                try:
                    outputExpectedFile = open(testOutputExpectedPath, 'r')
                except IOError:
                    logger.error('Cannot open expected output payload from file: %s', testOutputExpectedPath)
                    continue
                try:
                    testOutputExpectedJson = json.load(outputExpectedFile)
                except ValueError as e:
                    logger.error('Cannot decode json from expected output payload from file %s, error: %s', testOutputExpectedPath, str(e))
                    continue
        except:
            pass

        if not testOutputExpectedPath:
            logger.debug('Expected output payload provided inside the test')

        logger.debug('Replacing values in input and expected output jsons by configuration parameters.')

        for target, value in replaceDict.items():
            testInputJson, replacementNumber = replaceValue(testInputJson, '::' + target, value, False)
            if replacementNumber > 0:
                logger.debug('Replaced configuration parameter \'%s\' in input json, number of occurrences: %d.', target, replacementNumber)
            testOutputExpectedJson, replacementNumber = replaceValue(testOutputExpectedJson, '::' + target, value, False)
            if replacementNumber > 0:
                logger.debug('Replaced configuration parameter \'%s\' in expected output json, number of occurrences: %d.', target, replacementNumber)

        # call CF
        logger.debug('Sending input json: %s', json.dumps(testInputJson, ensure_ascii=False).encode('utf8'))
        response = requests.post(
            testUrl, 
            auth=(username, password), 
            headers={'Content-Type': 'application/json'}, 
            data=json.dumps(testInputJson, ensure_ascii=False).encode('utf8'))

        responseContentType = response.headers.get('content-type')
        if responseContentType != 'application/json':
            logger.error('Response content type is not json, content type: %s, response:\n%s', responseContentType, response.text)
            continue

        # check status
        if response.status_code == 200:
            testOutputReturnedJson = response.json()
            logger.debug('Received output json: %s', json.dumps(testOutputReturnedJson, ensure_ascii=False).encode('utf8'))
            test['outputReturned'] = testOutputReturnedJson

            # evaluate test
            if 'type' not in test or test['type'] == 'EXACT_MATCH':
                testResultString = DeepDiff(testOutputExpectedJson, testOutputReturnedJson, ignore_order=True).json
                testResultJson = json.loads(testResultString)
                if testResultJson == {}:
                    test['result'] = 0
                else:
                    test['result'] = 1
                    test['diff'] = testResultJson
            else:
                logger.error('Unknown test type: %s', test['type'])
        elif response.status_code in [202, 403, 404, 408, 502]:
            # 202 Accepted activation request (should not happen while sending 'blocking=true&result=true')
            # 403 Forbidden (could be just for specific package or function)
            # 404 Not Found (action or package could be incorrectly specified for given test)
            # 408 Request Timeout (could happen e.g. for CF that calls some REST APIs, e.g. Discovery service)
            # 502 Bad Gateway (when the CF raises an exception, e.g. bad params were provided)
            # => Could be an issue just for the given test, so we don't want to stop the whole testing.
            logger.error('Unexpected response status: %d, response: %s', response.status_code, json.dumps(response.json(), ensure_ascii=False).encode('utf8'))
        else:
            # 401 Unauthorized (since we use the same credentials for all tests, we want to end after the first test returns bad authentication)
            # 500 Internal Server Error (could happen when IBM Cloud has a serious issue and is not able to handle incoming requests; it would probably be the same for all tests)
            # => We don't want to continue with testing.
            logger.critical('Unexpected response status (cannot continue with testing): %d, response: %s', response.status_code, json.dumps(response.json(), ensure_ascii=False).encode('utf8'))
            sys.exit(1)

        testCounter += 1

    outputFile.write(json.dumps(inputJson, indent=4, ensure_ascii=False) + '\n')
    logger.info('FINISHING: '+ os.path.basename(__file__))
def main(argv):
    parser = argparse.ArgumentParser(
        description=
        'Tests all dialog flows from the given file and saves received responses to the output file',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # positional arguments
    parser.add_argument(
        'inputFileName',
        help=
        'file with test jsons to be sent to the conversation service. (One per line, under the key \'input\'.)'
    )
    parser.add_argument(
        'outputFileName',
        help=
        'file where to store received data from the conversation service. (One response per line.)'
    )
    # optional arguments
    parser.add_argument('-c',
                        '--common_configFilePaths',
                        help='configuration file',
                        action='append')
    parser.add_argument('-v',
                        '--verbose',
                        required=False,
                        help='verbosity',
                        action='store_true')
    parser.add_argument('--log',
                        type=str.upper,
                        default=None,
                        choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    config = Cfg(args)

    workspacesUrl = getRequiredParameter(config, 'conversation_url')
    version = getRequiredParameter(config, 'conversation_version')
    username = getRequiredParameter(config, 'conversation_username')
    password = getRequiredParameter(config, 'conversation_password')
    workspaces = filterWorkspaces(
        config, getWorkspaces(workspacesUrl, version, username, password))
    if len(workspaces) > 1:
        # if there is more than one workspace with the same name -> error
        logger.error(
            'There is more than one workspace with this name; do not know which one to test.'
        )
        exit(1)
    elif len(workspaces) == 1:
        workspaceId = workspaces[0]['workspace_id']
    else:
        logger.error('There is no workspace with this name, cannot test it.')
        exit(1)

    # wait until workspace is done with training
    checkWorkspaceTime = 0
    requestUrl = workspacesUrl + '/' + workspaceId + '?version=' + version
    while True:
        logger.verbose("requestUrl: %s", requestUrl)
        response = requests.get(requestUrl, auth=(username, password))
        if response.status_code == 200:
            responseJson = response.json()
            if errorsInResponse(responseJson):
                sys.exit(1)
            logger.verbose("response: %s", responseJson)
            status = responseJson['status']
            logger.info('WCS WORKSPACE STATUS: %s', status)
            if status == 'Available':
                break
            else:
                # sleep some time and check messages again
                if checkWorkspaceTime > CHECK_WORKSPACE_TIME_MAX:
                    logger.error(
                        'Workspace has not become available before timeout, timeout: %d, response: %s',
                        CHECK_WORKSPACE_TIME_MAX,
                        json.dumps(responseJson,
                                   indent=4,
                                   sort_keys=True,
                                   ensure_ascii=False).encode('utf8'))
                    sys.exit(1)
                time.sleep(CHECK_WORKSPACE_TIME_DELAY)
                checkWorkspaceTime = checkWorkspaceTime + CHECK_WORKSPACE_TIME_DELAY
        elif response.status_code == 400:
            logger.error('WA not available.')
            sys.exit(1)
        else:
            logger.error('Unknown status code: %s.', response.status_code)

    # run tests
    url = workspacesUrl + '/' + workspaceId + '/message?version=' + version
    receivedOutputJson = []
    try:
        with openFile(args.inputFileName, "r") as inputFile:
            try:
                with openFile(args.outputFileName, "w") as outputFile:
                    first = True
                    dialogId = ""
                    # for every input line
                    for inputLine in inputFile:
                        loadedJson = json.loads(inputLine)
                        inputJson = loadedJson[
                            'input_message']  # input json for tests
                        if dialogId and dialogId == loadedJson['dialog_id']:
                            if receivedOutputJson and 'context' in receivedOutputJson and receivedOutputJson[
                                    'context']:
                                inputJson['context'] = receivedOutputJson[
                                    'context']  # use context from last dialog turn
                        dialogId = loadedJson['dialog_id']
                        logger.verbose("url: %s", url)
                        response = requests.post(
                            url,
                            auth=(username, password),
                            headers={'Content-Type': 'application/json'},
                            data=json.dumps(inputJson,
                                            indent=4,
                                            ensure_ascii=False).encode('utf8'))
                        if response.status_code == 200:
                            receivedOutputJson = response.json()
                            if not first:
                                outputFile.write("\n")
                            outputFile.write(
                                json.dumps(receivedOutputJson,
                                           ensure_ascii=False))
                            first = False
                        elif response.status_code == 400:
                            logger.error('Error while testing.')
                            errorsInResponse(response.json())
                            sys.exit(1)
                        else:
                            logger.error('Unknown status code: %s.',
                                         response.status_code)
                            sys.exit(1)
            except IOError:
                logger.error('Cannot open test output file %s',
                             args.outputFileName)
                sys.exit(1)
    except IOError:
        logger.error('Cannot open test input file %s', args.inputFileName)
        sys.exit(1)

    logger.info('FINISHING: ' + os.path.basename(__file__))
    def test_deleteById(self, envVarNameUsername, envVarNamePassword):
        """Tests if workspace can be deleted by its id."""

        # use outputPath instead of dataBasePath once the workspace_deploy script is able to take the workspace
        # and the config file from different directories (the workspace should be taken from
        # dataBasePath and the config should be saved to the outputs directory)
        createOutputConfigFilename = 'createWorkspaceOutput.cfg'
        createOutputConfigPath = os.path.abspath(
            os.path.join(self.dataBasePath, createOutputConfigFilename))
        deleteOutputConfigFilename = 'deleteWorkspaceOutput.cfg'
        deleteOutputConfigPath = os.path.abspath(
            os.path.join(self.dataBasePath, deleteOutputConfigFilename))

        workspaceName = 'deleteById_workspace'

        # deploy test workspace
        deployParams = list(self.deployParamsBase)
        deployParams.extend([
            '--common_output_config', createOutputConfigPath,
            '--conversation_workspace_name', workspaceName
        ])
        workspace_deploy.main(deployParams)
        # deploy one more workspace
        deployParamsMore = list(self.deployParamsBase)
        deployParamsMore.extend(
            ['--conversation_workspace_name', workspaceName])
        workspace_deploy.main(deployParamsMore)

        # try to delete workspace by its id (id is obtained from output config of deploy script)
        deleteParams = list(self.deleteParamsBase)
        deleteParams.extend([
            '-c', createOutputConfigPath, '--common_output_config',
            deleteOutputConfigPath
        ])
        self.t_noExceptionAndLogMessage(
            "One workspace has been successfully deleted", [deleteParams])

        # parse output config of deploy script (contains workspace id to delete)
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            '--common_configFilePaths',
                            help='configuration file',
                            action='append')
        args = parser.parse_args(
            ['--common_configFilePaths', createOutputConfigPath])
        createOutputConfig = Cfg(args)

        workspaces = getWorkspaces(self.workspacesUrl, self.version,
                                   self.username, self.password)

        # among the workspaces on the server there should be no workspace with the id from the config file
        workspacesFound = 0
        for workspace in workspaces:
            if workspace['workspace_id'] == getRequiredParameter(
                    createOutputConfig, 'conversation_workspace_id'):
                workspacesFound += 1

        assert workspacesFound == 0

        # there should be still one workspace left (even with the same name)
        assert len(workspaces) == 1

        # check if workspace_id is not present in the output config of delete script
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            '--common_configFilePaths',
                            help='configuration file',
                            action='append')
        args = parser.parse_args(
            ['--common_configFilePaths', deleteOutputConfigPath])
        deleteOutputConfig = Cfg(args)

        assert not hasattr(deleteOutputConfig, 'conversation_workspace_id')
def main(argv):
    parser = argparse.ArgumentParser(description="Deploys a workspace in json format\
     to the Watson Conversation Service. If there is no 'conversation_workspace_id' provided\
     and the 'conversation_workspace_name_unique' is set to 'true', it uploads\
     a workspace to the place specified by the 'conversation_workspace_name'"                                                                             ,\
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-of',
                        '--common_outputs_directory',
                        required=False,
                        help='directory where the outputs are stored')
    parser.add_argument('-ow',
                        '--common_outputs_workspace',
                        required=False,
                        help='name of the json file with workspace')
    parser.add_argument('-c',
                        '--common_configFilePaths',
                        help='configuration file',
                        action='append')
    parser.add_argument('-oc',
                        '--common_output_config',
                        help='output configuration file')
    parser.add_argument('-cu',
                        '--conversation_url',
                        required=False,
                        help='url of the conversation service API')
    parser.add_argument('-cv',
                        '--conversation_version',
                        required=False,
                        help='version of the conversation service API')
    parser.add_argument('-cn',
                        '--conversation_username',
                        required=False,
                        help='username of the conversation service instance')
    parser.add_argument('-cp',
                        '--conversation_password',
                        required=False,
                        help='password of the conversation service instance')
    parser.add_argument(
        '-cid',
        '--conversation_workspace_id',
        required=False,
        help=
        'workspace_id of the application. If a workspace id is provided, previous workspace content is overwritten, otherwise a new workspace is created '
    )
    parser.add_argument('-wn',
                        '--conversation_workspace_name',
                        required=False,
                        help='name of the workspace')
    parser.add_argument(
        '-wnu',
        '--conversation_workspace_name_unique',
        required=False,
        help=
        'true if the workspace name should be unique across the specified assistant'
    )
    parser.add_argument('-v',
                        '--verbose',
                        required=False,
                        help='verbosity',
                        action='store_true')
    parser.add_argument('--log',
                        type=str.upper,
                        default=None,
                        choices=list(logging._levelToName.values()))
    args = parser.parse_args(argv)

    if __name__ == '__main__':
        setLoggerConfig(args.log, args.verbose)

    config = Cfg(args)
    logger.info('STARTING: ' + os.path.basename(__file__))

    # workspace info
    try:
        workspaceFilePath = os.path.join(
            getRequiredParameter(config, 'common_outputs_directory'),
            getRequiredParameter(config, 'common_outputs_workspace'))
        with openFile(workspaceFilePath, 'r') as workspaceFile:
            workspace = json.load(workspaceFile)
    except IOError:
        logger.error('Cannot load workspace file %s', workspaceFilePath)
        sys.exit(1)
    # workspace name
    workspaceName = getOptionalParameter(config, 'conversation_workspace_name')
    if workspaceName: workspace['name'] = workspaceName
    # workspace language
    workspaceLanguage = getOptionalParameter(config, 'conversation_language')
    if workspaceLanguage: workspace['language'] = workspaceLanguage

    # credentials (required)
    username = getRequiredParameter(config, 'conversation_username')
    password = getRequiredParameter(config, 'conversation_password')
    # url (required)
    workspacesUrl = getRequiredParameter(config, 'conversation_url')
    # version (required)
    version = getRequiredParameter(config, 'conversation_version')
    # workspace id
    workspaces = filterWorkspaces(
        config, getWorkspaces(workspacesUrl, version, username, password))
    if len(workspaces) > 1:
        # if there is more than one workspace with the same name -> error
        logger.error(
            'There is more than one workspace with this name; do not know which one to update.'
        )
        exit(1)
    elif len(workspaces) == 1:
        workspaceId = workspaces[0]['workspace_id']
        logger.info("Updating existing workspace.")
    else:
        workspaceId = ""
        logger.info("Creating new workspace.")

    requestUrl = workspacesUrl + '/' + workspaceId + '?version=' + version

    # create/update workspace
    response = requests.post(requestUrl,
                             auth=(username, password),
                             headers={'Content-Type': 'application/json'},
                             data=json.dumps(workspace, indent=4))
    responseJson = response.json()

    logger.verbose("response: %s", responseJson)
    if not errorsInResponse(responseJson):
        logger.info('Workspace successfully uploaded.')
    else:
        logger.error('Cannot upload workspace.')
        sys.exit(1)

    if not getOptionalParameter(config, 'conversation_workspace_id'):
        setattr(config, 'conversation_workspace_id',
                responseJson['workspace_id'])
        logger.info('WCS WORKSPACE_ID: %s', responseJson['workspace_id'])

    outputConfigFile = getOptionalParameter(config, 'common_output_config')
    if outputConfigFile:
        config.saveConfiguration(outputConfigFile)

    clientName = getOptionalParameter(config, 'context_client_name')
    if clientName:
        # Assembling uri of the client
        clientv2URL = 'https://clientv2-latest.mybluemix.net/#defaultMinMode=true'
        clientv2URL += '&prefered_workspace_id=' + getattr(
            config, 'conversation_workspace_id')
        clientv2URL += '&prefered_workspace_name=' + getattr(
            config, 'conversation_workspace_name')
        clientv2URL += '&shared_examples_service=&url=http://zito.mybluemix.net'
        clientv2URL += '&username=' + getattr(config, 'conversation_username')
        clientv2URL += '&custom_ui.title=' + getattr(
            config, 'conversation_workspace_name')
        clientv2URL += '&password=' + getattr(config, 'conversation_password')
        clientv2URL += '&custom_ui.machine_img='
        clientv2URL += '&custom_ui.user_img='
        clientv2URL += '&context.user_name=' + getattr(config,
                                                       'context_client_name')
        clientv2URL += '&context.link_build_date=' + str(
            datetime.datetime.now().strftime("%y-%m-%d-%H-%M"))
        clientv2URL += '&prefered_tts=none'
        clientv2URL += '&bluemix_tts.username=xx'
        clientv2URL += '&bluemix_tts.password=xx'
        clientv2URL += '&compact_mode=true'
        clientv2URL += '&compact_switch_enabled=true'
        clientv2URL += '&developer_switch_enabled=false'
        logger.info('clientv2URL=%s', clientv2URL)

        # create file with automatic redirect
        clientFileName = getOptionalParameter(config, 'common_outputs_client')
        if clientFileName:
            clientFilePath = os.path.join(
                getRequiredParameter(config, 'common_outputs_directory'),
                clientFileName)
            try:
                with openFile(clientFilePath, "w") as clientFile:
                    clientFile.write(
                        '<meta http-equiv="refresh" content=\"0; url=' +
                        clientv2URL + '\" />')
                    clientFile.write('<p><a href=\"' + clientv2URL +
                                     '\">Redirect</a></p>')
            except IOError:
                logger.error('Cannot write to %s', clientFilePath)
                sys.exit(1)

    logger.info('FINISHING: ' + os.path.basename(__file__))
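
# These scripts are typically invoked as command-line tools; an assumed entry point
# (not shown in the excerpts above) would look like:
#
#     if __name__ == '__main__':
#         main(sys.argv[1:])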