Beispiel #1
0
def main():
    """Parse the SDL statement in argv[1] and print its parse tree.

    NOTE(review): everything after the early ``return`` below is unreachable —
    it looks like a shelved tree-substitution experiment. Confirm before
    deleting or re-enabling it.
    """
    sdl_parser = SDLParser()
    main_tree = sdl_parser.parse(sys.argv[1])
    print(main_tree)
    return

    # --- unreachable below: former subtree-replacement workflow ---
    pattern_tree = sdl_parser.parse(sys.argv[2])
    replacement_tree = sdl_parser.parse(sys.argv[3])
    makeTreeReplacement(main_tree, pattern_tree, replacement_tree)

    # Fall back to a plain textual substitution and show the result.
    substituted = str(main_tree).replace(str(pattern_tree), str(replacement_tree))
    print(substituted)
Beispiel #2
0
def main():
    """Parse the SDL statement in argv[1], print the resulting tree, and stop.

    NOTE(review): the code past the early ``return`` never runs (leftover
    subtree-substitution logic) — verify intent before cleaning it up.
    """
    parser = SDLParser()
    tree = parser.parse(sys.argv[1])
    print(tree)
    return

    # Unreachable: replace occurrences of one subtree with another.
    needle = parser.parse(sys.argv[2])
    replacement = parser.parse(sys.argv[3])
    makeTreeReplacement(tree, needle, replacement)

    # Unreachable: string-level substitution of the same trees.
    result = str(tree).replace(str(needle), str(replacement))
    print(result)
Beispiel #3
0
                    else:
                        print("ERROR: Invalid SDL statement!")
                        sys.exit(-1)
                else:
                    print("Need type annotation for list type: ", listKey)
            else:
                # check context of LHS assignment:
                varType[varName] = new_type


# need to somehow handle list types => Str, Int, ZR, G1, G2, GT types

#eq1 = M.evaluate(exp(G1, mul(GT, GT)))
#print("Test 8: ", eq1)
#
#print("Test 9: ", M.evaluate(asym_pair(G1, G2)))
#
#print("Test 10: ", )

if __name__ == "__main__":
    # Type-check each SDL statement supplied on the command line and
    # report the variable types inferred across all of them.
    checker = TypeCheck()
    checker.setupSolver()
    sdl_parser = sdl.SDLParser()
    varType = {}  # accumulates inferred types, e.g. {'tf0': listG1, 'tf1': listG1}
    for statement in sys.argv[1:]:
        parsed = sdl_parser.parse(statement)
        checker.inferType(parsed, varType)

    print("VarTypes: ", varType)
Beispiel #4
0
class SubstituteVar:
    """AST visitor that renames attribute nodes matching *target* to *new_var*."""

    def __init__(self, target, new_var):
        # Name to look for (compared via str(node)) and its replacement.
        self.target = target
        self.new_var = new_var

    def visit(self, node, data):
        # Generic hook: non-attribute nodes are left untouched.
        pass

    def visit_attr(self, node, data):
        # Rewrite the attribute only on an exact string match.
        if self.target == str(node):
            node.setAttribute(self.new_var)


if __name__ == "__main__":
    # Parse the SDL equation given on the command line, then apply
    # techniques 2, 1 and 3 in turn, printing the equation after each pass.
    equation = SDLParser().parse(sys.argv[1])
    print("Original: ", equation)

    for technique_cls, label in ((Technique2, "Tech 2: "),
                                 (Technique1, "Tech 1: "),
                                 (Technique3, "Tech 3: ")):
        # Instantiate inside the loop so any constructor side effects keep
        # their original interleaving with the visits.
        ASTVisitor(technique_cls()).preorder(equation)
        print(label, equation)
Beispiel #5
0
                print("T1 left :=", self.T1)

class SubstituteVar:
    """Visitor that substitutes one attribute name for another in an AST."""

    def __init__(self, target, new_var):
        # target: textual form of the attribute to replace; new_var: its substitute.
        self.target = target
        self.new_var = new_var

    def visit(self, node, data):
        # Default visit hook — no-op for nodes we do not care about.
        pass

    def visit_attr(self, node, data):
        # Only attributes whose textual form equals the target are renamed.
        matches = str(node) == self.target
        if matches:
            node.setAttribute(self.new_var)

if __name__ == "__main__":
    # Read the SDL statement from argv, parse it, then run techniques
    # 2, 1 and 3 over the equation, showing the result after each one.
    source = sys.argv[1]
    equation = SDLParser().parse(source)
    print("Original: ", equation)

    passes = ((Technique2, "Tech 2: "),
              (Technique1, "Tech 1: "),
              (Technique3, "Tech 3: "))
    for technique_cls, label in passes:
        # Construct each technique just before its visit so side effects
        # occur in the same order as the original straight-line version.
        ASTVisitor(technique_cls()).preorder(equation)
        print(label, equation)
    
Beispiel #6
0
def parseReductionFile(cm, reduction_file, verbose, benchmarkOpt, estimateOpt):
    """Parse a reduction SDL file and gather the data needed for conversion.

    Parses *reduction_file* with the sdl parser, extracts per-function
    statements/types/dependency lists, pairing variables, generators and
    (optionally) dependency graphs, and bundles everything into the returned
    ``reductionData`` dict consumed by later stages.

    :param cm: config object; must define ``schemeType`` (PKENC or PKSIG)
        plus the ``reduc*`` function names and master pub/sec variable lists.
    :param reduction_file: path to the reduction SDL file.
    :param verbose: when truthy, prints the generated dependency graphs.
    :param benchmarkOpt: accepted for interface symmetry; not used in this body.
    :param estimateOpt: forwarded as the ``computeSize`` option.
    :returns: dict with assignment info, type maps, dependency/pairing data,
        graphs and the options used.
    """
    # setup sdl parser configs
    sdl.masterPubVars = cm.reducMasterPubVars
    sdl.masterSecVars = cm.reducMasterSecVars
    if not hasattr(cm, "schemeType"):
        sys.exit("configAutoGroup: need to set 'schemeType' in config.")

    # NOTE(review): 'functionOrder' below is a bare name used as the attribute
    # key for setattr — presumably a module-level string constant; confirm it
    # is defined (otherwise this raises NameError).
    if cm.schemeType == PKENC:
        funcOrder = [
            cm.reducSetupFuncName, cm.reducQueryFuncName,
            cm.reducChallengeFuncName
        ]
        setattr(cm, functionOrder, funcOrder)
    elif cm.schemeType == PKSIG:
        funcOrder = [cm.reducSetupFuncName, cm.reducQueryFuncName]
        setattr(cm, functionOrder, funcOrder)
    else:
        sys.exit("configAutoGroup: unrecognized 'schemeType' in config.")

    #TODO: create something like this for assumption?
    #for i in encConfigParams:
    #    if not hasattr(cm, i):
    #        errorOut(i)

    if not hasattr(cm, "secparam"):
        secparam = "BN256"  # default pairing curve for now
    else:
        secparam = cm.secparam

    #do we need this for the assumption?
    dropFirst = None
    if hasattr(cm, "dropFirst"):
        dropFirst = cm.dropFirst

    options = {
        'secparam': secparam,
        'userFuncList': [],
        'computeSize': estimateOpt,
        'dropFirst': dropFirst,
        'path': dest_path
    }

    # Parse the SDL file; everything below reads from the parser's module state.
    sdl.parseFile(reduction_file, verbose, ignoreCloudSourcing=True)
    assignInfo_reduction = sdl.getAssignInfo()
    reductionData = {
        'sdl_name':
        sdl.assignInfo[sdl.NONE_FUNC_NAME]
        [BV_NAME].getAssignNode().getRight().getAttribute(),
        'setting':
        sdl.assignInfo[sdl.NONE_FUNC_NAME]
        [ALGEBRAIC_SETTING].getAssignNode().getRight().getAttribute(),
        'assignInfo':
        assignInfo_reduction,
        'typesBlock':
        sdl.getFuncStmts(TYPES_HEADER),
        'userCodeBlocks':
        list(
            set(list(assignInfo_reduction.keys())).difference(
                cm.functionOrder + [TYPES_HEADER, NONE_FUNC_NAME]))
    }

    # NOTE(review): 'varmap' is only present when cm.reductionMap exists;
    # later lookups of reductionData['varmap'] (PKENC/PKSIG dependency loops
    # and the newDeps pruning) would raise KeyError otherwise — confirm the
    # config always supplies reductionMap on those paths.
    if hasattr(cm, "reductionMap"):
        reductionData['varmap'] = cm.reductionMap

    # this consists of the type of the input scheme (e.g., symmetric)
    setting = sdl.assignInfo[sdl.NONE_FUNC_NAME][
        ALGEBRAIC_SETTING].getAssignNode().getRight().getAttribute()
    # name of the scheme
    sdl_name = sdl.assignInfo[
        sdl.NONE_FUNC_NAME][BV_NAME].getAssignNode().getRight().getAttribute()

    typesBlock = sdl.getFuncStmts(TYPES_HEADER)
    info = {'verbose': verbose}

    # we want to ignore user defined functions from our analysis
    # (unless certain variables that we care about are manipulated there)
    userCodeBlocks = list(
        set(list(assignInfo_reduction.keys())).difference(
            cm.functionOrder + [TYPES_HEADER, NONE_FUNC_NAME]))
    options['userFuncList'] += userCodeBlocks

    # Reconstruct the TYPES block source lines in their original order.
    lines = list(typesBlock[0].keys())
    lines.sort()
    typesBlockLines = [
        i.rstrip() for i in sdl.getLinesOfCodeFromLineNos(lines)
    ]
    begin = ["BEGIN :: " + TYPES_HEADER]
    end = ["END :: " + TYPES_HEADER]

    # start constructing the preamble for the Asymmetric SDL output
    newLines0 = [
        BV_NAME + " := " + sdl_name, SETTING + " := " + sdl.ASYMMETRIC_SETTING
    ]
    newLines1 = begin + typesBlockLines + end
    # this fact is already verified by the parser
    # but if scheme claims symmetric
    # and really an asymmetric scheme then parser will
    # complain.
    assert setting == sdl.SYMMETRIC_SETTING, "No need to convert to asymmetric setting."
    # determine user preference in terms of keygen or encrypt
    short = SHORT_DEFAULT  # default option
    if hasattr(cm, 'short'):
        if cm.short in SHORT_OPTIONS:
            short = cm.short
    print("reducing size of '%s'" % short)

    varTypes = dict(sdl.getVarTypes().get(TYPES_HEADER))
    typesH = dict(varTypes)

    reductionData['typesH'] = typesH

    if not hasattr(cm, 'schemeType'):
        sys.exit("'schemeType' option missing in specified config file.")
    pairingSearch = []
    # extract the statements, types, dependency list, influence list and exponents of influence list
    # for each algorithm in the SDL scheme
    if cm.schemeType == PKENC:
        (stmtS, typesS, depListS, depListNoExpS, infListS,
         infListNoExpS) = sdl.getVarInfoFuncStmts(cm.reducSetupFuncName)
        (stmtQ, typesQ, depListQ, depListNoExpQ, infListQ,
         infListNoExpQ) = sdl.getVarInfoFuncStmts(cm.reducQueryFuncName)
        (stmtC, typesC, depListC, depListNoExpC, infListC,
         infListNoExpC) = sdl.getVarInfoFuncStmts(cm.reducChallengeFuncName)
        depListData = {
            cm.reducChallengeFuncName: depListNoExpC,
            cm.reducQueryFuncName: depListNoExpQ,
            cm.reducSetupFuncName: depListNoExpS
        }
        varTypes.update(typesS)
        varTypes.update(typesQ)
        varTypes.update(typesC)

        # Build setup/query/challenge graphs only for single-reduction PKENC runs;
        # the multi-reduction PKENC case is handled again near the end of this function.
        if hasattr(cm, 'graphit') and cm.graphit and cm.single_reduction:
            dg_reduc_setup = generateGraphForward(
                cm.reducSetupFuncName, (stmtS, typesS, infListNoExpS))
            dg_reduc_setup.adjustByMap(reductionData.get('varmap'))
            dg_reduc_query = generateGraph(cm.reducQueryFuncName,
                                           (typesQ, depListNoExpQ), types.G1,
                                           varTypes)
            dg_reduc_query.adjustByMap(reductionData.get('varmap'))
            dg_reduc_chall = generateGraph(cm.reducChallengeFuncName,
                                           (typesC, depListNoExpC), types.G1,
                                           varTypes)
            dg_reduc_chall.adjustByMap(reductionData.get('varmap'))

            if verbose:
                print("<=== Reduction Setup Graph ===>")
                print(dg_reduc_setup)
                print("<=== Reduction Setup Graph ===>")

                print("<=== Reduction Query Graph ===>")
                print(dg_reduc_query)
                print("<=== Reduction Query Graph ===>")

                print("<=== Reduction Challenge Graph ===>")
                print(dg_reduc_chall)
                print("<=== Reduction Challenge Graph ===>")

            dg_reduction = DotGraph("reduction")
            dg_reduction += dg_reduc_setup + dg_reduc_query + dg_reduc_chall
            if verbose:
                print("<=== Reduction Graph ===>")
                print(dg_reduction)
                print("<=== Reduction Graph ===>")

            reductionData['reductionGraph'] = dg_reduction

        # TODO: expand search to encrypt and potentially setup
        pairingSearch += [stmtS, stmtQ, stmtC]  # aka start with decrypt.

        info[curveID] = options['secparam']
        info[dropFirstKeyword] = options[dropFirstKeyword]
        gen = Generators(info)
        # JAA: commented out for benchmarking
        #print("List of generators for scheme")
        # retrieve the generators selected by the scheme
        # typically found in the setup routine in most cases.
        # extract the generators from the setup and keygen routine for later use
        # NOTE(review): the 'else' below pairs only with the
        # reducChallengeFuncName check, yet the error text talks about setup —
        # confirm which attribute is actually meant to be mandatory here.
        if hasattr(cm, 'reducSetupFuncName'):
            gen.extractGens(stmtS, typesS)
        if hasattr(cm, 'reducQueryFuncName'):
            gen.extractGens(stmtQ, typesQ)
        if hasattr(cm, 'reducChallengeFuncName'):
            gen.extractGens(stmtC, typesC)
        else:
            sys.exit(
                "Assumption failed: setup not defined for this function. Where to extract generators?"
            )
        generators = gen.getGens()
        # JAA: commented out for benchmarking
        #print("Generators extracted: ", generators)
    elif cm.schemeType == PKSIG:
        (stmtS, typesS, depListS, depListNoExpS, infListS,
         infListNoExpS) = sdl.getVarInfoFuncStmts(cm.reducSetupFuncName)
        (stmtQ, typesQ, depListQ, depListNoExpQ, infListQ,
         infListNoExpQ) = sdl.getVarInfoFuncStmts(cm.reducQueryFuncName)
        depListData = {
            cm.reducQueryFuncName: depListNoExpQ,
            cm.reducSetupFuncName: depListNoExpS
        }

        varTypes.update(typesS)
        varTypes.update(typesQ)

        if hasattr(cm, 'graphit') and cm.graphit:
            dg_reduc_setup = generateGraphForward(
                cm.reducSetupFuncName, (stmtS, typesS, infListNoExpS))
            dg_reduc_setup.adjustByMap(reductionData.get('varmap'))

            #dg_reduc_query = generateGraphForward(cm.reducQueryFuncName, (stmtQ, typesQ, infListNoExpQ))
            #dg_reduc_query.adjustByMap(reductionData.get('varmap'))

            new_depListNoExpQ = simplifyDepMap(stmtQ, typesQ, infListNoExpQ,
                                               depListNoExpQ)
            dg_reduc_query = generateGraph(cm.reducQueryFuncName,
                                           (typesQ, new_depListNoExpQ),
                                           types.G1, varTypes)
            dg_reduc_query.adjustByMap(reductionData.get('varmap'))

            if verbose:
                print("<=== Reduction Setup Graph ===>")
                print(dg_reduc_setup)
                print("<=== Reduction Setup Graph ===>")

                print("<=== Reduction Query Graph (backward) ===>")
                print(dg_reduc_query)
                print("<=== Reduction Query Graph (backward) ===>")

            dg_reduction = DotGraph("reduction")
            dg_reduction += dg_reduc_setup + dg_reduc_query
            if verbose:
                print("<=== Reduction Graph ===>")
                print(dg_reduction)
                print("<=== Reduction Graph ===>")

            reductionData['reductionGraph'] = dg_reduction

        # TODO: expand search to encrypt and potentially setup
        pairingSearch += [stmtS, stmtQ]  # aka start with decrypt.

        info[curveID] = options['secparam']
        info[dropFirstKeyword] = options[dropFirstKeyword]
        gen = Generators(info)
        # JAA: commented out for benchmarking
        #print("List of generators for scheme")
        # retrieve the generators selected by the scheme
        # typically found in the setup routine in most cases.
        # extract the generators from the setup and keygen routine for later use
        # NOTE(review): as above, the 'else' pairs only with the query check.
        if hasattr(cm, 'reducSetupFuncName'):
            gen.extractGens(stmtS, typesS)
        if hasattr(cm, 'reducQueryFuncName'):
            gen.extractGens(stmtQ, typesQ)
        else:
            sys.exit(
                "Assumption failed: setup not defined for this function. Where to extract generators?"
            )
        generators = gen.getGens()
        # JAA: commented out for benchmarking
        #print("Generators extracted: ", generators)

    # need a Visitor class to build these variables
    # TODO: expand to other parts of algorithm including setup, keygen, encrypt
    # Visits each pairing computation in the SDL and
    # extracts the inputs. This is the beginning of the
    # analysis of these variables as the SDL is converted into
    # an asymmetric scheme.
    hashVarList = []
    pair_vars_G1_lhs = []
    pair_vars_G1_rhs = []
    gpv = GetPairingVariables(pair_vars_G1_lhs, pair_vars_G1_rhs)
    gpv.setDepListData(depListData)
    for eachStmt in pairingSearch:  # loop through each pairing statement
        lines = eachStmt.keys()  # for each line, do the following
        for i in lines:
            if type(eachStmt[i]
                    ) == sdl.VarInfo:  # make sure we have the Var Object
                # assert that the statement contains a pairing computation
                gpv.setFuncName(eachStmt[i].getFuncName())
                if HasPairings(eachStmt[i].getAssignNode()):
                    path_applied = []
                    # split pairings if necessary so that we don't influence
                    # the solve in anyway. We can later recombine these during
                    # post processing of the SDL
                    eachStmt[i].assignNode = SplitPairings(
                        eachStmt[i].getAssignNode(), path_applied)
                    # JAA: commented out for benchmarking
                    #if len(path_applied) > 0: print("Split Pairings: ", eachStmt[i].getAssignNode())
                    if info['verbose']:
                        print("Each: ", eachStmt[i].getAssignNode())
                    #print(eachStmt[i].assignNode)
                    sdl.ASTVisitor(gpv).preorder(eachStmt[i].getAssignNode())
                elif eachStmt[i].getHashArgsInAssignNode():
                    # in case there's a hashed value...build up list and check later to see if it appears
                    # in pairing variable list
                    hashVarList.append(str(eachStmt[i].getAssignVar()))
                else:
                    continue  # not interested

    # constraint list narrows the solutions that
    # we care about
    constraintList = []
    # for example, include any hashed values that show up in a pairing by default
    for i in hashVarList:
        if i in pair_vars_G1_lhs or i in pair_vars_G1_rhs:
            constraintList.append(i)
    # JAA: commented out for benchmarking
    # for each pairing variable, we construct a dependency graph all the way back to
    # the generators used. The input of assignTraceback consists of the list of SDL statements,
    # generators from setup, type info, and the pairing variables.
    # We do this analysis for both sides
    info['G1_lhs'] = (pair_vars_G1_lhs,
                      assignTraceback(assignInfo_reduction, generators,
                                      varTypes, pair_vars_G1_lhs,
                                      constraintList))
    info['G1_rhs'] = (pair_vars_G1_rhs,
                      assignTraceback(assignInfo_reduction, generators,
                                      varTypes, pair_vars_G1_rhs,
                                      constraintList))

    # depList holds renamed (via varmap) keys; depListUnaltered keeps originals.
    depList = {}
    depListUnaltered = {}

    if cm.schemeType == PKENC:
        for i in [depListS, depListQ, depListC]:
            for (key, val) in i.items():
                # Skip empty deps, I/O pseudo-vars, the ciphertext/query-secret
                # vars and the master pub/sec variables.
                if (not (len(val) == 0) and not (key == 'input')
                        and not (key == 'output')
                        and not (key == cm.reducCiphertextVar)
                        and not (key == cm.reducQueriesSecVar)
                        and not (key in cm.reducMasterPubVars)
                        and not (key in cm.reducMasterSecVars)):
                    # NOTE(review): raises KeyError when 'varmap' was never set
                    # (i.e., cm.reductionMap absent) — see note above.
                    if (key in reductionData['varmap']):
                        depList[reductionData['varmap'][key]] = val
                        depListUnaltered[key] = val
                    else:
                        depList[key] = val
                        depListUnaltered[key] = val
    elif cm.schemeType == PKSIG:
        for i in [depListS, depListQ]:
            for (key, val) in i.items():
                if (not (len(val) == 0) and not (key == 'input')
                        and not (key == 'output')
                        and not (key == cm.reducCiphertextVar)
                        and not (key == cm.reducQueriesSecVar)
                        and not (key in cm.reducMasterPubVars)
                        and not (key in cm.reducMasterSecVars)):
                    if (key in reductionData['varmap']):
                        depList[reductionData['varmap'][key]] = val
                        depListUnaltered[key] = val
                    else:
                        depList[key] = val
                        depListUnaltered[key] = val

    info['deps'] = (depListUnaltered,
                    assignTraceback(assignInfo_reduction, generators, varTypes,
                                    depListUnaltered, constraintList))

    # Keep only traceback entries that actually resolved to something.
    prunedDeps = {}
    for (key, val) in info['deps'][1].items():
        if (not (len(val) == 0)):
            prunedDeps[key] = val

    the_map = gpv.pairing_map

    reductionData['info'] = info
    reductionData['depList'] = depList
    reductionData['deps'] = info['deps']
    reductionData['prunedMap'] = prunedDeps

    reductionData['G1_lhs'] = info['G1_lhs']
    reductionData['G1_rhs'] = info['G1_rhs']

    reductionData['the_map'] = the_map

    reductionData['options'] = options

    reductionData['varTypes'] = varTypes

    #prune varTypes to remove ZR that we don't care about
    additionalDeps = dict(list(reductionData['info']['deps'][0].items()))
    items = []
    newlist = []
    newDeps = {}
    for (key, val) in additionalDeps.items():
        #items = list(additionalDeps[key])
        newlist = []
        for j in val:
            # Only keep group-element dependencies (G1/G2); ZR etc. are dropped.
            if ((sdl.getVarTypeFromVarName(j, None, True) == types.G1)
                    or (sdl.getVarTypeFromVarName(j, None, True) == types.G2)):
                newlist.append(j)
        if (not (len(set(newlist)) == 0)):
            # NOTE(review): direct ['varmap'] lookup — KeyError if unset.
            if (key in reductionData['varmap']):
                newDeps[reductionData['varmap'][key]] = set(newlist)
            else:
                newDeps[key] = set(newlist)
            #newDeps[key] = set(newlist)
    reductionData['newDeps'] = newDeps

    reductionData['options']['type'] = "reduction"

    reductionData['reductionFile'] = reduction_file

    # Multi-reduction PKENC: rebuild the graphs with combined forward/backward
    # analysis (the single-reduction case was handled earlier).
    if cm.schemeType == PKENC and not cm.single_reduction:
        if hasattr(cm, 'graphit') and cm.graphit:
            exclude_list = [cm.reducQueriesSecVar
                            ] + cm.reducMasterPubVars + cm.reducMasterSecVars

            dg_reduc_setup = generateGraphForward(
                cm.reducSetupFuncName, (stmtS, typesS, infListNoExpS))
            dg_reduc_setup.adjustByMap(reductionData.get('varmap'))
            # process the query
            dg_reduc_query = generateGraph(
                cm.reducQueryFuncName, (typesQ, depListNoExpQ), types.G1,
                varTypes)  #, stmts=stmtQ, infListNoExp=infListNoExpQ)
            dg_reduc_query.adjustByMap(reductionData.get('varmap'))

            try:
                newVarType = dict(typesS)
                newVarType.update(typesQ)
                # special variables that we don't want in the graph
                dg_reduc_query_forward = generateGraphForward(
                    cm.reducQueryFuncName, (stmtQ, newVarType, infListNoExpQ),
                    exclude=exclude_list)
                dg_reduc_query_forward.adjustByMap(reductionData.get('varmap'))
                # combine with backward analysis
                dg_reduc_query += dg_reduc_query_forward
            except Exception as e:
                print("EXCEPTION: ", cm.reducQueryFuncName,
                      " forward tracing failed!")
                # NOTE(review): Exception objects have no traceback() method —
                # this line itself raises AttributeError; likely meant
                # traceback.format_exc() / e.__traceback__. Confirm and fix.
                print(e.traceback())

            dg_reduc_chall = generateGraph(cm.reducChallengeFuncName,
                                           (typesC, depListNoExpC), types.G1,
                                           varTypes)
            dg_reduc_chall.adjustByMap(reductionData.get('varmap'))

            try:
                newVarType.update(typesC)
                dg_reduc_chall_forward = generateGraphForward(
                    cm.reducChallengeFuncName,
                    (stmtC, newVarType, infListNoExpC),
                    exclude=exclude_list)
                dg_reduc_chall_forward.adjustByMap(reductionData.get('varmap'))
                # combine with backward analysis
                dg_reduc_chall += dg_reduc_chall_forward
            except Exception as e:
                print("EXCEPTION: ", cm.reducChallengeFuncName,
                      " forward tracing failed!")
                # NOTE(review): same e.traceback() issue as above.
                print(e.traceback())

            if verbose:
                print("<=== Reduction Setup Graph ===>")
                print(dg_reduc_setup)
                print("<=== Reduction Setup Graph ===>")

                print("<=== Reduction Query Graph ===>")
                print(dg_reduc_query)
                print("<=== Reduction Query Graph ===>")

                print("<=== Reduction Challenge Graph ===>")
                print(dg_reduc_chall)
                print("<=== Reduction Challenge Graph ===>")

            dg_reduction = DotGraph("reduction")
            dg_reduction += dg_reduc_setup + dg_reduc_query + dg_reduc_chall
            if verbose:
                print("<=== Reduction Graph ===>")
                print(dg_reduction)
                print("<=== Reduction Graph ===>")

            reductionData['reductionGraph'] = dg_reduction

    #if hasattr(cm, "assumption_reduction_map"):
    #    reductionData['assumption'] = cm.assumption_reduction_map[reduction_name]
    #else:
    #    reductionData['assumption'] = ""

    return reductionData
Beispiel #7
0
def parseAssumptionFile(cm, assumption_file, verbose, benchmarkOpt, estimateOpt):
    """Parse an assumption SDL file and collect the analysis data for it.

    Mirrors parseReductionFile: parses *assumption_file*, extracts the setup
    and assumption function statements/types/dependency lists, pairing
    variables, generators and optional graphs, bundling everything into the
    returned ``assumptionData`` dict.

    :param cm: config object; must define ``schemeType``, the ``assump*``
        function names and the assumption master pub/sec variable lists.
    :param assumption_file: path to the assumption SDL file.
    :param verbose: when truthy, prints the assumption instance graph.
    :param benchmarkOpt: accepted for interface symmetry; not used in this body.
    :param estimateOpt: forwarded as the ``computeSize`` option.
    :returns: dict with assignment info, type maps, dependency/pairing data,
        the generator/visitor objects and the options used.
    """
    # setup sdl parser configs
    sdl.masterPubVars = cm.assumpMasterPubVars
    sdl.masterSecVars = cm.assumpMasterSecVars
    if not hasattr(cm, "schemeType"):
        sys.exit("configAutoGroup: need to set 'schemeType' in config.")

    #setattr(cm, isAssumption, "true")

    # NOTE(review): 'functionOrder' is a bare name used as the setattr key —
    # presumably a module-level string constant; confirm it is defined.
    funcOrder = [cm.assumpSetupFuncName, cm.assumpFuncName]
    setattr(cm, functionOrder, funcOrder)

    #TODO: create something like this for assumption?
    #for i in encConfigParams:
    #    if not hasattr(cm, i):
    #        errorOut(i)

    if not hasattr(cm, "secparam"):
        secparam = "BN256" # default pairing curve for now
    else:
        secparam = cm.secparam

    #do we need this for the assumption?
    dropFirst = None
    if hasattr(cm, "dropFirst"):
        dropFirst = cm.dropFirst

    options = {'secparam':secparam, 'userFuncList':[], 'computeSize':estimateOpt, 'dropFirst':dropFirst, 'path':dest_path}

    # Parse the SDL file; subsequent reads come from the parser's module state.
    sdl.parseFile(assumption_file, verbose, ignoreCloudSourcing=True)
    assignInfo_assump = sdl.getAssignInfo()
    assumptionData = {'sdl_name':sdl.assignInfo[sdl.NONE_FUNC_NAME][BV_NAME].getAssignNode().getRight().getAttribute(), 'setting':sdl.assignInfo[sdl.NONE_FUNC_NAME][ALGEBRAIC_SETTING].getAssignNode().getRight().getAttribute(), 'assignInfo':assignInfo_assump, 'typesBlock':sdl.getFuncStmts( TYPES_HEADER ), 'userCodeBlocks':list(set(list(assignInfo_assump.keys())).difference(cm.functionOrder + [TYPES_HEADER, NONE_FUNC_NAME]))}

    # NOTE(review): 'varmap' only exists when cm.reductionMap is set; the
    # direct assumptionData['varmap'] lookup near the end of this function
    # would raise KeyError otherwise — confirm the config always provides it.
    if hasattr(cm, "reductionMap"):
        assumptionData['varmap'] = cm.reductionMap

    # this consists of the type of the input scheme (e.g., symmetric)
    setting = sdl.assignInfo[sdl.NONE_FUNC_NAME][ALGEBRAIC_SETTING].getAssignNode().getRight().getAttribute()
    # name of the scheme
    sdl_name = sdl.assignInfo[sdl.NONE_FUNC_NAME][BV_NAME].getAssignNode().getRight().getAttribute()

    typesBlock = sdl.getFuncStmts( TYPES_HEADER )
    info = {'verbose':verbose}

    # we want to ignore user defined functions from our analysis
    # (unless certain variables that we care about are manipulated there)
    userCodeBlocks = list(set(list(assignInfo_assump.keys())).difference(cm.functionOrder + [TYPES_HEADER, NONE_FUNC_NAME]))
    options['userFuncList'] += userCodeBlocks

    # Reconstruct the TYPES block source lines in their original order.
    lines = list(typesBlock[0].keys())
    lines.sort()
    typesBlockLines = [ i.rstrip() for i in sdl.getLinesOfCodeFromLineNos(lines) ]
    begin = ["BEGIN :: " + TYPES_HEADER]
    end = ["END :: " + TYPES_HEADER]

    # start constructing the preamble for the Asymmetric SDL output
    newLines0 = [ BV_NAME + " := " + sdl_name, SETTING + " := " + sdl.ASYMMETRIC_SETTING ]
    newLines1 = begin + typesBlockLines + end
    # this fact is already verified by the parser
    # but if scheme claims symmetric
    # and really an asymmetric scheme then parser will
    # complain.
    assert setting == sdl.SYMMETRIC_SETTING, "No need to convert to asymmetric setting."
    # determine user preference in terms of keygen or encrypt
    short = SHORT_DEFAULT # default option
    if hasattr(cm, 'short'):
        if cm.short in SHORT_OPTIONS:
            short = cm.short
    print("reducing size of '%s'" % short)

    varTypes = dict(sdl.getVarTypes().get(TYPES_HEADER))
    typesH = dict(varTypes)

    assumptionData['typesH'] = typesH

    if not hasattr(cm, 'schemeType'):
        sys.exit("'schemeType' option missing in specified config file.")
    pairingSearch = []
    # extract the statements, types, dependency list, influence list and exponents of influence list
    # for each algorithm in the SDL scheme
    (stmtS, typesS, depListS, depListNoExpS, infListS, infListNoExpS) = sdl.getVarInfoFuncStmts( cm.assumpSetupFuncName )
    (stmtA, typesA, depListA, depListNoExpA, infListA, infListNoExpA) = sdl.getVarInfoFuncStmts( cm.assumpFuncName )
    varTypes.update(typesS)
    varTypes.update(typesA)

    assumptionData['stmtS'] = stmtS
    assumptionData['stmtA'] = stmtA

    if hasattr(cm, 'graphit') and cm.graphit:
        dg_assump_setup = generateGraph(cm.assumpSetupFuncName, (typesS, depListNoExpS), types.G1, varTypes)
        dg_assump_setup.adjustByMap(assumptionData.get('varmap'))
        dg_assump_itself = generateGraph(cm.assumpFuncName, (typesA, depListNoExpA), types.G1, varTypes)
        dg_assump_itself.adjustByMap(assumptionData.get('varmap'))

        dg_assumption = DotGraph("assumption")
        dg_assumption += dg_assump_setup + dg_assump_itself

        if verbose:
            print("<=== Assumption Instance Graph ===>")
            print(dg_assumption)
            print("<=== Assumption Instance Graph ===>")

        # always record these
        assumptionData['assumptionGraph'] = dg_assumption

    # TODO: expand search to encrypt and potentially setup
    pairingSearch += [stmtS, stmtA] # aka start with decrypt.

    info[curveID] = options['secparam']
    info[dropFirstKeyword] = options[dropFirstKeyword]
    gen = Generators(info)
    # JAA: commented out for benchmarking
    #print("List of generators for scheme")
    # retrieve the generators selected by the scheme
    # typically found in the setup routine in most cases.
    # extract the generators from the setup and keygen routine for later use
    # NOTE(review): the 'else' below pairs only with the assumpFuncName check,
    # yet the error text talks about setup — confirm which attribute is meant
    # to be mandatory here.
    if hasattr(cm, 'assumpSetupFuncName'):
        gen.extractGens(stmtS, typesS)
    if hasattr(cm, 'assumpFuncName'):
        gen.extractGens(stmtA, typesA)
    else:
        sys.exit("Assumption failed: setup not defined for this function. Where to extract generators?")
    generators = gen.getGens()
    # JAA: commented out for benchmarking
    #print("Generators extracted: ", generators)

    print("\n")

    # need a Visitor class to build these variables
    # TODO: expand to other parts of algorithm including setup, keygen, encrypt
    # Visits each pairing computation in the SDL and
    # extracts the inputs. This is the beginning of the
    # analysis of these variables as the SDL is converted into
    # an asymmetric scheme.
    hashVarList = []
    pair_vars_G1_lhs = []
    pair_vars_G1_rhs = []
    gpv = GetPairingVariables(pair_vars_G1_lhs, pair_vars_G1_rhs)
    for eachStmt in pairingSearch: # loop through each pairing statement
        lines = eachStmt.keys() # for each line, do the following
        for i in lines:
            if type(eachStmt[i]) == sdl.VarInfo: # make sure we have the Var Object
                # assert that the statement contains a pairing computation
                if HasPairings(eachStmt[i].getAssignNode()):
                    path_applied = []
                    # split pairings if necessary so that we don't influence
                    # the solve in anyway. We can later recombine these during
                    # post processing of the SDL
                    eachStmt[i].assignNode = SplitPairings(eachStmt[i].getAssignNode(), path_applied)
                    # JAA: commented out for benchmarking
                    #if len(path_applied) > 0: print("Split Pairings: ", eachStmt[i].getAssignNode())
                    if info['verbose']: print("Each: ", eachStmt[i].getAssignNode())
                    sdl.ASTVisitor( gpv ).preorder( eachStmt[i].getAssignNode() )
                elif eachStmt[i].getHashArgsInAssignNode():
                    # in case there's a hashed value...build up list and check later to see if it appears
                    # in pairing variable list
                    hashVarList.append(str(eachStmt[i].getAssignVar()))
                else:
                    continue # not interested

    # constraint list narrows the solutions that
    # we care about
    constraintList = []
    # for example, include any hashed values that show up in a pairing by default
    for i in hashVarList:
        if i in pair_vars_G1_lhs or i in pair_vars_G1_rhs:
            constraintList.append(i)
    # JAA: commented out for benchmarking
    # for each pairing variable, we construct a dependency graph all the way back to
    # the generators used. The input of assignTraceback consists of the list of SDL statements,
    # generators from setup, type info, and the pairing variables.
    # We do this analysis for both sides
    info[ 'G1_lhs' ] = (pair_vars_G1_lhs, assignTraceback(assignInfo_assump, generators, varTypes, pair_vars_G1_lhs, constraintList))
    info[ 'G1_rhs' ] = (pair_vars_G1_rhs, assignTraceback(assignInfo_assump, generators, varTypes, pair_vars_G1_rhs, constraintList))

    # Collect non-empty dependencies, skipping I/O pseudo-vars and the
    # assumption master pub/sec variables.
    depList = {}
    for i in [depListS, depListA]:
        for (key, val) in i.items():
            if(not(len(val) == 0) and not(key == 'input') and not(key == 'output') and not(key in cm.assumpMasterPubVars) and not(key in cm.assumpMasterSecVars)):
                depList[key] = val

    info[ 'deps' ] = (depList, assignTraceback(assignInfo_assump, generators, varTypes, depList, constraintList))

    # Keep only traceback entries that actually resolved to something.
    prunedDeps = {}
    for (key, val) in info['deps'][1].items():
        if(not(len(val) == 0)):
            prunedDeps[key] = val

    the_map = gpv.pairing_map

    assumptionData['info'] = info
    assumptionData['depList'] = depList
    assumptionData['deps'] = info['deps']
    assumptionData['prunedMap'] = prunedDeps
    assumptionData['G1_lhs'] = info['G1_lhs']
    assumptionData['G1_rhs'] = info['G1_rhs']

    assumptionData['the_map'] = the_map

    assumptionData['options'] = options

    assumptionData['gpv'] = gpv

    assumptionData['gen'] = gen

    assumptionData['varTypes'] = varTypes

    #prune varTypes to remove ZR that we don't care about
    additionalDeps = dict(list(assumptionData['info']['deps'][0].items()))
    items = []
    newlist = []
    newDeps = {}
    for (key,val) in additionalDeps.items():
        #items = list(additionalDeps[key])
        newlist = []
        for j in val:
            # Only keep group-element dependencies (G1/G2); others are dropped.
            if((sdl.getVarTypeFromVarName(j, None, True) == types.G1) or (sdl.getVarTypeFromVarName(j, None, True) == types.G2)):
                newlist.append(j)
        if(not(len(set(newlist)) == 0)):
            # NOTE(review): direct ['varmap'] lookup — KeyError if unset (see note above).
            if(key in assumptionData['varmap']):
                newDeps[assumptionData['varmap'][key]] = set(newlist)
            else:
                newDeps[key] = set(newlist)
    assumptionData['newDeps'] = newDeps

    assumptionData['assumptionFile'] = assumption_file
    assumptionData['config'] = cm

    assumptionData['options']['type'] = "assumption"
    assumptionData['newLines0'] = newLines0

    return assumptionData