Code example #1
def main():
    print("Welcome sir!")
    sys.stdout.flush()
    speak("welcome sir")
    time.sleep(1)
    wishme(speak)

    while True:
        try:

            query = str(takecommand().lower())

            print("ok sir!")
            sys.stdout.flush()
            speak("ok sir!")
            print('processing...')
            sys.stdout.flush()
            speak('processing...')
            function(speak, query)
            time.sleep(2)
            print("your next command sir")
            sys.stdout.flush()
            speak("your next command sir")
            time.sleep(1)

        except Exception as e:
            print("Say that again please...")
            sys.stdout.flush()
            speak("Say that again please...")
Code example #2
File: pyml.py  Project: arizvisa/pyml
    def __call__(self, *sequence):
        # convert sequence into ika-types
        sequence = tuple(cache.cons(x) for x in sequence)

        result = function(self.value)
        result.sequence = sequence
        return result
Code example #3
def service(data):
	x = data['val']
	print(x)
	functions = []
	for i, line in enumerate(x.splitlines()):
		if line.startswith("ENDS"):
			break
		print(line)
		split = line.split(" ")
		if i == 0:
			start = int(split[0])
			
			targetStr = split[1]
			if '+' in targetStr:
				base, step = targetStr.split('+')
				target = linearset(int(base), int(step), -1)
			else:
				target = linearset(int(split[1]))

			if len(split) > 2:
				expectation = split[2] == 'True'
			else:
				expectation = None
		else:
			functions.append(function(int(split[0]),int(split[1])))
	print(start,target,functions)
	inst = instance(start,target,functions)
	inst.setExp(expectation)
	return manual(inst)
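
The payload format can be read off the parser: the first line carries the start value, a target (optionally written as base+step), and an optional True/False expectation; each following line holds two integers for one function; a line starting with ENDS terminates the list. A hypothetical payload:

data = {'val': "3 10 True\n1 2\n2 -1\nENDS"}
# parses to start=3, target=linearset(10), expectation=True,
# functions=[function(1, 2), function(2, -1)]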
Code example #4
	def relate(self, function=intersection):
		relations = Sequence()
		for i in range(1, len(self)):
			previous = self[i-1]
			current = self[i]
			if not isinstance(previous, list):
				previous = [previous]
			if not isinstance(current, list):
				current = [current]
			relation = function(previous, current)
			relations.append(relation)
		return relations
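
relate pairs each element with its predecessor and applies a binary relation, defaulting to intersection, which is not shown. One compatible reading, with its semantics assumed:

def intersection(previous, current):
    # Assumed: keep the elements the two neighbouring windows share.
    return [item for item in previous if item in current]

# With elements [1, 2], [2, 3], [3, 4], relate() would then yield [[2], [3]].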
Code example #5
def genetic():
    param = {
        'firstGeneration': 20,
        'nextGeneration': 10,
        'bestParents': 3,
        'goodParents': 3
    }
    maxIteration = 1e5
    eps = 1e-5
    generation = []
    fun = []
    for i in range(param['firstGeneration']):
        coordinates = ()
        for j in range(NUMBER_VAR):
            coordinates += (random.uniform(BEG, END), )
        generation.append(coordinates)
        fun.append(function(coordinates))
    print('global - ', fun)
    Sort(fun, generation)
    print('global - ', fun)
    globMin = fun[0]
    dotMin = generation[0]
    iteration = 1
    while iteration < maxIteration:
        iteration += 1
        generation = evolution(generation)
        fun = []
        for i in range(len(generation)):
            fun.append(function(generation[i]))
        Sort(fun, generation)
        if (fun[0] < globMin):
            # if(globMin-fun[0]<eps):
            #     globMin=fun[0]
            #     dotMin=generation[0]
            #     break
            globMin = fun[0]
            dotMin = generation[0]
    print('global minimum - {:6.7g}'.format(globMin))
    print('dot: ', dotMin)
    print('iteration - ', iteration)
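
The routine also depends on names defined elsewhere (function, Sort, evolution, NUMBER_VAR, BEG, END). Minimal stand-ins, all of them assumptions, under which it runs end to end:

import random

NUMBER_VAR = 2          # assumed: dimensionality of the search space
BEG, END = -10.0, 10.0  # assumed: per-coordinate bounds

def function(coordinates):
    # Assumed objective: the sphere function, minimized at the origin.
    return sum(c * c for c in coordinates)

def Sort(fun, generation):
    # Assumed: sort both lists in place by fitness, best (smallest) first.
    order = sorted(range(len(fun)), key=fun.__getitem__)
    fun[:] = [fun[i] for i in order]
    generation[:] = [generation[i] for i in order]

def evolution(generation):
    # Assumed: keep the best half, refill with Gaussian-mutated copies.
    half = generation[:max(1, len(generation) // 2)]
    children = [tuple(c + random.gauss(0, 0.1) for c in p) for p in half]
    return half + children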
Code example #6
File: datacomplex.py  Project: MajidAbdolshah/CA-MOBO
def initvals_(bounds, INITIAL, flag_):
    str_ = "Initializing " + str(INITIAL) + " Data"
    print_fancy(str_, 0.1)
    gData = np.zeros([INITIAL, INPUT_DIM])
    for i in range(0, INITIAL):
        for j in range(0, INPUT_DIM):
            gData[i, j] = random.uniform(bounds['min'][j], bounds['max'][j])
    gDataB = copy.deepcopy(gData)
    if flag_:
        gDataY = function(gDataB)
        return gData, gDataY
    else:
        return gData.T
Code example #7
File: main6.py  Project: Nisha2333/function-hub
def ss():
    def function(x):
        y = x == 0
        return y.astype(int)  # np.int was removed from NumPy; the builtin int behaves identically

    x = np.arange(-10, 10)
    y = function(x)
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.set_title('δ(n)')
    plt.xlabel('x')
    plt.ylabel('y')
    ax1.scatter(x, y, c='b', marker='o')
    plt.legend(['x1'])  # a bare string would be iterated character by character
    plt.grid(True)
    plt.show()
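
The inner function builds the discrete unit impulse δ(n): 1 where x == 0 and 0 elsewhere. A quick check of the same expression in plain NumPy:

import numpy as np

x = np.arange(-3, 4)
print((x == 0).astype(int))  # -> [0 0 0 1 0 0 0]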
Code example #8
File: module.py  Project: binarever/python
    def __init_enumerate_imports__(self):
        '''
        Enumerate and add nodes / edges for each import within the module. This routine will pass through the entire
        module structure.
        '''

        for func in self.nodes.values():
            for bb in func.nodes.values():
                for instruction in bb.instructions.values():
                    if instruction.refs_api:
                        (address, api) = instruction.refs_api

                        node = function(address, module=self)
                        node.color = 0xB4B4DA
                        self.add_node(node)

                        edge = pgraph.edge(func.ea_start, address)
                        self.add_edge(edge)
Code example #9
File: module.py  Project: Alwnikrotikz/paimei
    def __init_enumerate_imports__ (self):
        '''
        Enumerate and add nodes / edges for each import within the module. This routine will pass through the entire
        module structure.
        '''

        for func in self.nodes.values():
            for bb in func.nodes.values():
                for instruction in bb.instructions.values():
                    if instruction.refs_api:
                        (address, api) = instruction.refs_api

                        node = function(address, module=self)
                        node.color = 0xB4B4DA
                        self.add_node(node)

                        edge = pgraph.edge(func.ea_start, address)
                        self.add_edge(edge)
Code example #10
File: module.py  Project: binarever/python
    def __init__(self,
                 name="",
                 signature=None,
                 depth=DEPTH_FULL,
                 analysis=ANALYSIS_NONE):
        '''
        Analysis of an IDA database requires the instantiation of this class and will handle, depending on the requested
        depth, the analysis of all functions, basic blocks, instructions and more specifically which analysis techniques
        to apply. For the full list of analysis options see defines.py. Specifying ANALYSIS_IMPORTS will require an
        extra one-time scan through the entire structure to propagate functions (nodes) and cross references (edges) for
        each referenced API call. Specifying ANALYSIS_RPC will require an extra one-time scan through the entire IDA
        database and will propagate additional function level attributes.

        The signature attribute was added for use in the PaiMei process stalker module, for ensuring that a loaded
        DLL is equivalent to the PIDA file with matching name. Setting breakpoints in a non-matching module is
        obviously no good.

        @see: defines.py

        @type  name:      String
        @param name:      (Optional) Module name
        @type  signature: String
        @param signature: (Optional) Unique file signature to associate with module
        @type  depth:     Integer
        @param depth:     (Optional, Def=DEPTH_FULL) How deep to analyze the module
        @type  analysis:  Integer
        @param analysis:  (Optional, Def=ANALYSIS_NONE) Which extra analysis options to enable
        '''

        # run the parent classes initialization routine first.
        super(module, self).__init__(name)

        self.name = name
        self.base = MinEA() - 0x1000  # XXX - cheap hack
        self.depth = depth
        self.analysis = analysis
        self.signature = signature
        self.ext = {}
        self.log = True

        # convenience alias.
        self.functions = self.nodes

        # enumerate and add the functions within the module.
        if self.log:
            print "Analyzing functions..."

        for ea in Functions(MinEA(), MaxEA()):
            func = function(ea, self.depth, self.analysis, self)
            func.shape = "ellipse"
            self.add_node(func)

        # enumerate and add nodes for each import within the module.
        if self.depth & DEPTH_INSTRUCTIONS and self.analysis & ANALYSIS_IMPORTS:
            if self.log:
                print "Enumerating imports..."

            self.__init_enumerate_imports__()

        # enumerate and propagate attributes for any discovered RPC interfaces.
        if self.analysis & ANALYSIS_RPC:
            if self.log:
                print "Enumerating RPC interfaces..."

            self.__init_enumerate_rpc__()

        # enumerate and add the intramodular cross references.
        if self.log:
            print "Enumerating intramodular cross references..."

        for func in self.nodes.values():
            xrefs = list(CodeRefsTo(func.ea_start, 0))
            xrefs.extend(list(DataRefsTo(func.ea_start)))

            for ref in xrefs:
                from_func = get_func(ref)

                if from_func:
                    # GHETTO - add the actual source EA to the function.
                    if not self.nodes[from_func.startEA].outbound_eas.has_key(
                            ref):
                        self.nodes[from_func.startEA].outbound_eas[ref] = []

                    self.nodes[from_func.startEA].outbound_eas[ref].append(
                        func.ea_start)

                    edge = pgraph.edge(from_func.startEA, func.ea_start)

                    self.add_edge(edge)
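
Per the docstring, instantiation happens inside an IDA session with the defines in scope. A sketch of what a call might look like (parameter names from the signature above; the exact invocation is an assumption, written in Python 2 to match the project):

# Hypothetical usage:
mod = module(name="target.dll", depth=DEPTH_FULL, analysis=ANALYSIS_IMPORTS)
print len(mod.functions)  # functions aliases the graph's nodes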
Code example #11
def AQFunc(X, dataset, points_, cnt):

    ################# TRAIN THE MODEL
    start_time = time.time()
    mod1, Kernels['ker0'] = trainModel(dataset.data,
                                       np.matrix(dataset.outputs[:, 0]).T,
                                       'ker0', 40)
    mod2, Kernels['ker1'] = trainModel(dataset.data,
                                       np.matrix(dataset.outputs[:, 1]).T,
                                       'ker1', 40)
    Kinv_0 = np.linalg.pinv(Kernels['ker0'].K(dataset.data, dataset.data))
    Kinv_1 = np.linalg.pinv(Kernels['ker1'].K(dataset.data, dataset.data))
    print("_____________________________")
    cprint(
        "GP trained in %s seconds; OK!\n" % round(time.time() - start_time, 5),
        "blue")

    #################  FIND THE PARETO
    start_time = time.time()
    yPareto = mPareto(dataset.outputs)
    xPareto = findXpareto(dataset.data, dataset.outputs, yPareto)
    if (len(xPareto) != len(yPareto)):
        sys.exit("Size of X pareto is not same as Y pareto!")
    #print("_____________________________")
    #print("Found the Pareto in %s seconds; OK!\n" % round(time.time() - start_time,5))

    #################  READY TO LAUNCH THE LOOP
    start_time = time.time()
    #copyPareto = deepcopy(yPareto)
    copyxPareto = deepcopy(xPareto)
    SlidingY = np.empty(shape=(0, OUTPUT_DIM))
    grid_, Jgrid_, Ngridholder_, pMap_ = samplePareto(yPareto)
    for valg in Jgrid_:
        if (valg[0, 0] > valg[0, 1]) and (valg[0, 2] > valg[0, 3]):
            yBatch = Generate_bounded(valg[0, 0], valg[0, 2], valg[0, 1],
                                      valg[0, 3], 2)
            SlidingY = np.vstack((SlidingY, yBatch))

    if (SlidingY.shape[0] > 20):
        indices_ = [
            random.randint(0, SlidingY.shape[0] - 1) for p in range(0, 20)
        ]
        SlidingY = SlidingY[indices_]
        #plt.plot(SlidingY[:,0],SlidingY[:,1],"*g")
        #plt.show()

    Faster = {}
    for k in (range(SlidingY.shape[0])):
        AddY = np.vstack((yPareto, np.array([SlidingY[k, 0], SlidingY[k, 1]])))
        ParAddY = mPareto(AddY)
        Fast_1, Fast_2, Fast_3, Fast_4 = samplePareto(ParAddY)
        Faster[k] = Fast_3
    cprint("Grids_ to Handle_: %s" % SlidingY.shape[0], "red")
    x_log = []
    imp_log = []
    indices_ = [random.randint(0, X.shape[0] - 1) for p in range(0, 100)]
    optimizerX = X[indices_]
    #print("_____________________________")
    #print("Data prep. for main loop launched in %s seconds; OK!\n" % round(time.time() - start_time,5))

    stat_ = "Optimizing round " + str(cnt)
    for i in (range(len(optimizerX))):
        progress(i, len(optimizerX), status=stat_)
        x_log.append([optimizerX[i, :]])
        Total_HVI_diff = 0
        y1, Sigy1 = testModel(mod1, np.array([optimizerX[i, :]]))
        y2, Sigy2 = testModel(mod2, np.array([optimizerX[i, :]]))

        temp_x_pareto = np.vstack((copyxPareto, np.array([optimizerX[i, :]])))
        New_Weights_, New_Paretos_ = WeightPoints(temp_x_pareto, dataset,
                                                  Kernels, Kinv_0, Kinv_1,
                                                  points_)
        slide_size = len(SlidingY)
        for j in (range(slide_size)):
            temp_pareto = np.vstack(
                (New_Paretos_[:-1], np.array([SlidingY[j, 0], SlidingY[j,
                                                                       1]])))
            found_temp_pareto = mPareto(temp_pareto)
            usef_weights = New_Weights_[parY_X(temp_pareto, found_temp_pareto)]
            Probs_dim1 = norm(y1, Sigy1).pdf(SlidingY[j, 0])
            Probs_dim2 = norm(y2, Sigy2).pdf(SlidingY[j, 1])
            EHVI_New = Expected_HVI(found_temp_pareto, usef_weights,
                                    Faster[j]) * Probs_dim1 * Probs_dim2
            Total_HVI_diff += EHVI_New
        imp_log.append(Total_HVI_diff)
    indx = imp_log.index(max(imp_log))
    Best_x = x_log[indx][0]
    tmp_y = function(np.array([Best_x]))
    Best_y = tmp_y[0, 0], tmp_y[0, 1]
    xPareto = findXpareto(dataset.data, dataset.outputs, yPareto)
    return Best_x, Best_y, yPareto, xPareto
Code example #12
def generatePureInverter(dir, max):
    val = getRand(dir, max * dir)
    return function(-1, val)
Code example #13
#we are going to try descent with f(x, y) = x^2 + y^2
#only minimum that should work is (0, 0)


def magnitude(vector):
    count = 0
    for entry in vector:
        count += (entry * entry)
    return math.sqrt(count)


def getUnitVector(vector):
    mag = magnitude(vector)
    for i in range(len(vector)):
        vector[i] /= mag
    return vector


#might be better to use a vector type.

func = function([1, 1], [2, 2])
gradient = func.gradient(True)
testVector = [1, 1]
pointGradient = np.array(gradient.evaluate(testVector))
#problem; this is a scalar valued function. how do we step in direction from its output.
print(gradient.evaluate([1, 1]))
print(pointGradient)
unitvector = pointGradient / np.linalg.norm(pointGradient)
# getUnitVector(pointGradient)
print(unitvector)
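
Answering the question in the comments: for a scalar-valued f you step from the current point against the gradient, scaled by a learning rate. A minimal descent loop under the same assumed function/gradient API:

point = np.array([1.0, 1.0])
learning_rate = 0.1
for _ in range(100):
    step = np.array(gradient.evaluate(list(point)))
    if np.linalg.norm(step) < 1e-8:  # the gradient vanishes at the minimum
        break
    point = point - learning_rate * step
print(point)  # approaches (0, 0) for f(x, y) = x^2 + y^2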
Code example #14
def AQFunc(X, dataset, cnt):
    ################# TRAIN THE MODEL
    mod1, Kernels['ker0'] = trainModel(dataset.data,
                                       np.matrix(dataset.outputs[:, 0]).T,
                                       'ker0', 40)
    mod2, Kernels['ker1'] = trainModel(dataset.data,
                                       np.matrix(dataset.outputs[:, 1]).T,
                                       'ker1', 40)
    Kinv_0 = np.linalg.pinv(Kernels['ker0'].K(dataset.data, dataset.data))
    Kinv_1 = np.linalg.pinv(Kernels['ker1'].K(dataset.data, dataset.data))

    #################  FIND THE PARETO & CHECK VALIDITY
    start_time = time.time()
    yPareto = mPareto(dataset.outputs)
    xPareto = findXpareto(dataset.data, dataset.outputs, yPareto)
    if (len(xPareto) != len(yPareto)):
        sys.exit("Abort! Size of X pareto is not same as Y pareto!")

    #################  LAUNCH THE LOOP
    indices_ = [
        random.randint(0, X.shape[1] - 1) for p in range(0, MAX_POINTS)
    ]
    optimizerX = X.T[indices_]
    x_log = []
    imp_log = []
    imp_log_reg = []
    Cost_ = []
    BETA = 0.125 * np.log(2 * cnt + 1)
    wgts = MakeW()
    Total_HVI_diff = 0
    Total_HVI_diff_r = 0

    for i in (range(0, MAX_POINTS)):
        progress(i, len(optimizerX), status="")
        x_log.append([optimizerX[i, :]])
        y1, Sigy1 = testModel(mod1, np.array([optimizerX[i, :]]))
        y2, Sigy2 = testModel(mod2, np.array([optimizerX[i, :]]))
        yreg = function(np.array([optimizerX[i, :]]))
        Mu_ = np.array([y1, y2])
        Sigma_ = np.sqrt(BETA) * np.array([Sigy1, Sigy2])
        Total_HVI_diff = np.min(
            [wgts[0] * (Mu_ + Sigma_)[0], wgts[1] * (Mu_ + Sigma_)[1]])
        Total_HVI_diff_r = np.min([wgts[0] * yreg[0, 0], wgts[1] * yreg[0, 1]])
        Cost_.append(Cfunc(optimizerX[i, :], cnt))
        Total_HVI_diff = Total_HVI_diff * (1 - Cost_[-1])
        imp_log.append(Total_HVI_diff)
        Total_HVI_diff_r = Total_HVI_diff_r * (1 - Cost_[-1])
        imp_log_reg.append(Total_HVI_diff_r)

    indx = imp_log.index(max(imp_log))
    Best_x = x_log[indx][0]
    tmp_y = function(np.array([Best_x]))

    indx_reg = imp_log_reg.index(max(imp_log_reg))
    Best_x_reg = x_log[indx_reg][0]
    tmp_y_reg = function(np.array([Best_x_reg]))

    cprint("\nLaunching regret core successfully!", "red")
    Best_y = tmp_y[0, 0], tmp_y[0, 1]

    return Best_x, Best_y, (
        np.min([wgts[0] * tmp_y_reg[0, 0], wgts[1] * tmp_y_reg[0, 1]]) *
        (1 - Cost_[indx_reg]) -
        np.min([wgts[0] * tmp_y[0, 0], wgts[1] * tmp_y[0, 1]]) *
        (1 - Cost_[indx]))
Code example #15
 def inner_div(a, b):
     if a < b:
         a, b = b, a
     return function(a, b)
Code example #16
def generateCounter(dir, max):
    val = getRand(dir, max * dir)
    return function(1, val)
Code example #17
def generateInvertingGrowing(max):
    mult = getRand(2, max)
    counter = getRand(-max, max)
    return function(-mult, counter)
Code example #18
def test_01():
  """ Testing 01 """  
  D("test_01 here")
  assert function(100) == 100
Code example #19
def generateNullInverter(max):
    return function(-1, 0)
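
Examples #12, #16, #17 and #19 all build function(mult, counter) pairs, but the class itself is not shown. One hypothetical reconstruction consistent with the call sites (the "pure inverter" multiplying by -1, the "null inverter" adding nothing):

class function(object):
    # Hypothetical: a linear rule f(x) = mult * x + counter.
    def __init__(self, mult, counter):
        self.mult = mult
        self.counter = counter

    def __call__(self, x):
        return self.mult * x + self.counter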
Code example #20
File: preprocessor.py  Project: 4Liamk/KFusion
def main():
	"""Main method:
		read in from three files: main, library and kernel.  
		In the main we find fusion regions and fuse them depending on the type of fusion.
		
		Vertical fusion fuses functions vertically removing I/O and is effectively deforestation
		
		Horization fusion fuses functions horizatonally effectively allowing for several independent operations to occure on the same hardware in the same kernel.  This improve capacity and allows us to leverage concurrency.
		"""

	global replacements
	replacements = dict()	
	if(len(sys.argv) < 4) :
		print "Correct Usage : ./preproc2 <mainfile> <libfile> <kernelfile>"
		sys.exit(1)
		
	mainfile = sys.argv[1]	#"main.cpp"
	libfile = sys.argv[2]	#"img.cpp"
	kernelfile = sys.argv[3]	#"kernels.cl"
	namespace = "../imagproc-c/lclImage"
	
	#handle what the library header file will be
	temp = libfile.split(".")[0] + ".h"
	headerFileName = libfile.split(".")[0] + ".h"
	header = open(temp,"r")
	lexer = lex.lex()
	
	#update types and protected words:
	global protectedWords
	protectedWords += additionalProtectedWords
	
	#open up the new mainfile
	temp = mainfile.split(".")[0] + "-out." + mainfile.split(".")[1]
	mainout = open(temp,"w")
	
	#open new libfile 
	temp = libfile.split(".")[0] + "-out." + libfile.split(".")[1]
	libout = open(temp,"w")
	
	#open new kernel file
	temp = kernelfile.split(".")[0] + "-out." + kernelfile.split(".")[1]
	kernelout = open(temp,"w")
	
	#open new header file
	temp= libfile.split(".")[0] + "-out.h"
	headerout=open(temp,"w")
	
	#set up some replacements.  Basically whenever kernel.cl is referenced in the source code we need to actually reference kernel-out.cl
	replacements["\""+libfile.split(".")[0] + ".h" + "\""] = "\""+libfile.split(".")[0] + "-out.h" + "\""
	replacements["\""+kernelfile+"\""] = "\""+ kernelfile.split(".")[0] + "-out." + kernelfile.split(".")[1] + "\""

	#go through main file
	state = 0
	calls = []
	
	#functions we need to keep track of:
	#need to write a new init function which calls the old one; this bridges the gap
	initializationFunction = ""
	
	#collection of fused calls to be used later
	fusedfunctions = []
	
	
	print "Kernel File Analysis"
	#set up lexer for kernel files
	lexer = TokenReader(kernelfile,replacements,kernelout)
	kernels = []
	while True:
		tok = lexer.tw()
		if not tok:
		   break
		elif(tok.value == "__kernel"):
			kernels.append(kernel(lexer))
			

	print "Library Analysis"
	""""
		plow through our library to collect library information
		The key thing here is collect synchronization info
		Function we want to leverage later (init) are assigned to relevant variables
		"""		   
	state = 0
	pre = 0
	outfile = libout
	functions = []
	lexer = TokenReader(libfile,replacements,libout)
	print "opening lib file: ", libfile
	isInit = False
	isSyncIn = False
	isSyncOut = False
	while True:
		tok = lexer.tw()
		if not tok:
		   break
		elif(tok.type == "PRAGMA"):
			words =  tok.value.split()
			if("synchronize" in words):
				if("out" in words):
					isSyncOut = True
				if("in" in words):
					isSyncIn = True	   
		if(tok.type == 'TYPEID'):
			tok2 = lexer.tw()
			if not tok2:
				break
			if(tok2.type == 'ID'):
			   tok3 = lexer.tw()
			   if not tok3:
				break
			   if(tok3.type == 'LPAREN'):
				functions.append(function(tok,tok2,lexer))
				if(tok2.value == "init"):
					initializationFunction = functions[-1]
				functions[-1].isSyncIn = isSyncIn
				functions[-1].isSyncOut = isSyncOut
				isSyncIn = False
				isSyncOut = False			
	

	print "Main File Analysis and Synthesis"
	"""
	State 0 - nothing special look for start fuse, but otherwise print out input
	State 1 - we are in a fusion region - catalogue called functions and arguments. also look for exit
	"""
	lexer = TokenReader(mainfile,replacements,mainout)
	fusionType = ''
	while True:
		tok = lexer.token()

		if not tok: 
		    break
		if(tok.value in replacements):
			mainout.write(replacements[tok.value])
		elif(tok.type == "PRAGMA"):
			if(tok.value.split()[1] == "startfuse"):
				state = 1
				print "Starting fusion on line:", tok.lineno
				fusionType = 'VERTICAL'
			elif(tok.value.split()[1] == "starthfuse"):
				state = 1
				fusionType = 'HORIZONTAL'
				print "Starting fusion on line:", tok.lineno
			elif(tok.value.split()[1] == "endfuse"):
				state = 0
				if(calls):	
					fusedfunctions.append(funfusion(calls,fusionType))
					calls = []		
					print "we now have fused function:", fusedfunctions[-1] , fusedfunctions[-1].type
					mainout.write(fusedfunctions[-1].fusedcall.__str__())

			else:
				mainout.write(tok.value)
		elif(state == 1):
			if(tok.type == 'ID'):
				tok4 = lexer.token()
				if(tok4.type == 'LPAREN'):
					"""LOOK A FUNCTION CALL"""
					call = functionCall(tok)
					#handle any synchronization requirement.  A syncIn must fuse all previous calls, a SyncOut must immediately fuse after the call
					found = False
					for fun in functions:
						if fun.ID.value == call.call.value:
							found = True
							if(fun.isSyncIn):
								if(calls):
									fusedfunctions.append(funfusion(calls,fusionType))	
									calls = []	
									mainout.write(fusedfunctions[-1].fusedcall.__str__())
									print "we now have fused function:", fusedfunctions[len(fusedfunctions)-1] , fusedfunctions[len(fusedfunctions)-1].type									
							calls.append(call)
							tok5 = lexer.token()
							arg = ""
							while(tok5.type != 'RPAREN'):
								if(tok5.type == 'COMMA'):
									#print "arg:",arg
									calls[-1].addArg(arg)
									arg = ""
								else:
									arg += str(tok5.value)
								tok5 = lexer.token()
								if(tok5.type == 'RPAREN'):
									#print "END:",arg
									calls[-1].addArg(arg)							
							if(fun.isSyncOut):	
								fusedfunctions.append(funfusion(calls,fusionType))	
								calls = []
								mainout.write(fusedfunctions[-1].fusedcall.__str__())
								print "we now have fused function:", fusedfunctions[len(fusedfunctions)-1] , fusedfunctions[len(fusedfunctions)-1].type
							break
					if(not found):
						print "Error 1: Function: ", call.call.value , " Not found in library file: ", sys.argv[2]
						exit(1)

		elif(tok.value == "init"):
			mainout.write("initFusion")

		else:
			mainout.write(tok.value)
	mainout.close()
	
	print "library synthesis"
	fusions = []
	setOutput(libout)	
	for fun in fusedfunctions:
		print "creating fusion:",str(fun),"type: ",fun.type
		type = fun.type
		tofuse = []
		for call in fun.funs:
			for f in functions:
				if(f.ID.value == call.call.value):
					cp = copy.deepcopy(f)
					tofuse.append((call,cp))
					print f.ID,cp.ID
		argDict = dict()
		childfunctions = []
		count = 0
		for call, fun in tofuse:
			for i in xrange(len(call.args)):
				if(call.args[i] not in argDict):
					argDict[call.args[i]] = "arg_" + str(count) 
					count += 1
				fun.replaceArg(i,argDict[call.args[i]])
			fun.contaminate()
			childfunctions.append(fun)
		print "Performing ", fun.type, " fusion"
		newfunction = function("void","NEW",None,childfunctions,type)
		print newfunction
		fusions.append(newfunction) 
			
	#take care of having to parse new kernels by adding a new init function
	print "Creating new code to parse newely created kernels"
	for fun in fusions:
		libout.write("cl_kernel " + fun.newKernel + ";\n")
		libout.write(str(fun))
		
		
	#create new initialization function based on the previous one
	libout.write("void initFusion")
	libout.write(initializationFunction.printArguments())
	libout.write("{\n")
	
	#call the original with the correct arguments
	string = "\tinit("
	for arg in initializationFunction.args:
		string += arg[-1].value
		if(arg != initializationFunction.args[-1]):
			string += ","
		else:
			string += ");\n"
	
	string += "cl_int result;\n"
	for fun in fusions:
		string += "\t" + fun.newKernel + "= clCreateKernel("+ clProgramName +",\""+fun.newKernel.split("kernel")[0].strip()
		
		if(fun.ftype == 'HORIZONTAL'):
			string += 'h'
		
		string+="\",&result);" + "\n"
		string += "\tcheck(result);\n" 
	string +="}\n"
	libout.write(string)

	#add used function definitions to the header file
	#this assumes you use header guards.  If you don't it will add an extraneous #endif at the end 
	print "Updating library header file"
	for line in header:
		if(line.strip() != "#endif"):
			headerout.write(line)
	for fun in fusions:
		headerout.write(str(fun.call()))
		headerout.write("extern cl_kernel " + fun.newKernel + ";\n")
	headerout.write("void initFusion"+initializationFunction.printArguments()+";\n")
	headerout.write("#endif\n")
	headerout.close()

	#create the new kernels
	print "Creating new Kernels"
	setOutput(kernelout)
	for fun in fusions:
		tofuse = []
		argDict = dict()
		count = 0
		ftype = fun.ftype
		#print fun.kernelInvocations[-1].args[-1]
		newArg = Statement(None,-1,'OTHER')
		newArg.tokens = []
		newArg.tokens.append(makeToken("const int",'TYPEID'))
		newArg.tokens.append(makeToken("newSize",'ID'))
		cid = 0
		for clkernel in fun.kernelInvocations[:-1]:
			for k in kernels:
				if(clkernel.kernel.children[1].tokens[0].value.split("_")[0] == k.ID.value):
					tofuse.append(copy.deepcopy(k))	
					for i in range(len(clkernel.args)):
						type, value =  clkernel.args[i]
						value = str(value)
						if value not in argDict:
							argDict[value] = "arg_" + str(count)
							count += 1 
						tofuse[-1].replaceArg(i,argDict[value])	
					tofuse[-1].contaminate()
					tofuse[-1].cid = str(cid)
					cid += 1
					if(ftype == 'HORIZONTAL'):
						tofuse[-1].args.append(newArg.tokens)
		#print tofuse[-1]
		tree = fusionTree(tofuse,ftype)
		#print tree.node.call
		kernelout.write(str(tree.node.call))
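
As a concept sketch of what the two fusion modes buy (independent of the preprocessor's actual output): vertical fusion is deforestation, eliminating the intermediate buffer between producer and consumer; horizontal fusion packs independent operations into one kernel launch.

# Unfused: two passes over the data with an intermediate array (extra I/O).
tmp = [f(x) for x in data]
out = [g(t) for t in tmp]

# Vertically fused: one pass; the intermediate never touches memory.
out = [g(f(x)) for x in data]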
Code example #21
File: module.py  Project: Alwnikrotikz/paimei
    def __init__ (self, name="", signature=None, depth=DEPTH_FULL, analysis=ANALYSIS_NONE):
        '''
        Analysis of an IDA database requires the instantiation of this class and will handle, depending on the requested
        depth, the analysis of all functions, basic blocks, instructions and more specifically which analysis techniques
        to apply. For the full list of analysis options see defines.py. Specifying ANALYSIS_IMPORTS will require an
        extra one-time scan through the entire structure to propagate functions (nodes) and cross references (edges) for
        each referenced API call. Specifying ANALYSIS_RPC will require an extra one-time scan through the entire IDA
        database and will propagate additional function level attributes.

        The signature attribute was added for use in the PaiMei process stalker module, for ensuring that a loaded
        DLL is equivalent to the PIDA file with matching name. Setting breakpoints in a non-matching module is
        obviously no good.

        @see: defines.py

        @type  name:      String
        @param name:      (Optional) Module name
        @type  signature: String
        @param signature: (Optional) Unique file signature to associate with module
        @type  depth:     Integer
        @param depth:     (Optional, Def=DEPTH_FULL) How deep to analyze the module
        @type  analysis:  Integer
        @param analysis:  (Optional, Def=ANALYSIS_NONE) Which extra analysis options to enable
        '''

        # run the parent classes initialization routine first.
        super(module, self).__init__(name)

        self.name      = name
        self.base      = MinEA() - 0x1000      # XXX - cheap hack
        self.depth     = depth
        self.analysis  = analysis
        self.signature = signature
        self.ext       = {}
        self.log       = True

        # convenience alias.
        self.functions = self.nodes

        # enumerate and add the functions within the module.
        if self.log:
            print "Analyzing functions..."

        for ea in Functions(MinEA(), MaxEA()):
            func = function(ea, self.depth, self.analysis, self)
            func.shape = "ellipse"
            self.add_node(func)

        # enumerate and add nodes for each import within the module.
        if self.depth & DEPTH_INSTRUCTIONS and self.analysis & ANALYSIS_IMPORTS:
            if self.log:
                print"Enumerating imports..."

            self.__init_enumerate_imports__()

        # enumerate and propagate attributes for any discovered RPC interfaces.
        if self.analysis & ANALYSIS_RPC:
            if self.log:
                print "Enumerating RPC interfaces..."

            self.__init_enumerate_rpc__()

        # enumerate and add the intramodular cross references.
        if self.log:
            print "Enumerating intramodular cross references..."

        for func in self.nodes.values():
            xrefs = list(CodeRefsTo(func.ea_start, 0))
            xrefs.extend(list(DataRefsTo(func.ea_start)))

            for ref in xrefs:
                from_func = get_func(ref)

                if from_func:
                    # GHETTO - add the actual source EA to the function.
                    if not self.nodes[from_func.startEA].outbound_eas.has_key(ref):
                        self.nodes[from_func.startEA].outbound_eas[ref] = []

                    self.nodes[from_func.startEA].outbound_eas[ref].append(func.ea_start)

                    edge = pgraph.edge(from_func.startEA, func.ea_start)

                    self.add_edge(edge)
Code example #22
File: main.py  Project: OlafLee/DeepLearningTool
import numpy as np
from OPTree import *
from Tensor import *
from function import *








if __name__ == '__main__':
    a = Tensor('a')
    b = Tensor('b')
    c = Tensor('c')
    d = Tensor('d')
    
    f = a.dot(b)+c
    g = a*b+d*c
    print f, type(f)
    fun = function([a,b,c,d],[f])
    
    x1 = np.ones((1,5))
    x2 = 2*np.ones((5,1))
    x3 = 3*np.ones((1,1))
    x4 = 4*np.ones((2,1))
    
    print fun.calc([x1,x2,x3,x4])
    print fun.gradient([x1,x2,x3,x4])
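
With the given shapes, f = a.dot(b) + c is a 1x1 matrix: the dot product contributes 5 * 1 * 2 = 10, plus 3. A plain-NumPy check of the value fun.calc should report:

import numpy as np

x1 = np.ones((1, 5))
x2 = 2 * np.ones((5, 1))
x3 = 3 * np.ones((1, 1))
print(x1.dot(x2) + x3)  # [[13.]]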
Code example #23
File: preprocessor.py  Project: jeremy-w/KFusion
def main():
    """Main method:
		read in from three files: main, library and kernel.  
		In the main we find fusion regions and fuse them depending on the type of fusion.
		
		Vertical fusion fuses functions vertically removing I/O and is effectively deforestation
		
		Horization fusion fuses functions horizatonally effectively allowing for several independent operations to occure on the same hardware in the same kernel.  This improve capacity and allows us to leverage concurrency.
		"""

    global replacements
    replacements = dict()
    if (len(sys.argv) < 4):
        print "Correct Usage : ./preproc2 <mainfile> <libfile> <kernelfile>"
        sys.exit(1)

    mainfile = sys.argv[1]  #"main.cpp"
    libfile = sys.argv[2]  #"img.cpp"
    kernelfile = sys.argv[3]  #"kernels.cl"
    namespace = "../imagproc-c/lclImage"

    #handle what the library header file will be
    temp = libfile.split(".")[0] + ".h"
    headerFileName = libfile.split(".")[0] + ".h"
    header = open(temp, "r")
    lexer = lex.lex()

    #update types and protected words:
    global protectedWords
    protectedWords += additionalProtectedWords

    #open up the new mainfile
    temp = mainfile.split(".")[0] + "-out." + mainfile.split(".")[1]
    mainout = open(temp, "w")

    #open new libfile
    temp = libfile.split(".")[0] + "-out." + libfile.split(".")[1]
    libout = open(temp, "w")

    #open new kernel file
    temp = kernelfile.split(".")[0] + "-out." + kernelfile.split(".")[1]
    kernelout = open(temp, "w")

    #open new header file
    temp = libfile.split(".")[0] + "-out.h"
    headerout = open(temp, "w")

    #set up some replacements.  Basically whenever kernel.cl is referenced in the source code we need to actually reference kernel-out.cl
    replacements["\"" + libfile.split(".")[0] + ".h" +
                 "\""] = "\"" + libfile.split(".")[0] + "-out.h" + "\""
    replacements["\"" + kernelfile + "\""] = "\"" + kernelfile.split(
        ".")[0] + "-out." + kernelfile.split(".")[1] + "\""

    #go through main file
    state = 0
    calls = []

    #functions we need to keep track of:
    #need to write a new init function which calls the old one; this bridges the gap
    initializationFunction = ""

    #collection of fused calls to be used later
    fusedfunctions = []

    print "Kernel File Analysis"
    #set up lexer for kernel files
    lexer = TokenReader(kernelfile, replacements, kernelout)
    kernels = []
    while True:
        tok = lexer.tw()
        if not tok:
            break
        elif (tok.value == "__kernel"):
            kernels.append(kernel(lexer))

    print "Library Analysis"
    """"
		plow through our library to collect library information
		The key thing here is collect synchronization info
		Function we want to leverage later (init) are assigned to relevant variables
		"""
    state = 0
    pre = 0
    outfile = libout
    functions = []
    lexer = TokenReader(libfile, replacements, libout)
    print "opening lib file: ", libfile
    isInit = False
    isSyncIn = False
    isSyncOut = False
    while True:
        tok = lexer.tw()
        if not tok:
            break
        elif (tok.type == "PRAGMA"):
            words = tok.value.split()
            if ("synchronize" in words):
                if ("out" in words):
                    isSyncOut = True
                if ("in" in words):
                    isSyncIn = True
        if (tok.type == 'TYPEID'):
            tok2 = lexer.tw()
            if not tok2:
                break
            if (tok2.type == 'ID'):
                tok3 = lexer.tw()
                if not tok3:
                    break
                if (tok3.type == 'LPAREN'):
                    functions.append(function(tok, tok2, lexer))
                    if (tok2.value == "init"):
                        initializationFunction = functions[-1]
                    functions[-1].isSyncIn = isSyncIn
                    functions[-1].isSyncOut = isSyncOut
                    isSyncIn = False
                    isSyncOut = False

    print "Main File Analysis and Synthesis"
    """
	State 0 - nothing special look for start fuse, but otherwise print out input
	State 1 - we are in a fusion region - catalogue called functions and arguments. also look for exit
	"""
    lexer = TokenReader(mainfile, replacements, mainout)
    fusionType = ''
    while True:
        tok = lexer.token()

        if not tok:
            break
        if (tok.value in replacements):
            mainout.write(replacements[tok.value])
        elif (tok.type == "PRAGMA"):
            if (tok.value.split()[1] == "startfuse"):
                state = 1
                print "Starting fusion on line:", tok.lineno
                fusionType = 'VERTICAL'
            elif (tok.value.split()[1] == "starthfuse"):
                state = 1
                fusionType = 'HORIZONTAL'
                print "Starting fusion on line:", tok.lineno
            elif (tok.value.split()[1] == "endfuse"):
                state = 0
                if (calls):
                    fusedfunctions.append(funfusion(calls, fusionType))
                    calls = []
                    print "we now have fused function:", fusedfunctions[
                        -1], fusedfunctions[-1].type
                    mainout.write(fusedfunctions[-1].fusedcall.__str__())

            else:
                mainout.write(tok.value)
        elif (state == 1):
            if (tok.type == 'ID'):
                tok4 = lexer.token()
                if (tok4.type == 'LPAREN'):
                    """LOOK A FUNCTION CALL"""
                    call = functionCall(tok)
                    #handle any synchronization requirement.  A syncIn must fuse all previous calls, a SyncOut must immediately fuse after the call
                    found = False
                    for fun in functions:
                        if fun.ID.value == call.call.value:
                            found = True
                            if (fun.isSyncIn):
                                if (calls):
                                    fusedfunctions.append(
                                        funfusion(calls, fusionType))
                                    calls = []
                                    mainout.write(
                                        fusedfunctions[-1].fusedcall.__str__())
                                    print "we now have fused function:", fusedfunctions[
                                        len(fusedfunctions) -
                                        1], fusedfunctions[len(fusedfunctions)
                                                           - 1].type
                            calls.append(call)
                            tok5 = lexer.token()
                            arg = ""
                            while (tok5.type != 'RPAREN'):
                                if (tok5.type == 'COMMA'):
                                    #print "arg:",arg
                                    calls[-1].addArg(arg)
                                    arg = ""
                                else:
                                    arg += str(tok5.value)
                                tok5 = lexer.token()
                                if (tok5.type == 'RPAREN'):
                                    #print "END:",arg
                                    calls[-1].addArg(arg)
                            if (fun.isSyncOut):
                                fusedfunctions.append(
                                    funfusion(calls, fusionType))
                                calls = []
                                mainout.write(
                                    fusedfunctions[-1].fusedcall.__str__())
                                print "we now have fused function:", fusedfunctions[
                                    len(fusedfunctions) -
                                    1], fusedfunctions[len(fusedfunctions) -
                                                       1].type
                            break
                    if (not found):
                        print "Error 1: Function: ", call.call.value, " Not found in library file: ", sys.argv[
                            2]
                        exit(1)

        elif (tok.value == "init"):
            mainout.write("initFusion")

        else:
            mainout.write(tok.value)
    mainout.close()

    print "library synthesis"
    fusions = []
    setOutput(libout)
    for fun in fusedfunctions:
        print "creating fusion:", str(fun), "type: ", fun.type
        type = fun.type
        tofuse = []
        for call in fun.funs:
            for f in functions:
                if (f.ID.value == call.call.value):
                    cp = copy.deepcopy(f)
                    tofuse.append((call, cp))
                    print f.ID, cp.ID
        argDict = dict()
        childfunctions = []
        count = 0
        for call, fun in tofuse:
            for i in xrange(len(call.args)):
                if (call.args[i] not in argDict):
                    argDict[call.args[i]] = "arg_" + str(count)
                    count += 1
                fun.replaceArg(i, argDict[call.args[i]])
            fun.contaminate()
            childfunctions.append(fun)
        print "Performing ", fun.type, " fusion"
        newfunction = function("void", "NEW", None, childfunctions, type)
        print newfunction
        fusions.append(newfunction)

    #take care of having to parse new kernels by adding a new init function
    print "Creating new code to parse newely created kernels"
    for fun in fusions:
        libout.write("cl_kernel " + fun.newKernel + ";\n")
        libout.write(str(fun))

    #create new initialization function based on the previous one
    libout.write("void initFusion")
    libout.write(initializationFunction.printArguments())
    libout.write("{\n")

    #call the original with the correct arguments
    string = "\tinit("
    for arg in initializationFunction.args:
        string += arg[-1].value
        if (arg != initializationFunction.args[-1]):
            string += ","
        else:
            string += ");\n"

    string += "cl_int result;\n"
    for fun in fusions:
        string += "\t" + fun.newKernel + "= clCreateKernel(" + clProgramName + ",\"" + fun.newKernel.split(
            "kernel")[0].strip()

        if (fun.ftype == 'HORIZONTAL'):
            string += 'h'

        string += "\",&result);" + "\n"
        string += "\tcheck(result);\n"
    string += "}\n"
    libout.write(string)

    #add used function definitions to the header file
    #this assumes you use header guards.  If you don't it will add an extraneous #endif at the end
    print "Updating library header file"
    for line in header:
        if (line.strip() != "#endif"):
            headerout.write(line)
    for fun in fusions:
        headerout.write(str(fun.call()))
        headerout.write("extern cl_kernel " + fun.newKernel + ";\n")
    headerout.write("void initFusion" +
                    initializationFunction.printArguments() + ";\n")
    headerout.write("#endif\n")
    headerout.close()

    #create the new kernels
    print "Creating new Kernels"
    setOutput(kernelout)
    for fun in fusions:
        tofuse = []
        argDict = dict()
        count = 0
        ftype = fun.ftype
        #print fun.kernelInvocations[-1].args[-1]
        newArg = Statement(None, -1, 'OTHER')
        newArg.tokens = []
        newArg.tokens.append(makeToken("const int", 'TYPEID'))
        newArg.tokens.append(makeToken("newSize", 'ID'))
        cid = 0
        for clkernel in fun.kernelInvocations[:-1]:
            for k in kernels:
                if (clkernel.kernel.children[1].tokens[0].value.split("_")[0]
                        == k.ID.value):
                    tofuse.append(copy.deepcopy(k))
                    for i in range(len(clkernel.args)):
                        type, value = clkernel.args[i]
                        value = str(value)
                        if value not in argDict:
                            argDict[value] = "arg_" + str(count)
                            count += 1
                        tofuse[-1].replaceArg(i, argDict[value])
                    tofuse[-1].contaminate()
                    tofuse[-1].cid = str(cid)
                    cid += 1
                    if (ftype == 'HORIZONTAL'):
                        tofuse[-1].args.append(newArg.tokens)
        #print tofuse[-1]
        tree = fusionTree(tofuse, ftype)
        #print tree.node.call
        kernelout.write(str(tree.node.call))
Code example #24
from plot import *
from analyzeData import *
from function import *
import sys

if __name__ == "__main__":

    if len(sys.argv) != 3:
        print("usage: python ./circle/drawInitialFunction.py <xFunc> <yFunc>")
        sys.exit(1)

    xFunc = int(sys.argv[1])
    yFunc = int(sys.argv[2])

    f = function(xFunc, yFunc)
    numPoints = 200
    x0, y0 = 0, 0

    functionPoints = analyzeData().getFunctionPoints(numPoints, x0, y0, f)

    plot().plotFunction(functionPoints)

    plt.show()
Code example #25
File: pyml.py  Project: arizvisa/pyml
 def __getattr__(self, name):
     return function(name)
Code example #26
    observation = 4
    COUNTER_ = 36

    ################# GENERATE DATASET
    start_point = [0, 0]
    log_start_point = []
    log_start_point.append(start_point)

    while ancnt < COUNTER_:
        print("Get ready for reset...")
        ctn = 0
        print(log_start_point)
        time.sleep(3)
        X = Generate_bounded(start_point[0], start_point[0] + depth_,
                             start_point[1] + 0.07, start_point[1] + 0.1, 1000)
        Y = function(X)
        indices_ = [
            random.randint(0, X.shape[0] - 1) for p in range(0, observation)
        ]
        obs_X = X[indices_]
        obs_Y = Y[indices_]

        start_time = time.time()
        xData = obs_X
        yData = obs_Y

        #xData,yData = initvals_(bounds,INITIAL)
        dataset = DataComplex(xData, yData)
        yPareto = mPareto(dataset.outputs)
        xPareto = findXpareto(dataset.data, dataset.outputs, yPareto)
        initial_pareto = yPareto