Example #1
def Chernoff_bounds(t, s):
    timing.log("Chernoff bound starts")
    '''
    return the probability bound for the targeted time point t and parameter s
    1. first calculate the total number of jobs among all tasks
    2. calculate the MGF for each task with its corresponding number of jobs in nlist
    3. evaluate at the input s in (0, b) and keep the minimal result over s
    '''
    #input a is the selected point
    prob = 1.0
    count = 0
    #now sumN is the total number of jobs among all the tasks.
    c1, c2, x, p = symbols("c1, c2, x, p")
    expr = exp(c1 * x) * (1 - p) + exp(c2 * x) * p
    mgf = lambdify((c1, c2, x, p), expr)
    nlist = []
    for i in range(n):
        nlist.append(math.ceil(t / PSet[i]['period']))
    for i in nlist:
        prob = prob * (mgf(PSet[count]['NWCET'], PSet[count]['AWCET'], s,
                           PSet[count]['prob']))**int(i)
        count += 1
    prob = prob / exp(s * t)

    timing.log("Chernoff bound ends")
    return prob
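# Step 3 of the docstring minimizes the bound over s. A hedged sketch of that sweep,
# assuming the Chernoff_bounds function above and a caller-supplied grid of candidate
# s values (the helper name min_chernoff_bound is illustrative, not from the project):
def min_chernoff_bound(t, s_values):
    """Return the tightest (smallest) Chernoff bound over the candidate s values."""
    return min(Chernoff_bounds(t, s) for s in s_values)

# example usage with a hypothetical grid for s in (0, 1):
# min_chernoff_bound(10.0, [k * 0.001 for k in range(1, 1000)])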
def usingDASK(file):
    chunks = []
    timing.log("Starting reading-in")

    reader = dd.read_json(
        file,
        lines=True,
        blocksize=2 ** 28,
        meta={"data": object, "message_type": object},
    )
    # t=reader['data'].map_partitions(lambda df: df.apply(lambda x: x.apply(flattenDict, key='', result={}))).to_bag()
    # t=reader.map_partitions(lambda df: df['data'].apply(flattenDict, key='', result={})).to_bag()
    datas = (
        reader["data"]
        .map_partitions(lambda df: df.apply((lambda row: flattenDict(row, "", {}))))
        .to_bag()
    )
    new = datas.to_dataframe()
    new["message_type"] = reader["message_type"]
    new = new.compute()
    dups = new.duplicated(subset="leaf_cert.fingerprint")
    dups = new[dups]
    dups.to_csv("duplicates_DASK.csv")

    """
def getClusters(config, val):

	eigvec = np.load('savefiles/%s_eigv_%ic.npy' %(config['pname'], config['numClusters']));
	clusters = [];
	
	timing.log('Data file accepted, computing cluster indices...');

	#	Finds all indices outside of 2 stddevs of eigenvector mean
	mean = np.mean(eigvec, axis = 0);
	std = np.std(eigvec, axis = 0);
	inClust = np.greater(np.abs(np.subtract(eigvec, mean)),2*std);
	cluster_size = np.sum(inClust, axis = 0);
	indices = np.argsort(-inClust, axis = 0);
	
	for i in range(config['numClusters']):
		clusters.append(indices[:cluster_size[i],i]);
		print 'Cluster-Size: %i' %(cluster_size[i]);
		avg_time = .9777642*np.median(clusters[i]);
		beg_time = .9777642*np.mean(np.sort(clusters[i])[0]);
		end_time = .9777642*np.mean(np.sort(clusters[i])[-1]);

		print 'Cluster %i median Time: %.2fns' %(i+1, avg_time);
		print 'Cluster %i Time Range: %.2fns - %.2fns' %(i+1, beg_time, end_time);
		

	if val.save: np.save('savefiles/%s_clustind_%in_%ic.npy' %(config['pname'], config['n_neighbors'], config['numClusters']), clusters);

	timing.log('Clusters saved!');
def main(argv):
    file = "/home/lnvp-linux-wkst1/Desktop/future/subsample3"
    timing.log("Started V2")
    usingJson_v2(file)
    timing.endlog()
    timing.log("Started v1")
    usingJson(file)
    print("done")
    timing.endlog()

    """
	#get file:
	file=argv
	file="/home/lnvp-linux-wkst1/Desktop/future/ctl_records_subsample"


	certs=[] # for holding all of the ordered data
	#from https://stackoverflow.com/questions/37200302/using-python-ijson-to-read-a-large-json-file-with-multiple-json-objects
	with open(file, encoding="UTF-8") as json_file:
	    cursor = 0
	    for line_number, line in enumerate(json_file):
	        print ("Processing line", line_number + 1,"at cursor index:", cursor)
	        line_as_file = io.StringIO(line)
	        # Use a new parser for each line
	        json_parser = ijson.parse(line_as_file)
	        cert={}
	        # print("json_parser: ", json_parser)
	        for prefix, kind, value in json_parser:
	            # print ("prefix=",prefix, "type=",kind, "value=",value)
	            if "string" is kind:
	            	cert.update({prefix:value})
        	certs.append(cert)
	        cursor += len(line)

	"""
    """
	with open(file, "r") as f:
		for line in f:
			# print(line)
			# cert=sortDictLists(prepRow(line))#, '', {})
			cert=prepRow(line)#, '', {})

			#we need to put the strings from sortDict back into dicts. 
			#MAYBE THIS ISN'T NECESSARY?!
			cs={}
			for c in cert:
				print("c: ", c)
				cs.update({c[0]: c[1]})
			# print("*****type: ", type(cert))
			# print("cert: ",cert)
			# certs.append(cs)
			certs.append(cert)
	"""
    """
Example #5
 def plot(self):
     """Plots all pulses together on one graph, then saves it as plot.png"""
     printfl("plotting data...")
     for item in self.listofdata:
         plt.plot([x * (3.3 / 2**13) * 1000 for x in item])
     plt.xlabel("microsec")
     plt.ylabel("mV")
     plt.title("%s Pulses (%s Rise, %s Tail)" % (len(self.listofdata), self.rise, self.tail))
     plt.savefig(self.filedir + "//plot.png", dpi=500)
     plt.show()
     plt.close()
     timing.log("data plotted", timing.log_return())
Example #6
def source(serialportname, chunkSize, nmax):
    """returns voltage data from the serial port, looping forever"""
    # Get handle to serial port
    s = serial.Serial(serialportname)
    print("collecting data...")
    curr = 0
    while curr < nmax:
        data = s.read(chunkSize)
        data = np.frombuffer(data, dtype=np.uint16)  # np.fromstring is deprecated for binary input
        q.put(data)
        curr = curr + 1
    timing.log("data collected", timing.log_return())
Example #7
def main():
    n = 10
    for i in range(3,7):
        N = n**i
        res = open('primes_' + str(N) + '.txt','w')
        g = genPrimes()
        timing.log('Generating first ' + str(N) + ' primes:')
        start = timing.clock()
        for _ in range(N):  # use _ to avoid shadowing the outer loop variable i
            res.write(str(next(g)) + '\n')
        end = timing.clock()
        res.close()
        print('Time elapsed: ', timing.secondsToStr(end - start))
        print('\n')
def run(upperBound):

    numList = list(upperBound * [True])
    
    numList[0] = False;
    numList[1] = False;
    
    for (i, isprime) in enumerate(numList):
        if isprime:
            
            for n in range(i*i, upperBound, i):
                numList[n] = False
    
    
    primeList = [i for i, x in enumerate(numList) if x == True]

    timing.log("Prime List Completed")
    
    count = len(primeList)  # report the actual number of primes (the old "+ 4" only padded the 5-per-row print below)
#     for i in range(0, count//5):
#         
#         if not primeList:
#             item0 = 'xxx'
#         else:
#             item0 = primeList.pop(0)
#         
#         if not primeList:
#             item1 = 'xxx'
#         else:
#             item1 = primeList.pop(0)
#         
#         if not primeList:
#             item2 = 'xxx'
#         else:
#             item2 = primeList.pop(0)
#         
#         if not primeList:
#             item3 = 'xxx'
#         else:
#             item3 = primeList.pop(0)
#             
#         if not primeList:
#             item4 = 'xxx'
#         else:
#             item4 = primeList.pop(0)    
#             
#         print("{0}, {1}, {2}, {3}, {4}\n".format(item0, item1, item2, item3, item4))
    print("Total number of primes in range of 2 to {0} is {1}".format(upperBound, count))
Example #9
def get_data(serialportname, loops, chunkSize):
    """returns voltage data from the serial port"""
    # Get handle to serial port
    s = serial.Serial(serialportname)
    printfl("port init")
    voltages = []
    printfl("collecting data...")
    for i in range(loops):
        printfl(i)
        data = s.read(chunkSize * 2)
        data = np.frombuffer(data, dtype=np.uint16)  # np.fromstring is deprecated for binary input
        np.set_printoptions(threshold=np.inf)  # threshold must be numeric, not 'nan'
        voltages.append(0)
        voltages += data.tolist()
    timing.log("data collected", timing.log_return())
    return voltages
Example #10
def analyze_secondary(voltages, rise, tail, filedir):
    """Used to perform secondary analysis on already collected data"""
    printfl("initalizing object...")

    pltr = Plotter(voltages, rise, tail, filedir)
    timing.log("object initalized", timing.log_return())

    trg = pltr.get_avg() + pltr.get_noise() * 4
    printfl("Trigger value:" + str(trg * (3.3 / 2**13) * 1000))

    pltr.splice_data(trg)

    # Add and remove methods below as needed per run
    pltr.fit()

    pltr.plot_heights(trg, 100, True, True)
Example #11
 def save_data(self, txtfl=None):
     """
     Saves all data for that run to a text file
     Input textfile for use with run.py UI
     """
     if txtfl == None:
         txtfl = open(self.filedir + "//voltagedata.txt", 'w')
         printfl("txtfl opened, saving...")
         txtfl.write(str(self.dta))
         txtfl.close()
     else:
         printfl("saving to txt...")
         txtfl.write("Raw data:\n")
         txtfl.write(str(self.dta) + "\n")
         txtfl.write("Data saved at: %s\n" % timing.log_return())
     timing.log("data saved", timing.log_return())
Example #12
def do_analysis(program):
    time1 = timing.log("TRACE PARSING RUN #1 (NO INLINING)")
    data = traces.Gem5Parser(program, config.Arguments.gem5_traces)
    debug.verbose_message("HWMT = %d" % data.getLongestTime(), __name__)
    calculations.WCETCalculation(program, data)
    program.output()
    program.generateAllUDrawFiles()

    if program.getCallGraph().numOfvertices() > 1 and config.Arguments.inline:
        program.inlineCalls()
        time2 = timing.log("TRACE PARSING RUN #2 (INLINED PROGRAM)")
        data = traces.Gem5Parser(program, config.Arguments.gem5_traces)
        debug.verbose_message("HWMT = %d" % data.getLongestTime(), __name__)
        calculations.WCETCalculation(program, data)
        program.output()
        program.generateAllUDrawFiles("inlined")
Example #14
def get_data_from_text(filedir):
    """returns voltage data from a previously saved text file"""
    printfl("txtfl open")
    txtfl = open(filedir, 'r')
    printfl("reading data...")
    voltages = txtfl.read().split()
    voltagelist = []
    printfl("translating data...")
    for i in range(len(voltages)):
        temp = voltages[i].translate(str.maketrans('', '', '[],'))  # Python 3 str.translate
        if temp == "":
            pass
        else:
            voltagelist.append(float(temp))
    timing.log("data read", timing.log_return())
    return voltagelist
Example #15
def get_data_from_previous_run(filename):
    """function used to read in data from a previously written text file"""
    printfl("txtfl open")
    textfile = open(filename, 'r')
    printfl("reading data...")
    while textfile.readline().strip() != 'Raw data:':  # matches the 'Raw data:' header written by save_data
        pass
    voltages_str = textfile.readline().strip()
    voltages = voltages_str.split()
    voltagelist = []
    printfl("translating data...")
    for voltage in voltages:
        translated = voltage.translate(str.maketrans('', '', '[],'))  # Python 3 str.translate
        if translated != "" and translated != " ":
            voltagelist.append(float(translated))
    timing.log("data read", timing.log_return())
    return voltagelist
Example #16
    def fit(self, savepars=False, showimg=False, saveimg=False, txtfl=None):
        """
        Fits all pulses to a curve and saves the resulting fit parameters if specified. Saves the plot.
        """
        printfl("fitting data...")

        def powerfunc(t, tau, amplitude, offset, c):
            """The function being used for the fit, identical in all the fitting methods below"""
            return amplitude * ((t - offset) / tau)**2 * math.e**(-(t - offset) / tau + (2 - math.log(4, math.e))) + c

        count = 0
        for i in range(len(self.listofdata)):
            x = np.linspace(0, len(self.listofdata[i]), len(self.listofdata[i]))
            try:
                pars, covariance_matrix = sciopt.curve_fit(powerfunc, x, self.listofdata[i], [1, 100, 0, 284])
                self.listofpars.append(pars.tolist())
                if showimg == True or saveimg == True:
                    x = np.linspace(0, len(self.listofdata[i]), 100)
                    plt.plot(x, [y * (3.3 / 2**13) * 1000 for y in powerfunc(x, *pars)])
            #Sometimes curve_fit can't fit
            except (RuntimeError, TypeError):
                count += 1
        if showimg == True or saveimg == True:
            plt.xlabel("microsec")
            plt.ylabel("mV")
            plt.title("%s Pulses" % (len(self.listofdata) - count))
        if saveimg == True:
            plt.savefig(self.filedir + "//fits.png", dpi=500)
        if showimg == True:
            plt.show()
        if saveimg == True or showimg == True:
            plt.close()
        if savepars == True:
            if txtfl == None:
                txtfl = open(self.filedir + "//fitpars.txt", 'w')
                printfl("txtfl opened")
                txtfl.write(str(self.listofpars))
                txtfl.close()
            else:
                printfl("saving to txt...")
                txtfl.write("Parameters:\n")
                txtfl.write(str(self.listofpars) + "\n")
                txtfl.write("Parameters saved at: %s\n" % timing.log_return())
        timing.log("data fitted", timing.log_return())
Example #17
    def plot_heights(self, trigger, bins=None, savetxt=False, showimg=False, saveimg=False, txtfl=None):
        """Plots max height of pulses in a histogram"""
        if not self.listofpars:
            printfl("run fit() first")
        else:

            def powerfunc(t, tau, A_i, offset, c):
                return A_i * ((t - offset) / tau)**2 * math.e**(-(t - offset) / tau + (2 - math.log(4, math.e))) + c

            listofmax = []
            count = 0
            for num in self.listofpars:
                if not (2 * num[0] + num[2] < 0 and num[2] < 0): # Ensures that the found point is a max not a min
                    max1 = powerfunc(2 * num[0] + num[2], *num)
                    max2 = powerfunc(num[2], *num)
                    if max1 > max2 and max1 > trigger and max1 < 2**13:
                        listofmax.append(max1)
                        count += 1
                    elif max2 > trigger and max2 < 2**13:
                        listofmax.append(max2)
                        count += 1
            if showimg == True or saveimg == True:
                plt.hist([x * (3.3 / 2**13) * 1000 for x in listofmax], bins)
                plt.xlabel("Pulse Height (mV)")
                plt.ylabel("# of Pulses")
                plt.title("%s Pulses, %s Bins" % (count, bins))
            if savetxt == True:
                if txtfl == None:
                    txtfl = open(self.filedir + "//pulseheights.txt", 'w')
                    printfl("opened txtfl")
                    txtfl.write(str(listofmax))
                    txtfl.close()
                else:
                    printfl("saving to txt...")
                    txtfl.write("Max heights:\n")
                    txtfl.write(str(listofmax) + "\n")
                    txtfl.write("Heights saved at: %s\n" % timing.log_return())
            if saveimg == True:
                plt.savefig(self.filedir + "//heighthist.png")
            if showimg == True:
                plt.show()
            if showimg == True or saveimg == True:
                plt.close()
        timing.log("histogram created", timing.log_return())
Example #18
 def splice_data(self, trigger, save=False, txtfl=None):
     """
     given a trigger value, and expected rise/fall times, returns a list of pulses and saves it if specified
     input textfile for use with run.py
     """
     printfl("splicing data...")
     i = 0
     while i < len(self.dta) - 1:
         if self.dta[i] >= trigger and self.dta[i + 1] >= trigger:
             if i - self.rise < 0:
                 a = 0
             else:
                 a = i - self.rise
             if i + self.tail > len(self.dta):
                 b = len(self.dta)
             else:
                 b = i + self.tail
             flag = False
             for j in range(len(self.dta[a:b])):
                 if self.dta[a:b][j] == 0:
                     flag = True
                     if a == 0:
                         i = j + 1
                     else:
                         i += j + self.rise + 1
             if flag == False:
                 self.listofdata.append(self.dta[a:b])
                 i += self.tail
         else:
             i += 1
     if save == True:
         if txtfl == None:
             txtfl = open(self.filedir + "//pulses.txt", 'w')
             printfl("txtfl opened")
             txtfl.write(str(self.listofdata))
             txtfl.close()
         else:
             printfl("saving to txt...")
             txtfl.write("Pulses:\n")
             txtfl.write(str(self.listofdata) + "\n")
             txtfl.write("Pulses saved at: %s\n" % timing.log_return())
     printfl("%s pulses detected" % len(self.listofdata))
     timing.log("data spliced", timing.log_return())
Example #19
    def test_log(self):
        # Checks variable date components and constant string output.
        time_string = timing.seconds_to_string(elapsed=time())
        substrings = [
            'Completed @', 'It took:', 'seconds', '=====', '.', str(datetime.now().year), str(datetime.now().month),
            str(datetime.now().day)
        ]
        saved_stdout = sys.stdout
        try:
            out = StringIO()
            sys.stdout = out

            timing.log(elapsed=time_string)

            output = out.getvalue()
            for substring in substrings:
                self.assertTrue(substring in output, msg=substring)
        finally:
            sys.stdout = saved_stdout
Example #20
def analyze_default(voltages, rise, tail, filedir):
    """Used to analyze new data from the Arduino"""
    printfl("initalizing object...")

    pltr = Plotter(voltages, rise, tail, filedir)
    timing.log("object initalized", timing.log_return())

    pltr.save_data()

    printfl("calculating trigger...")
    trg = pltr.get_avg() + pltr.get_noise() * 4
    printfl("Trigger value:" + str(trg * (3.3 / 2**13) * 1000))

    pltr.splice_data(trg)

    # Add and remove methods below as needed per run
    pltr.plot()

    pltr.fit(True, True, True)

    pltr.plot_heights(trg, 100, True, True)
Example #21
# Grab the dissolved water geometry feature
fields = ['OID@', 'SHAPE@']
with arcpy.da.SearchCursor(water_dissolve, fields) as cursor:
    for oid, geom in cursor:
        water_geom = geom

# Union the natural area and water features into a single geometry
with arcpy.da.UpdateCursor(nat_areas_dissolve, fields) as cursor:
    for oid, geom in cursor:
        geom = geom.union(water_geom)
        cursor.updateRow((oid, geom))

# Assign the parks/water feature class to more appropriately named variable
water_and_nat_areas = nat_areas_dissolve

timing.log('water and natural areas dissolved and merged')

# Erase merged parks/water features from property data
# Consider trying multi-processing for this step at some point as it is very computationally intensive:
# http://blogs.esri.com/esri/arcgis/2011/08/29/multiprocessing/
trimmed_taxlots = os.path.join(env.workspace, 'trimmed_taxlots.shp')
arcpy.analysis.Erase(taxlots, water_and_nat_areas, trimmed_taxlots)

timing.log('tax lots trimmed')

trimmed_multifam = os.path.join(env.workspace, 'trimmed_multifam.shp')
arcpy.analysis.Erase(multi_family, water_and_nat_areas, trimmed_multifam)

timing.log('multi-family housing trimmed')
# ran in 56:47 on 5/19/14
Example #22
functions = [vsum, vsin]

header = ["d","n","sizeFull"]
for f in functions:
    header.append("%s_ttSize" % (f.__name__))  # func_name is Python 2 only; use __name__
    header.append("%s_tensorCrossApproxTime" % (f.__name__))
    
results = [] # Each dim: [d,n,sizefull,f1_ttSize,f1_sizeRatio,...]

for d in D:
    print("### DIMENSION %d ###" %(d))
    localResults = [d,n,"%d^%d"%(n,d)]

    for f in functions:
        t0 = timing.log("Cross-approximating tensor to TT")
        tones = tt.ones(n,d)
        tensin = cross(f, tones, nswp=10)
        tFoo, tDiffCross = timing.log("Tensor compressed", t0)
        size = numparams(tensin)
        print("n = %d, d = %d, numparams = %d, tDiffCross = %s" %(n,d,size,tDiffCross))
        localResults.append(size)
        localResults.append(tDiffCross)
        
    results.append(localResults)

f = open("results_cross.csv", 'w')
f.write(";".join(header)+"\n")
for line in results:
    sLine = [str(x) for x in line]
    f.write(";".join(sLine)+"\n")
Example #23
        )

    config.Arguments.test_specification_file = os.path.splitext(config.Arguments.program_file)[0] + ".test"
    if not os.path.exists(config.Arguments.test_specification_file):
        debug.exit_message(
            "Expected to find the test specification file '%s' but it is not there"
            % config.Arguments.test_specification_file
        )


if __name__ == "__main__":
    the_command_line()
    debug.verbose_message(
        "%s Analysing program '%s' %s" % ("*" * 10, config.Arguments.program_file, "*" * 10), __name__
    )
    time1 = timing.log("COMPILING BEGIN")
    binary, program = get_binary_and_program()
    time2 = timing.log("COMPILING END")
    if config.Arguments.compile:
        debug.exit_message("DONE")
    if config.Arguments.gem5_traces:
        check_trace_files()
    else:
        set_gem5_variables()
        if config.Arguments.ga:
            debug.verbose_message("Using GA to generate test vectors", __name__)
            config.Arguments.gem5_traces.extend(testing.runGAGem5(binary))
        else:
            debug.verbose_message("Running program on gem5 with %d tests" % config.Arguments.tests, __name__)
            config.Arguments.gem5_traces.extend(testing.run_gem5(binary))
    do_analysis(program)
Example #24
def nsga_2(input_v,activate_parallel,num_cores):
    ##import sys
    import numpy as np
    from objective_description_function import objective_description_function
    from initialize_variables import initialize_variables
    from non_domination_sort_mod import non_domination_sort_mod
    from tournament_selection import tournament_selection
    import genetic_operator
    from replace_chromosome import replace_chromosome 
    import matplotlib.pyplot as plt
    print('Running Optimization...')
    input_v['objAll'] = []
    input_v['xAll'] = []
    import os
    import timing
    ##Making sure that population and generations are integers
    input_v['population'] = round(input_v['population'])
    input_v['generations'] = round(input_v['generations'])
    
    
    ##Forming the objective function 
    ##input_v['M'] --- number of objective functions
    ##input_v['V'] --- number of decision variables 
    ##input_v['min_range'] --- list of corresponding lowerbound of the decision variables 
    ##input_v['max_range'] --- list of corresponding upperbound of the decision variables 
    input_v['M'], input_v['V'], input_v['min_range'], input_v['max_range'] = objective_description_function(input_v)    
    
    ##Initialize the population 
    ##Population is initialized with random values which are within the specified range. Each chromosome consists of the
    ##decision variables. Also the values of the objective functions, rank and crowding distance information are added
    ##to the chromosome vector, but only the elements of the vector which hold the decision variables are operated upon to
    ##perform the genetic operations like crossover and mutation

    chromosome, input_v = initialize_variables(input_v,activate_parallel,num_cores)    
    
    ##Sort the initialized population 
    ##Sort the population using non-domination-sort. This returns two columns for each individual which are the rank and the 
    ##crowding distance corresponding to their position in the front they belong to. At this stage the rank and the crowding distance 
    ##for each chromosome is added to the chromosome vector for ease of computation.
    chromosome = non_domination_sort_mod(chromosome, input_v['M'], input_v['V'])
    
    ## Start the evolution process
    # The following are performed in each generation
    # * Select the parents which are fit for reproduction
    # * Perform crossover and Mutation operator on the selected parents
    # * Perform Selection from the parents and the offsprings
    # * Replace the unfit individuals with the fit individuals to maintain a
    #   constant population size.
    
    print('Starting search process...')
    for i in range (0, input_v['generations']):
        
        ## Select the parents
        ## Parents are selected for reproduction to generate offspring. The
        ## original NSGA-II uses a binary tournament selection based on the
        ## crowded-comparison operator. The arguments are 
        ## pool - size of the mating pool. It is common to have this to be half the
        ##        population size.
        ## tour - Tournament size. Original NSGA-II uses a binary tournament
        ##        selection, but to see the effect of tournament size this is kept
        ##        arbitrary, to be chosen by the user.
        
        pool = int(round(input_v['population']/2))
        tour = 2
        
        ## Selection process
        ## A binary tournament selection is employed in NSGA-II. In a binary
        ## tournament selection process two individuals are selected at random
        ## and their fitness is compared. The individual with better fitness is
        ## selected as a parent. Tournament selection is carried out until the
        ## pool size is filled. Basically a pool size is the number of parents
        ## to be selected. The input arguments to the function
        ## tournament_selection are chromosome, pool, tour. The function uses
        ## only the information from last two elements in the chromosome vector.
        ## The last element has the crowding distance information while the
        ## penultimate element has the rank information. Selection is based on
        ## rank and if individuals with same rank are encountered, crowding
        ## distance is compared. A lower rank and higher crowding distance is
        ## the selection criteria.

        parent_chromosome = tournament_selection(chromosome, pool, tour)

        ## Perform crossover and Mutation operator
        ## The original NSGA-II algorithm uses Simulated Binary Crossover (SBX) and
        ## Polynomial  mutation. Crossover probability pc = 0.9 and mutation
        ## probability is pm = 1/n, where n is the number of decision variables.
        ## Both real-coded GA and binary-coded GA are implemented in the original
        ## algorithm, while in this program only the real-coded GA is considered.
        ## The distribution indices for crossover and mutation operators are mu = 20
        ## and mum = 20 respectively.
        
        mu = 20 
        mum = 20
        offspring_chromosome, input_v = genetic_operator.genetic_operator(parent_chromosome, input_v['M'], input_v['V'], mu, mum, 
                                                         input_v['min_range'], input_v['max_range'], input_v,num_cores,activate_parallel)
        
        ##Intermediate population 
        ##Intermediate population is the combined population of parents and offsprings of the current generation. The population
        ##size is two times the initial population.
        
        dim_chromosome = chromosome.shape 
        main_pop = dim_chromosome[0]

        dim_offspring_chromosome = offspring_chromosome.shape 
        offspring_pop = dim_offspring_chromosome[0]

        ##Intermediate_chromosome is a concatenation of current population and the offspring population.
        intermediate_chromosome = np.zeros((main_pop + offspring_pop, input_v['M'] + input_v['V']))
        for j in range (0, main_pop):
            for k in range (0, input_v['M'] + input_v['V']):
                intermediate_chromosome[j,k] = chromosome[j,k]
        for j in range (main_pop, main_pop + offspring_pop):
            for k in range (0, input_v['M'] + input_v['V']):
                intermediate_chromosome[j,k] = offspring_chromosome[j-main_pop,k]        
        
        ##Non-domination-sort of intermediate population
        ##The intermediate population is sorted again based on non-domination sort before the replacement operator is performed 
        ##on the intermediate population.
        
        intermediate_chromosome = non_domination_sort_mod(intermediate_chromosome, input_v['M'], input_v['V'])
        ##np.savetxt('C:\\Optimization_zlc\\solution.csv', intermediate_chromosome, delimiter=',')
        ##sys.exit()
        
        ##Perform Selection
        ##Once the intermediate population is sorted only the best solutions are selected based on their rank and crowding distance. 
        ##Each front is filled in ascending order until the addition of population size is reached. The last front is included 
        ##in the population based on the individuals with least crowding distance
        

        chromosome = replace_chromosome(intermediate_chromosome, input_v['M'], input_v['V'], input_v['population'])

        print('Search ' + 'iteration ' + str(i))
        
#        if not(i%100) and (i !=0):
#            disp_msg = str(i) + ' generations completed \n'
#            print(disp_msg)
    
    ##Result
    ##Save the result in csv format
    xAll = input_v['xAll']
    objAll = input_v['objAll']

    np.savetxt(os.path.dirname(__file__)+'\\solution\\solution.csv', chromosome, delimiter=',')
    np.savetxt(os.path.dirname(__file__)+'\\solution\\solutionX.csv', xAll, delimiter=',')
    np.savetxt(os.path.dirname(__file__)+'\\solution\\solutionObj.csv', objAll, delimiter=',')
    
    ##Visualize 
    ##The following is used to visualize the result if objective space dimension can be displayed i.e. 2D, 3D
    timing.log("NOW")
    if input_v['M'] == 2:
        ##Objective functions
        plt.plot(chromosome[:, input_v['V']], chromosome[:, input_v['V']+1], '*')
        plt.title('Objective function last population')
        plt.xlabel('Objective 1')
        plt.ylabel('Objective 2')
        plt.show()
        
        ##All populations 
        plt.plot(objAll[:, 0], objAll[:, 1], '*')
        plt.title('Objective function all populations')
        plt.xlabel('Objective 1')
        plt.ylabel('Objective 2')
        plt.show()        
    
    elif input_v['M'] == 3:
        ##Objective functions (pyplot has no zlabel; a 3D axes is needed for three objectives)
        ax = plt.figure().add_subplot(projection='3d')
        ax.plot(chromosome[:, input_v['V']], chromosome[:, input_v['V']+1], chromosome[:, input_v['V']+2], '*')
        ax.set_title('Objective function last population')
        ax.set_xlabel('Objective 1')
        ax.set_ylabel('Objective 2')
        ax.set_zlabel('Objective 3')
        plt.show()

        ##All populations
        ax = plt.figure().add_subplot(projection='3d')
        ax.plot(objAll[:, 0], objAll[:, 1], objAll[:, 2], '*')
        ax.set_title('Objective function all populations')
        ax.set_xlabel('Objective 1')
        ax.set_ylabel('Objective 2')
        ax.set_zlabel('Objective 3')
        plt.show()

    
##Copyright (c) 2009, Aravind Seshadri
##All rights reserved.

##Redistribution and use in source and binary forms, with or without  modification, are permitted provided that the following 
##conditions are met:

##   * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
##   * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer 
##     in the documentation and/or other materials provided with the distribution
##     
##THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT 
##NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 
##THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 
##(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
##HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
##ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
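# The comments in nsga_2 above describe binary tournament selection on rank and crowding
# distance (rank in the penultimate column, crowding distance in the last column). A minimal
# standalone sketch of that idea, assuming such a chromosome array; this is an illustration,
# not the project's tournament_selection implementation:
import numpy as np


def binary_tournament(chromosome, pool_size):
    """Pick pool_size parents: lower rank wins, ties go to the larger crowding distance."""
    n_ind = chromosome.shape[0]
    rank_col = chromosome.shape[1] - 2   # penultimate column holds the rank
    dist_col = chromosome.shape[1] - 1   # last column holds the crowding distance
    parents = []
    for _ in range(pool_size):
        a, b = np.random.randint(0, n_ind, size=2)
        if chromosome[a, rank_col] != chromosome[b, rank_col]:
            winner = a if chromosome[a, rank_col] < chromosome[b, rank_col] else b
        else:
            winner = a if chromosome[a, dist_col] >= chromosome[b, dist_col] else b
        parents.append(chromosome[winner])
    return np.array(parents)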
Example #26
    mgf = lambdify((c1, c2, x, p), expr)
    nlist = []
    for i in range(n):
        nlist.append(math.ceil(t / PSet[i]['period']))
    for i in nlist:
        prob = prob * (mgf(PSet[count]['NWCET'], PSet[count]['AWCET'], s,
                           PSet[count]['prob']))**int(i)
        count += 1
    prob = prob / exp(s * t)

    timing.log("Chernoff bound ends")
    return prob


taskInit()
timing.log("Task init")
selectedpoints(n - 1)  #3 tasks use 2, 2 tasks use 1
timing.log("Select k points")
pResult = .0
minP = 1.
plt.title('Probability of P(S>=a)')
plt.xlabel('t')
plt.ylabel('Probability')
'''
for y in Tpoints:
    overT = []
    t = []
    for x in np.arange(0, 1, 0.001):
        overT.append(Chernoff_bounds(float(y), x))
        t.append(x)
    print "min probability bound is ", min(overT)
Example #27
def process_zip(zip_path, keep_dirs=True, keep_orig=False, save_rest=True):
    """
    Upload, Rencode, Reupload each MP3 in zip_path
    Upload ZIP of all rencoded files
    If keep_dirs is true, temporary files for unzip are not deleted
    If remove_orig is true, the original ZIP will be deleted
    """
    debug("ZIP path: %s\n\
           Keep temporary files: %s\n\
           Keep original ZIP: %s\n\
           Save non-ZIP files: %s" % (zip_path, keep_dirs, keep_orig, save_rest))
    debug("Loading ZIP file for reading")
    mixtape = zipfile.ZipFile(zip_path, 'r')
    zipped_name = None

    BASE_PATH = os.path.join(ROOT_DIR, 'output')
    FULL_DIR = os.path.join(BASE_PATH, 'full')
    STRIP_DIR = os.path.join(BASE_PATH, 'stripped')
    PREVIEW_DIR = os.path.join(BASE_PATH, 'preview')
    VIDEO_DIR = os.path.join(BASE_PATH, 'video')
    IMAGE_DIR = os.path.join(BASE_PATH, 'images')
    debug('Making temp folders')
    WORKING_DIRS = [FULL_DIR, STRIP_DIR, PREVIEW_DIR, VIDEO_DIR, IMAGE_DIR]
    for wdir in WORKING_DIRS:
        if not os.path.exists(wdir):
            os.mkdir(wdir)
    try:
        # Extract each file in the ZIP that ends with mp3 to the full folder
        # and then the stripped folder. If an error is raised, the folders we
        # just created will be removed in the finally block of this try.
        for name in mixtape.namelist():
            if "MACOSX" not in name:
                if name.lower().endswith('mp3'):
                    basename = os.path.basename(name)
                    if not basename.startswith("."):
                        path = os.path.join(FULL_DIR, basename)
                        debug('Extracting "%s" to "%s"' % (name, path))
                        data = mixtape.read(name)
                        f = open(path, 'wb')  # ZipFile.read returns bytes, so write in binary mode
                        f.write(data)
                        f.close()
                elif name.lower().endswith('jpg'):
                    basename = os.path.basename(name)
                    if not basename.startswith("."):
                        path = os.path.join(IMAGE_DIR, basename)
                        debug('Extracting image "%s" to "%s"' % (name, path))
                        data = mixtape.read(name)
                        f = open(path, 'wb')  # ZipFile.read returns bytes, so write in binary mode
                        f.write(data)
                        f.close()
        timing.log("Finished extracting", timing.clock() - timing.start)
        # Upload all of the files, stripping copies into the stripped folder
        with Connection() as conn:
            images = get_images(IMAGE_DIR)
            for name in os.listdir(FULL_DIR):
                local_start_time = timing.clock()
                debug('Processing "%s"' % name)
                full_path = os.path.join(FULL_DIR, name)
                stripped_path = os.path.join(STRIP_DIR, name)
                preview_path = os.path.join(PREVIEW_DIR, name)
                audiofile = eyed3.load(full_path)
                audiofile = clean_mp3_id3_tags(audiofile)
                if generate_strip(full_path, target_path=stripped_path):
                    conn.upload(name, local_dir=FULL_DIR)
                    conn.upload(name, local_dir=STRIP_DIR, remote_dir="128/")
                else:
                    debug("Not uploading because stripping apparently failed")
                if generate_preview(full_path, target_path=preview_path):
                    conn.upload(name, local_dir=PREVIEW_DIR, remote_dir="preview/")
                    video_path = os.path.join(VIDEO_DIR, name)
                    video_path = video_path.replace('mp3', 'mp4')
                    if images:
                        vid_args = {
                            'full_path': preview_path,
                            'target_path': video_path,
                            'image_path': images.pop()
                        }
                    else:
                        vid_args = {
                            'full_path': preview_path,
                            'target_path': video_path
                        }
                    # if generate_video(**vid_args):
                    #     ## upload to youtube
                    #     upload_youtube(
                    #         video_path,
                    #         config['youtube']['user'],
                    #         config['youtube']['password'],
                    #         audiofile.tag.title,
                    #         '%s - %s' % (audiofile.tag.artist, audiofile.tag.title)
                    #     )
                    # else:
                    #     debug("Unable to generate video file")
                else:
                    debug("Unable to generate preview file")
                timing.log(
                    "Finished processing \"%s\"" % name, timing.clock() - local_start_time
                )
            ## Call php script to pre-cache mp3 info
            debug("calling pre_cache php script: processid3.php")
            pre_cache_mp3_id3(conn.s3_path)
            ## generate zip archive, upload, and delete local copy
            zipped_name = zip_folder(FULL_DIR, name=os.path.basename(zip_path))
            conn.upload(zipped_name)
            os.remove(zipped_name)
    finally:
        debug('Cleaning up')
        if not keep_dirs:
            for wdir in WORKING_DIRS:
                shutil.rmtree(wdir)
        if not keep_orig:
            os.remove(zip_path)
        if not save_rest:
            clear_dir(os.path.join(ROOT_DIR, "data"))
    url = conn.url + zipped_name
    debug("ZIP processed")
    return url
def usingPANDAS(file):
    # set the size of the information to process at any time, so we're not loading everything into memory at once
    chunks = (
        []
    )  # for storing all the chunks so that we can send them all to a merged dataframe later.

    # PANDAS has a jsonreader that works with chunks! AND it works well with line-delimited JSON
    timing.log("Starting reading-in")
    reader = pd.read_json(
        (file), lines=True, chunksize=100000, dtype=False
    )  # so it doesn't infer the type?
    # print(reader)
    # chunks=(flattenDict(chunk.to_dict(), "",{}) for chunk in reader)
    # chunks=[chunk for chunk in reader]
    # certs.append([pd.DataFrame(json_normalize(x)) for x in chunks['data']])
    # certs.append([pd.DataFrame(json_normalize(x)) for x in chunk['data'] for chunk in reader])
    # certsDF = pd.concat([pd.DataFrame(json_normalize(x)) for x in chunk['data']],ignore_index=True)
    timing.log("Starting chunk processing")
    for chunk in reader:
        # print(chunk)
        # columns=chunk.columns
        new = chunk["data"].apply(json.dumps)
        new = json_normalize(new.apply(json.loads))
        # new=chunk['data'].apply(flattenDict, args={'',{}})
        # print("new: \n", new)
        # chunk=pd.concat([chunk, new], axis=1)#.to_dict()
        # chunk.merge(new, how="outer")
        # chunk=chunk.update(new)
        for column in chunk.columns:
            if "data" != column:
                new[column] = chunk[column]
        del new["data"]  # remove data now because dictionary screws things up later.

        # print("type(new): ", type(new))
        # print("data in set(new.columns): ", "data" in set(new.columns))
        # print("chunk.keys: ", chunk.keys())
        # print("chunk.columns: ", chunk.columns)
        # print("chunk: ", chunk)
        # print("new: ", new)
        # print("new.columns: ", new.columns)
        # for column in columns:
        # 	# chunk[column]=eval('json_normalize(chunk.{}.apply(json.loads))'.format(column))
        # 	# chunk.merge(chunk, eval('json_normalize(chunk.{}.apply(json.loads))'.format(column)))
        # 	# print(type(chunk[column]))
        # 	# if isinstance(chunk[column], dict):
        # 	# 	new=pd.Dataframe(chunk[column])
        # 	# 	chunk=pd.concat([chunk, new], axis=1)
        # 	# 	print("chunk columns: ", chunk.columns)
        # 	# try:
        # 	# print(type(chunk[column]))
        # 	# print(chunk[column])
        # 	# # print(chunk[column].to_dict().keys())
        # 	# # raise("break")
        # 	# new=pd.DataFrame(chunk[column].values)#to_dict())
        # 	# # print(new.columns)
        # 	# chunk=pd.concat([chunk, new], axis=1)
        # 	# print("chunk columns: ", chunk.columns)

        # 	# new=pd.value_counts(chunk[column])
        # 	new=chunk[column].apply(pd.DataFrame.from_dict)
        # 	print("new: ", new)
        # 	chunk=pd.concat([chunk, new], axis=1)

        # 	# except:
        # 		# raise(Error)

        # chunks.append(chunk)
        # print("new: \n", new )
        # new.to_csv("temp_new.csv")
        chunks.append(new)
        # timing.log("just finish a loop") #THIS SHOWS IT TAKES ~.06seconds to run a loop.

        # certs=[pd.concat([pd.DataFrame(json_normalize(x)) for x in chunk['data']])]
        # print(chunk)
        # chunks.append(chunk)
        # chunks.append(certs)
    # print(certs[1])

    """		
	NEW plan. Clearly the chunking is a good idea, but we still have to deal with the json strings. So let's try to normalize those AFTER. If the column contains strings that are valid jsons, convert them.
	Nope. Not working. We can't treat them like jsons, because they're going in as dictionaries. They're not acting like dictionaries, though. The data column is a series. I want to be able to extract the dictionaries and then attach them to the rest of the dataframe, so we preserve any columns that aren't the 'data'. 
	"""

    # now make a major dataframe
    # certsDF=reduce(lambda x, y: pd.merge(x, y), chunks)
    # print(type(chunks))
    # print(len(chunks))
    # print("data in set(new.columns): ", "data" in set(new.columns))
    # print("set(new.columns): ", set(new.columns))
    # certsDF=pd.DataFrame(chunks)
    timing.log("Starting Concat")
    certsDF = pd.concat(chunks, ignore_index=True, sort=True)
    # certsDF=reduce(lambda x, y: pd.merge(x, y), chunks)
    # print(certsDF.shape)
    # certsDF=pd.DataFrame(certs)
    # print(certsDF.columns)
    timing.log("finding dups")
    dups = certsDF.duplicated(subset="leaf_cert.fingerprint")
    dups = certsDF[dups]
    timing.log("writing dups to file")
    dups.to_csv("duplicates_PANDAS.csv")

    """
Example #29
if val.debug: val.verbose = True;

def save_sparse_csc(filename,array):
	np.savez(filename, data = array.data, indices=array.indices, indptr=array.indptr, shape=array.shape )


config = np.load('config.npz')['config'];
config = config.reshape(1)[0];
if val.num_neigh != 0: config['n_neighbors'] = val.num_neigh

icacoffs = np.load('savefiles/coord_icacoffs%s_%id_%i-%it.npy' %(config['pname'], config['icadim'], config['startTraj']+1, config['startTraj'] + config['numOfTraj']))
numSamples = icacoffs.shape[1]
nbrs = nb(n_neighbors=config['n_neighbors'], algorithm='kd_tree').fit(icacoffs[ :config['affdim'] ].T);

if val.verbose: timing.log('Tree constructed...\n\nBeginning search for %i nearest neighbors using only %i dimensions...' %(config['n_neighbors'], config['affdim']));

distances, indices = nbrs.kneighbors(icacoffs[ :config['affdim'] ].T);


#	Affgen	-----------------------------------------------------------------------------

indptr = np.arange(0,config['n_neighbors']*numSamples+1, config['n_neighbors']);
indices = indices.flatten();
data = np.exp(-(distances.flatten() ** 2.0));

affMat = csc_matrix( (data, indices, indptr), shape = (numSamples,numSamples) );

if val.graph:
	plt.spy(affMat, precision = .1, markersize = 1);
	plt.show();
def createOutput(text,outputFile):
    timing.log(text, outputFile)
Example #31
def test_all():
    """
    Runs a number of tests to check whether the python scripts of archive-update perform correctly.
    The name of the method needs to start with "test_" so that it gets picked up by py.test.
    """
    import timing

    prepare_archive_data()
    timing.log('prepare_archive_data done', timing.now())

    run_archive_update()
    timing.log('run_archive_update done', timing.now())

    run_archive_reformat()
    timing.log('run_archive_reformat done', timing.now())

    run_archive_split()
    timing.log('run_archive_split done', timing.now())

    run_archive_thinning()
    timing.log('run_archive_thinning done', timing.now())

    run_archive_analysis()
    timing.log('run_archive_analysis done', timing.now())

    run_archive_difference()
    timing.log('run_archive_difference done', timing.now())

    run_extract_extremes()
    timing.log('run_extract_extremes done', timing.now())

    cleanup_archive_data()
    timing.log('cleanup_archive_data done', timing.now())
Example #32
                    ) % nct  # Another point outside n[0],n[1] segment. See picture in lecture notes!
                    n[4] = nc
                    n[5] = (nc + 1) % nct

                    # Cost to transpose a segment
                    de = -Distance(R[city[n[1]]], R[city[n[3]]]) - Distance(
                        R[city[n[0]]], R[city[n[2]]]) - Distance(
                            R[city[n[4]]], R[city[n[5]]])
                    de += Distance(R[city[n[0]]], R[city[n[4]]]) + Distance(
                        R[city[n[1]]], R[city[n[5]]]) + Distance(
                            R[city[n[2]]], R[city[n[3]]])

                    if de < 0 or exp(-de / T) > rand():  # Metropolis
                        accepted += 1
                        dist += de
                        city = transpt(city, n)

                if accepted > maxAccepted: break

            # Plot
            Plot(city, R, dist)

            print "T=%10.5f , distance= %10.5f , accepted steps= %d" % (
                T, dist, accepted)
            T *= fCool  # The system is cooled down
            if accepted == 0:
                break  # If the path does not want to change any more, we can stop

        Plot(city, R, dist)
        timing.log("clawk.")
def readInOutIn(file):
    chunks = []
    folder = path.join(getcwd(), "temp")
    if not path.isdir(folder):
        mkdir(folder)

    timing.log("Starting reading-in")
    reader = pd.read_json((file), lines=True, chunksize=100000, dtype=False)

    timing.log("Starting chunk processing")
    label = (
        0
    )  # for keeping track of the files. Could just do enumerate, but why get the length of reader? It could be huge.
    for chunk in reader:
        new = chunk["data"].apply(json.dumps)
        new = json_normalize(new.apply(json.loads))

        for column in chunk.columns:
            if "data" != column:
                new[column] = chunk[column]
        del new["data"]  # remove data now because dictionary screws things up later.

        # NOW WRITE OUT!
        timing.log("Writing csv chunk %s" % label)
        new.to_csv(path.join(folder, "chunk_%s.csv" % label), chunksize=100000)
        label += 1

    timing.log("Reading back in!")
    files = glob(path.join(folder, "*.csv"))
    for f in files:
        new = pd.read_csv(f)  # read each chunk file back in, not the original json path
        chunks.append(new)

    # now convert the list of dataframes into a single dataframe
    certsDF = pd.concat(chunks, ignore_index=True, sort=True)

    timing.log("finding dups")
    dups = certsDF.duplicated(subset="leaf_cert.fingerprint")
    dups = certsDF[dups]
    timing.log("writing dups to file")
    dups.to_csv("duplicates_readInOutIn.csv")
Example #35
def nsga_2(input_v, activate_parallel, num_cores, crossover_perc,
           mutation_perc, obj_func_plot):
    ##import sys
    import numpy as np
    from objective_description_function import objective_description_function
    from initialize_variables import initialize_variables
    from non_domination_sort_mod import non_domination_sort_mod
    from tournament_selection import tournament_selection
    import genetic_operator_adv
    from replace_chromosome import replace_chromosome
    import matplotlib.pyplot as plt
    from ga_mono_binary_conv_info import ga_mono_binary_conv_info
    print('Running Optimization...')
    input_v['objAll'] = []
    input_v['xAll'] = []
    import os
    import timing
    import time
    ##Making sure that population and generations are integers
    input_v['population'] = round(input_v['population'])
    input_v['generations'] = round(input_v['generations'])

    bin_info_variable_list = ga_mono_binary_conv_info(
        input_v['range_of_decision_variables'])
    total_binary_len = sum(bin_info_variable_list['Bits'][:])

    ##Forming the objective function
    ##input_v['M'] --- number of objective functions
    ##input_v['V'] --- number of decision variables
    ##input_v['min_range'] --- list of corresponding lowerbound of the decision variables
    ##input_v['max_range'] --- list of corresponding upperbound of the decision variables
    input_v['M'], input_v['V'], input_v['min_range'], input_v[
        'max_range'] = objective_description_function(input_v)

    ##Initialize the population
    ##Population is initialized with random values which are within the specified range. Each chromosome consists of the
    ##decision variables. Also the values of the objective functions, rank and crowding distance information are added
    ##to the chromosome vector, but only the elements of the vector which hold the decision variables are operated upon to
    ##perform the genetic operations like crossover and mutation

    chromosome, input_v = initialize_variables(input_v, activate_parallel,
                                               num_cores,
                                               bin_info_variable_list)

    ##Sort the initialized population
    ##Sort the population using non-domination-sort. This returns two columns for each individual which are the rank and the
    ##crowding distance corresponding to their position in the front they belong to. At this stage the rank and the crowding distance
    ##for each chromosome is added to the chromosome vector for ease of computation.
    chromosome = non_domination_sort_mod(chromosome, input_v['M'],
                                         input_v['V'])

    ## Start the evolution process
    # The following are performed in each generation
    # * Select the parents which are fit for reproduction
    # * Perform crossover and Mutation operator on the selected parents
    # * Perform Selection from the parents and the offsprings
    # * Replace the unfit individuals with the fit individuals to maintain a
    #   constant population size.

    ##Generating probability reference for the mutation process
    mutation_prob = mutation_probab(bin_info_variable_list, total_binary_len)
    axes = plt.gca()
    print('Starting search process...')

    for i in range(0, input_v['generations']):

        ## Select the parents
        ## Parents are selected for reproduction to generate offspring. The
        ## original NSGA-II uses a binary tournament selection based on the
        ## crowded-comparison operator. The arguments are
        ## pool - size of the mating pool. It is common to have this to be half the
        ##        population size.
        ## tour - Tournament size. Original NSGA-II uses a binary tournament
        ##        selection, but to see the effect of tournament size this is kept
        ##        arbitrary, to be chosen by the user.

        pool = int(round(input_v['population'] / 2))
        tour = 2

        ## Selection process
        ## A binary tournament selection is employed in NSGA-II. In a binary
        ## tournament selection process two individuals are selected at random
        ## and their fitness is compared. The individual with better fitness is
        ## selected as a parent. Tournament selection is carried out until the
        ## pool size is filled. Basically a pool size is the number of parents
        ## to be selected. The input arguments to the function
        ## tournament_selection are chromosome, pool, tour. The function uses
        ## only the information from last two elements in the chromosome vector.
        ## The last element has the crowding distance information while the
        ## penultimate element has the rank information. Selection is based on
        ## rank and if individuals with same rank are encountered, crowding
        ## distance is compared. A lower rank and higher crowding distance is
        ## the selection criteria.

        parent_chromosome = tournament_selection(chromosome, pool, tour)

        ## Perform crossover and Mutation operator
        ## The original NSGA-II algorithm uses Simulated Binary Crossover (SBX) and
        ## Polynomial  mutation. Crossover probability pc = 0.9 and mutation
        ## probability is pm = 1/n, where n is the number of decision variables.
        ## Both real-coded GA and binary-coded GA are implemented in the original
        ## algorithm, while in this program only the real-coded GA is considered.
        ## The distribution indeices for crossover and mutation operators as mu = 20
        ## and mum = 20 respectively.

        mu = 20
        mum = 20
        parent_chrom_bin = conv_dec_to_bin(bin_info_variable_list,
                                           parent_chromosome, input_v['M'])

        offspring_chromosome, input_v = genetic_operator_adv.genetic_operator_adv(
            parent_chrom_bin, mutation_prob, input_v, bin_info_variable_list,
            activate_parallel, num_cores)
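
        ##Illustrative sketch of one binary crossover/mutation step, assuming a
        ##single-point crossover and a per-bit flip with probability
        ##mutation_prob; the real operators live in genetic_operator_adv and may
        ##differ (e.g. SBX and polynomial mutation for real coding). Never called.
        def _crossover_mutate_sketch(parent1_bits, parent2_bits, pm, rng=np.random):
            cut = rng.randint(1, len(parent1_bits))  ##crossover point
            child = np.concatenate((parent1_bits[:cut], parent2_bits[cut:]))
            flip = rng.random_sample(len(child)) < pm  ##per-bit mutation mask
            child[flip] = 1 - child[flip]  ##flip the selected bits (0 <-> 1)
            return child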

        ##Intermediate population
        ##Intermediate population is the combined population of the parents and offspring of the current generation. The population
        ##size is two times the initial population.

        dim_chromosome = chromosome.shape
        main_pop = dim_chromosome[0]

        dim_offspring_chromosome = offspring_chromosome.shape
        offspring_pop = dim_offspring_chromosome[0]

        ##Intermediate_chromosome is a concatenation of current population and the offspring population.
        intermediate_chromosome = np.zeros(
            (main_pop + offspring_pop, input_v['M'] + input_v['V']))
        for j in range(0, main_pop):
            for k in range(0, input_v['M'] + input_v['V']):
                intermediate_chromosome[j, k] = chromosome[j, k]
        for j in range(main_pop, main_pop + offspring_pop):
            for k in range(0, input_v['M'] + input_v['V']):
                intermediate_chromosome[j,
                                        k] = offspring_chromosome[j - main_pop,
                                                                  k]
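
        ##Note: the explicit copy above is equivalent to stacking the first
        ##M + V columns of both populations, e.g. (illustrative, assuming both
        ##arrays have at least M + V columns):
        ##intermediate_chromosome = np.vstack(
        ##    (chromosome[:, :input_v['M'] + input_v['V']],
        ##     offspring_chromosome[:, :input_v['M'] + input_v['V']]))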

        ##Non-domination-sort of intermediate population
        ##The intermediate population is sorted again based on non-domination sort before the replacement operator is performed
        ##on the intermediate population.

        intermediate_chromosome = non_domination_sort_mod(
            intermediate_chromosome, input_v['M'], input_v['V'])
        ##np.savetxt('C:\\Optimization_zlc\\solution.csv', intermediate_chromosome, delimiter=',')
        ##sys.exit()

        ##Perform Selection
        ##Once the intermediate population is sorted, only the best solutions are selected based on their rank and crowding distance.
        ##Each front is filled in ascending order of rank until the population size is reached. The last front is included
        ##in the population based on the individuals with the largest crowding distance.

        chromosome = replace_chromosome(intermediate_chromosome, input_v['M'],
                                        input_v['V'], input_v['population'])
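
        ##Illustrative sketch of the elitist replacement described above: fronts
        ##are taken in ascending rank order until the population size would be
        ##exceeded, and the last admitted front is truncated by descending
        ##crowding distance. Never called; replace_chromosome is the actual
        ##implementation.
        def _replace_sketch(sorted_pop, pop_size):
            rank_col = sorted_pop.shape[1] - 2
            dist_col = sorted_pop.shape[1] - 1
            selected = []
            for rank in np.unique(sorted_pop[:, rank_col]):
                front = sorted_pop[sorted_pop[:, rank_col] == rank]
                if len(selected) + len(front) <= pop_size:
                    selected.extend(front)
                else:
                    ##Truncate the last front, keeping the least crowded points
                    order = np.argsort(-front[:, dist_col])
                    selected.extend(front[order][:pop_size - len(selected)])
                    break
            return np.array(selected)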

        if obj_func_plot == 'yes':
            plt.plot(chromosome[:, input_v['V']],
                     chromosome[:, input_v['V'] + 1], '*')
            plt.title('Objective function last population')
            plt.xlabel('Objective 1')
            plt.ylabel('Objective 2')

            plt.draw()
            plt.pause(1e-17)
            axes.clear()

        print('Search iteration ' + str(i))


#        if not(i%100) and (i !=0):
#            disp_msg = str(i) + ' generations completed \n'
#            print(disp_msg)

##Result
##Save the result in csv format
    xAll = input_v['xAll']
    objAll = input_v['objAll']

    np.savetxt(os.path.dirname(__file__) + '\\solution\\solution.csv',
               chromosome,
               delimiter=',')
    np.savetxt(os.path.dirname(__file__) + '\\solution\\solutionX.csv',
               xAll,
               delimiter=',')
    np.savetxt(os.path.dirname(__file__) + '\\solution\\solutionObj.csv',
               objAll,
               delimiter=',')

    ##Visualize
    ##The following is used to visualize the result if the objective space can be displayed, i.e. in 2D or 3D
    timing.log("NOW")
    if input_v['M'] == 2:
        ##Objective functions
        plt.plot(chromosome[:, input_v['V']], chromosome[:, input_v['V'] + 1],
                 '*')
        plt.title('Objective function last population')
        plt.xlabel('Objective 1')
        plt.ylabel('Objective 2')
        plt.show()

        ##All populations
        plt.plot(objAll[:, 0], objAll[:, 1], '*')
        plt.title('Objective function all populations')
        plt.xlabel('Objective 1')
        plt.ylabel('Objective 2')
        plt.show()

    elif input_v['M'] == 3:
        ##Objective functions (3D scatter of the last population)
        ##matplotlib has no plt.zlabel; a 3D axes from the mplot3d toolkit is
        ##needed to show the third objective
        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the '3d' projection)
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot(chromosome[:, input_v['V']], chromosome[:, input_v['V'] + 1],
                chromosome[:, input_v['V'] + 2], '*')
        ax.set_title('Objective function last population')
        ax.set_xlabel('Objective 1')
        ax.set_ylabel('Objective 2')
        ax.set_zlabel('Objective 3')
        plt.show()

        ##All populations
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot(objAll[:, 0], objAll[:, 1], objAll[:, 2], '*')
        ax.set_title('Objective function all populations')
        ax.set_xlabel('Objective 1')
        ax.set_ylabel('Objective 2')
        ax.set_zlabel('Objective 3')
        plt.show()
Beispiel #36
0
def secondTry():
    arrayy = [1-15, 2-15, 'Fizz', 4-15, 'Buzz', 'Fizz', 7-15, 8-15, 'Fizz', 'Buzz', 11-15, 'Fizz', 13-15, 14-15, 'FizzBuzz'] # It's the list of the first 15 elements to be printed, with 15 subtracted from each number because of the first iteration below
    for j in range(7):
        for i in range(len(arrayy)):
            if j*15+i>99 : return
            try:
                arrayy[i] += 15
                print(arrayy[i])
            except TypeError: # String + Number throws this error
                print(arrayy[i])
            finally:
                print()


#########################
#  RUNNING
#########################
import timing # With the import the time starts counting!
firstTry()
firstTryTime = timing.log("Finished First Try")
secondTry()
secondTryTime = timing.log("Finished Second Try", firstTryTime)


#########################
# RESULTS
#########################
print("Difference between First and Second Try")
diff_1_2 = secondTryTime-firstTryTime
print('Exact time: ', diff_1_2)
print('Rounded time: ', timing.secondsToStr(diff_1_2))
Beispiel #37
0
for f in functions:
    header.append("%s_ttSize" %(f.func_name))
    header.append("%s_sizeRatio" %(f.func_name))
    header.append("%s_tensorCreationTime" %(f.func_name))
    header.append("%s_tensorCompressionTime" %(f.func_name))
    
results = [] # Each dim: [d,n,sizefull,f1_ttSize,f1_sizeRatio,...]

for d in D:
    print("### DIMENSION %d ###" %(d))
    xVec = np.meshgrid(*tuple([vtheta for x in range(d)]))

    localResults = [d,n,n**d]

    for f in functions:
        t0 = timing.log("Creating full tensor of function %s with n=%d, d=%d" %(f.func_name,n,d))
        tensor = f(np.asarray(xVec))
        tFoo, tDiffCreation = timing.log("Full tensor created", t0)
        
        t0 = timing.log("Compressing tensor to TT")
        a = tt.tensor(tensor)
        tFoo, tDiffCompression = timing.log("Tensor compressed", t0)
        
        s_tensor = n**d
        
        print("Size of full tensor: %d params" %(s_tensor))
        
        s_a = numparams(a)
        
        print("Size of tt representation: %d params" %(s_a))
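
        # Hypothetical continuation (not part of the original snippet): the
        # header above names per-function columns ttSize, sizeRatio and the two
        # timings, so the bookkeeping presumably looks roughly like
        # localResults.extend([s_a, float(s_a) / s_tensor,
        #                      tDiffCreation, tDiffCompression])
        # with results.append(localResults) after the function loop.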
        
    parser.add_argument('-i', '--instances', type=parse_range, default=range(1, 16),
                        help='instance numbers to be included in the processing of archives')
    parser.add_argument('-d', '--dimensions', type=parse_range, default=[2, 3, 5, 10, 20, 40],
                        help='dimensions to be included in the processing of archives')
    parser.add_argument('--merge-only', action='store_true',
                        help='perform only merging of archives, do not update hypervolume values')
    parser.add_argument('--crop-variables', action='store_true',
                        help='don\'t include information on the variables in the output archives')
    parser.add_argument('--hyp-file', default='new_best_values_hyp.c',
                        help='name of the file to store new hypervolume values')
    parser.add_argument('output', help='path to the output folder')
    parser.add_argument('input', default=[], nargs='+', help='path(s) to the input folder(s)')
    args = parser.parse_args()

    print('Program called with arguments: \ninput folders = {}\noutput folder = {}'.format(args.input, args.output))
    print('functions = {} \ninstances = {}\ndimensions = {}\n'.format(args.functions, args.instances, args.dimensions))

    # Merge the archives
    new_hypervolumes = merge_archives(args.input, args.output, args.functions, args.instances, args.dimensions,
                                      args.crop_variables)

    timing.log('Finished merging', timing.now())

    # Use files with best hypervolume values from the src folder and update them with the new best values
    if not args.merge_only:
        base_path = os.path.dirname(__file__)
        file_names = ['suite_biobj_best_values_hyp.c']
        file_names = [os.path.abspath(os.path.join(base_path, '..', '..', 'code-experiments/src', file_name))
                      for file_name in file_names]
        update_best_hypervolume(file_names, new_hypervolumes, os.path.join(args.output, '..', args.hyp_file))
 from timeit import default_timer as timer
 import gc
 total = list()
 iterations = 10
 # for i in range(iterations):
 #     start = timer()
 #     gc.enable()
 #     connect_graph2(G)
 #     end = timer()
 #     total.append((end - start))
 # print('connect graph: ',np.mean(total), np.std(total))
 # total = list()
 for i in range(iterations):
 #     start = timer()
 #     gc.enable()
     timing.log('Connect graph started')
     time = timing.time()
     connect_graph3(G)
     timing.log('Connect graph ended', timing.time()-time)
 # plt.plot(times0)
 # plt.plot(times1)
 # plt.plot(times11)
 # plt.plot(times2)
 # plt.plot(times3)
 # plt.plot(times4)
 # plt.plot(times5)
 print('complete while loop occurred ', len(times0), ' with mean ', np.mean(times0), ' total cost ', len(times0)*np.mean(times0))
 print('get_oposition_points_by_distance occurred ', len(times1), ' with mean ', np.mean(times1), ' total cost ', len(times1)*np.mean(times1))
 print('get_triangles_to_look occurred ', len(times11), ' with mean ', np.mean(times11), ' total cost ', len(times11)*np.mean(times11))
 print('for candidate loop occurred ', len(times2), ' with mean ', np.mean(times2), ' total cost ', len(times2)*np.mean(times2))
 print('create new_triangle occurred ', len(times3), ' with mean ', np.mean(times3), ' total cost ', len(times3)*np.mean(times3))
Beispiel #40
0
        second_str = str(second_now)
    folder_dir += "//" + str(crystal_label) + "_" + month_str + "_" + date_str + "_" + year_str + "_" + hour_str\
                  + minute_str + "_" + second_str
    os.makedirs(folder_dir)
    txtfl = open(folder_dir + "//data.txt", 'w')
    txtfl.write("crystal: " + crystal_label + "\n")
    txtfl.write("loops: %s\n" % loops)

    printfl("initializing object...")
    # If specified as default, replace with default values
    if rise == "default":
        rise = 1
    if tail == "default":
        tail = 30
    pltr = tester.Plotter(get_data(serialportname, loops, chunkSize), rise, tail, folder_dir)
    timing.log("object initialized")
    txtfl.write("object initialized at: %s\n" % timing.log_return())

    # If requested, save raw data to text file
    if save_bool == "y":
        pltr.save_data(txtfl)
        # Uncomment this below if you want the raw data written to a separate text file
        # You might want this because for longer loops, raw data can take up a lot of space and make it difficult to
        # open the text file
        """
        raw_data_txtfl = open(folder_dir + "//raw_data.txt", 'w')
        pltr.save_data(raw_data_txtfl)
        raw_data_txtfl.close()
        """

    printfl("calculating trigger...")
Beispiel #41
0
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--functions', type=parse_range, default=range(1, 56),
                        help='function numbers to be included in the processing of archives')
    parser.add_argument('-i', '--instances', type=parse_range, default=range(1, 11),
                        help='instance numbers to be included in the processing of archives')
    parser.add_argument('-d', '--dimensions', type=parse_range, default=[2, 3, 5],
                        help='dimensions to be included in the processing of archives')
    parser.add_argument('-l', '--lower_bound', type=float, default=-5.0,
                        help='lower bound of the decision space')
    parser.add_argument('-u', '--upper_bound', type=float, default=5.0,
                        help='upper bound of the decision space')
    parser.add_argument('output', help='path to the output folder')
    parser.add_argument('summary', help='file name for the summary')
    parser.add_argument('input', default=[], nargs='+', help='path(s) to the input folder(s)')
    args = parser.parse_args()

    print('Program called with arguments: \ninput folders = {}\noutput folder = {}'.format(args.input, args.output))
    print('summary file = {}'.format(args.summary))
    print('functions = {} \ninstances = {}\ndimensions = {}'.format(args.functions, args.instances, args.dimensions))
    print('lower bound = {} \nupper bound = {}\n'.format(args.lower_bound, args.upper_bound))

    # Analyze the archives
    archive_analysis(args.input, args.output, args.lower_bound, args.upper_bound, args.functions, args.instances,
                     args.dimensions)

    timing.log('Finished reading data', timing.now())

    summary_analysis(args.output, args.summary, args.lower_bound, args.upper_bound, args.functions, args.instances,
                     args.dimensions)
Beispiel #42
0
 for percentageU in range(60, 61, 10):
     perfault = []
     for fr in faultRate:
         numberOfRuns = 1
         print('Tasks: ' + repr(tib) + ', NumDeadline:' + repr(nd) +
               ', FaultRate:' + repr(fr) +
               ', Utilization:' + repr(percentageU))
         seq_prob = []
         for i in range(numberOfRuns):
             tasks = []
             tasks = task_generator.taskGeneration_p(
                 tib, percentageU)
             tasks = mixed_task_builder.hardtaskWCET(tasks, htf, fr)
             keepTasks = tasks[:]
             #the following part is for testing
             timing.log("ptda method starts include opt")
             resP = EPST.probabilisticTest_ptda(tasks, nd, 3)
             timing.log("ptda method ends include opt")
             timing.log("our method starts include opt")
             resP = EPST.probabilisticTest_p(tasks, nd, 3)
             timing.log("our method ends include opt")
             seq_prob.append(
                 resP
             )  #this will get the maximum probability among the tasks in a task set.
         perfault.append(seq_prob)
         #afterward, perfault list contains various faultRate results
     fileName = 'tasks' + repr(tib) + '_numMisses' + repr(
         nd) + '_utilization' + repr(percentageU)
     folder = repr(numberOfRuns) + '/'
     file = open(folder + 'txt/' + fileName + '.txt', "w")
     file.write('Runs: ' + repr(numberOfRuns) + '\n')
    # 8:18.195 processor time.

    # I use iGraph to generate an Erdos-Renyi random graph using the experiment parameters for number of nodes and edge
    # probability. Each node gets an attribute "hasOpinion", set by default to 'A', and a percentage of the nodes are changed to 'B'.
    #
    print "Generating Erdos-Renyi random graph with iGraph. Note: time to completion in next time stamp."
    erGraph = Graph.Erdos_Renyi(N, EP, directed=True, loops=False)
    erGraph.vs["hasOpinion"] = 'A'

    # Give a random sample of nodes the minority opinion
    b_list = random.sample(range(N), P)
    for b in b_list:
        erGraph.vs[b]["hasOpinion"] = 'B'

    # Print time stamp for graph creation
    timing.log(clock())

    # # REPL Timeout may have been root cause of occasional 137 exits on graphs with 100K+ nodes; snooze to prevent.
    # print "Sleeping for 60 seconds before starting the model..."
    # sleep(60)
    # timing.log(clock())

    # g = erGraph # Copy the Erdos-Renyi random graph to a working copy; don't want to destroy raw data
    # #
    iReport()

    data = []

    while iReport()[2] != 0.0 and iReport()[2] != 1.0:
        iInteract()
        data.append(iReport())
Beispiel #44
0
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--functions', type=parse_range, default=range(1, 56),
                        help='function numbers to be included in the processing of archives')
    parser.add_argument('-i', '--instances', type=parse_range, default=range(1, 11),
                        help='instance numbers to be included in the processing of archives')
    parser.add_argument('-d', '--dimensions', type=parse_range, default=[2, 3, 5],
                        help='dimensions to be included in the processing of archives')
    parser.add_argument('-l', '--lower_bound', type=float, default=-5.0,
                        help='lower bound of the decision space')
    parser.add_argument('-u', '--upper_bound', type=float, default=5.0,
                        help='upper bound of the decision space')
    parser.add_argument('output', help='path to the output folder')
    parser.add_argument('summary', help='file name for the summary')
    parser.add_argument('input',  help='path to the input folder')
    args = parser.parse_args()

    print('Program called with arguments: \ninput folder = {}\noutput folder = {}'.format(args.input, args.output))
    print('summary file = {}'.format(args.summary))
    print('functions = {} \ninstances = {}\ndimensions = {}'.format(args.functions, args.instances, args.dimensions))
    print('lower bound = {} \nupper bound = {}\n'.format(args.lower_bound, args.upper_bound))

    # Analyze the archives
    archive_analysis(args.input, args.output, args.lower_bound, args.upper_bound, args.functions, args.instances,
                     args.dimensions)

    timing.log('Finished reading data', timing.now())

    summary_analysis(args.output, args.summary, args.lower_bound, args.upper_bound, args.functions, args.instances,
                     args.dimensions)
Beispiel #45
0
            % config.Arguments.program_file)

    config.Arguments.test_specification_file = os.path.splitext(
        config.Arguments.program_file)[0] + '.test'
    if not os.path.exists(config.Arguments.test_specification_file):
        debug.exit_message(
            "Expected to find the test specification file '%s' but it is not there"
            % config.Arguments.test_specification_file)


if __name__ == "__main__":
    the_command_line()
    debug.verbose_message(
        "%s Analysing program '%s' %s" %
        ('*' * 10, config.Arguments.program_file, '*' * 10), __name__)
    time1 = timing.log("COMPILING BEGIN")
    binary, program = get_binary_and_program()
    time2 = timing.log("COMPILING END")
    if config.Arguments.compile:
        debug.exit_message("DONE")
    if config.Arguments.gem5_traces:
        check_trace_files()
    else:
        set_gem5_variables()
        if config.Arguments.ga:
            debug.verbose_message("Using GA to generate test vectors",
                                  __name__)
            config.Arguments.gem5_traces.extend(testing.runGAGem5(binary))
        else:
            debug.verbose_message(
                "Running program on gem5 with %d tests" %
Beispiel #46
0
    parser.add_argument('input',
                        default=[],
                        nargs='+',
                        help='path(s) to the input folder(s)')
    args = parser.parse_args()

    print(
        'Program called with arguments: \ninput folders = {}\noutput folder = {}'
        .format(args.input, args.output))
    print('functions = {} \ninstances = {}\ndimensions = {}\n'.format(
        args.functions, args.instances, args.dimensions))

    # Merge the archives
    new_hypervolumes = merge_archives(args.input, args.output, args.functions,
                                      args.instances, args.dimensions,
                                      args.crop_variables)

    timing.log('Finished merging', timing.now())

    # Use files with best hypervolume values from the src folder and update them with the new best values
    if not args.merge_only:
        base_path = os.path.dirname(__file__)
        file_names = ['suite_biobj_best_values_hyp.c']
        file_names = [
            os.path.abspath(
                os.path.join(base_path, '..', '..', 'code-experiments/src',
                             file_name)) for file_name in file_names
        ]
        update_best_hypervolume(file_names, new_hypervolumes,
                                os.path.join(args.output, '..', args.hyp_file))
# Clip the union layer to only cover the extent of the OSM bounding box defined above
zip_union_clip = 'in_memory/zip_union_clip'
arcpy.analysis.Clip(zip_union, b_box_fc, zip_union_clip)

# Add a field to store the final zip code for the unioned regions
final_zip_field = 'zip_code'
f_type = 'TEXT'
arcpy.management.AddField(zip_union_clip, final_zip_field, f_type)

# Using the established hierarchy among the datasets, consolidate all of the zip code values into a single field
fields = [final_zip_field, rlis_zip_field, or_zip_field, tiger_zip_field]
with arcpy.da.UpdateCursor(zip_union_clip, fields) as cursor:
	for final_zip, rlis_zip, or_zip, tiger_zip in cursor:
		if rlis_zip != '':
			final_zip = rlis_zip
		# In the State of Oregon zip code file there are some zips that don't begin with a '9'; these seem
		# to be invalid and are excluded
		elif or_zip != '' and fnmatch.fnmatch(or_zip, '9*'):
			final_zip = or_zip
		elif tiger_zip != '':
			final_zip = tiger_zip

		cursor.updateRow((final_zip, rlis_zip, or_zip, tiger_zip))

# Dissolve zip code boundaries based on the unified 'zip' field
zip_final = os.path.join(env.workspace, 'data/or_wa_zip_codes.shp')
dissolve_field = final_zip_field
arcpy.management.Dissolve(zip_union_clip, zip_final, dissolve_field)

timing.log('Compilation of jurisdictional data complete')
java_import(gateway.jvm, 'edu.mit.csail.sdg.alloy4compiler.ast.Attr')
java_import(gateway.jvm, 'edu.mit.csail.sdg.alloy4compiler.ast.Func')
java_import(gateway.jvm, 'edu.mit.csail.sdg.alloy4compiler.ast.Command')
java_import(gateway.jvm, 'edu.mit.csail.sdg.alloy4compiler.ast.ExprConstant')
# java_import(gateway.jvm,'edu.mit.csail.sdg.alloy4.Util')
wrapper = gateway.entry_point

A4Options = gateway.jvm.A4Options
Attr = gateway.jvm.Attr
Func = gateway.jvm.Func
Command = gateway.jvm.Command
ExprConstant = gateway.jvm.ExprConstant
TranslateAlloyToKodkod = gateway.jvm.TranslateAlloyToKodkod
ArrayList = gateway.jvm.ArrayList
Array = gateway.jvm.Arrays
timing.log("Start Execution")

satsolver = wrapper.getSatSolver()
opt = wrapper.getA4Options()
opt.solver = satsolver
A = wrapper.getPrimSig("A", Attr.ABSTRACT)
B = wrapper.getSimplePrimSig("B")
A1 = wrapper.getParentPrimSig("A1", A, Attr.ONE)
A2 = wrapper.getParentPrimSig("A2", A, Attr.ONE)
f = A.addField("f", B.lone_arrow_lone(B))
g = A.addField("g", B)

someG = Func(None, "SomeG", None, None, g.some())

x = wrapper.getStandardPrimSig(1).oneOf("x")
y = wrapper.getStandardPrimSig(1).oneOf("y")