Code Example #1
    def exitProgram(self, ctx:LittleExprParser.ProgramContext):
        #print("\n".join(self.printSymbolTable))
        #print(self.allCode)
        AST.functReturns = self.functReturns

        globalVars = ([var for var in self.symbolTable[-1].keys() if self.symbolTable[-1][var][0] != "STRING"])
        CFGList = []
        self.tinyGenerator = TinyGenerator(self.allCode, stringInit=1)
        tinyCode = ""
        for var in globalVars:
            tinyCode += "var {0}\n".format(var)

        tinyCode += self.tinyGenerator.generate()
        # tinyCode += "push\npush r0\npush r1\npush r2\npush r3\njsr main\nsys halt\n"
        tinyCode += "push\njsr main\nsys halt\n"
        # tinyCode += "push\njsr main\nsys halt\n"
        
        for functNode in self.functNodeList:
            AST.tempRegNum = 1
            functCode = functNode.generateCode()
            functOptimizer = Optimizer(functCode)
            functCode = functOptimizer.optimize()

            functTinyGen = TinyGenerator(functCode, globalVariables=globalVars, functName=functNode.functName)
            self.allCode += functCode
            tinyCode += functTinyGen.generate()

        self.symbolTable.pop()
        self.tinyCode = tinyCode
        # self.tinyGenerator = TinyGenerator(self.allCode)
        # self.tinyGenerator.generate()

        self.printTinyIR()
Code Example #2
File: main.py Project: jaredp/PythonCompiler
def _main(mainfile):
	pname = mainfile.rpartition('.')[0]
	cppfile = pname + '.cpp'

	program = AstTranslator.translateFile(mainfile)
	Optimizer.correct(program)

	if '-O' in command_line_flags:
		Optimizer.optimize(program)

	if '-n' in command_line_flags:
		return

	if '-i' in command_line_flags:
		program.pprint()
		return

	IRtoC.translateProgram(program, open(cppfile, 'w'))

	if '-ng' not in command_line_flags:
		CppCompiler.build(cppfile, pname,
			'-gcc' in command_line_flags,
			'-W' in command_line_flags,
			'-g' in command_line_flags
		)
Code Example #3
def main(args, folder):
    overwrite_data = False

    run_description = 'Simulated aberrations and images for Deep Learning Training \n\
Lab 118. \n\
112x112 image dimensions. \n\
Zernike Polynomials 1-35. \n\
Fully randomized aberration generation. \n\
Zernike optimization using corrected scaling. \n\
zbasis = True. \n\
Exposure value at -6.'

    os.makedirs(folder, exist_ok=True)
    shutil.copy(sys.argv[0], folder + '/mainscript.py')
    shutil.copystat(sys.argv[0], folder + '/mainscript.py')

    file = open(folder + '/log.txt', 'w+')
    print('Run Description: ', run_description)
    file.write('Description: ' + run_description + '\n\n')
    file.close()
    interface = Interface.Interface(args)

    args.num_initial_metrics = 10
    args0 = copy.copy(args)

    optimize_zernike = True

    zopt_mask = 0
    if optimize_zernike:
        args = copy.copy(args0)
        ##        args.save_path = folder+'/zopt'
        args.save_path = folder[:folder.rfind('DL_data') + len('DL_data')]
        zmodes = np.arange(3, 49)
        zopt = Optimizer.Optimizer(args, interface)
        if os.path.isfile(args.save_path + '/optimized_zmodes.txt'):
            print('Loading zmodes from file...')
            opt_zmodes = np.loadtxt(args.save_path + '/optimized_zmodes.txt')
            print(opt_zmodes)
            zopt_mask = zopt.parent_masks.create_zernike_mask(opt_zmodes)
            print(zopt_mask.shape)
        else:
            args.save_path = folder + '/zopt'
            zopt.run_zernike(zmodes, [-600, 600])
            zopt_mask = zopt.parent_masks.get_slm_masks()[-1]

    coeff_range = [-128, 128]
    DLmodes = np.arange(1, 36)
    num_data = 1000000
    batch_size = 1000

    args = copy.copy(args0)
    args.save_path = folder + '/DLdata'
    DLopt = Optimizer.Optimizer(args, interface, base_mask=zopt_mask)
    DLopt.record_DLdata(DLmodes,
                        coeff_range,
                        num_data,
                        batch_size,
                        overwrite=overwrite_data)

    print('\n\nDONE with zernike optimization............\n\n')
Code Example #4
def testLogisticRegression():
    epsilon = 0.000001
    testData = np.mat(np.loadtxt("data/horseColicTest.txt"))
    trainingData = np.mat(np.loadtxt("data/horseColicTraining.txt"))

    __testCore(trainingData, testData, Optimizer.GradientDescent(epsilon))
    __testCore(trainingData, testData, Optimizer.NewtonMethod(epsilon))
Code Example #5
def main(argv):

    Tb_no = int(argv[1])

    File_names = []

    for i in range(0, Tb_no):
        File_names.append(argv[2 + i])

    try:
        Entities, Signals, Process_Set = P.Parser(File_names)
    except Exception as e:
        print("\n\nFrom Main Failed : " + str(e) + "\n\n")
        return  # Entities/Signals/Process_Set are undefined past this point

    try:
        kernel = OP.Optimizer(Entities, Signals, Process_Set)
    except Exception as e:
        print("\n\nFrom Main Failed : " + str(e) + "\n\n")
        return  # kernel is undefined past this point

    Max_Time = int(argv[2 + Tb_no])

    try:
        Sim.Simulation(kernel, Max_Time)
    except Exception as e:
        print("\n\nFrom Main Failed : " + str(e) + "\n\n")
Code Example #6
    def __init__(self, id):
        self.state2status = ("ready", "running", "suspend", "success",
                             "failed", "stoped")
        db = MySQLdb.connect(host, user, password, database)
        cursor = db.cursor()
        sql = "select * from t_task_info where id = " + str(id)
        cursor.execute(sql)
        data = cursor.fetchone()
        self.id = id
        self.modelName = data[1]
        self.dataSource = data[2]
        self.preParameter = data[3]
        self.taskName = data[4]
        self.state = State(data[7])
        self.process = data[8]
        self.lossFunction = data[9]
        self.learnRateUpdate = data[10]
        self.learnRate = data[11]
        self.batchSize = data[12]
        self.iterNum = data[13]
        self.metric = data[15]

        self.Optimizer = self.parseOptimizer(Optimizer(self.learnRateUpdate))
        self.LossFunc = self.parseLossFunc(self.lossFunction)
        self.Metric = self.parseMetric(self.metric)

        self.dataSource_dir = self.parseDataSourceBase(self.dataSource)
        self.line = self.parseModel()
        self.learnRateData = []
        self.trainLoss = []
        self.testLoss = []
        self.trainAcc = []
        self.testAcc = []
        self.con = threading.Condition()
        self.pos = 0
Code Example #7
def train_comparison(args, theta, dataset):
    hyper_params = {k: args[k] for k in ['b_size']}
    hyper_params['to_fix'] = [
    ]  # a selection of parameters can be fixed, e.g. the word embeddings
    # initialize optimizer with learning rate (other hyperparams: default values)
    opt = args['optimizer']
    if opt == 'adagrad':
        optimizer = Optimizer.Adagrad(theta,
                                      lr=args['learningrate'],
                                      lambda_l2=args['lambda'])
    elif opt == 'adam':
        optimizer = Optimizer.Adam(theta,
                                   lr=args['learningrate'],
                                   lambda_l2=args['lambda'])
    elif opt == 'sgd':
        optimizer = Optimizer.SGD(theta,
                                  lr=args['learningrate'],
                                  lambda_l2=args['lambda'])
    else:
        raise RuntimeError("No valid optimizer chosen")

    # train model
    evals = plain_train(optimizer,
                        dataset,
                        hyper_params,
                        n_epochs=args['n_epochs'],
                        outdir=args['out_dir'])

    # store learned model
    store_theta(
        theta, os.path.join(args['out_dir'], 'comparisonFinalModel.theta.pik'))

    # run final evaluation
    for name, tb in dataset.iteritems():
        print('Evaluation on ' + name + ' data (' + str(len(tb.examples)) +
              ' examples)')
        tb.evaluate(optimizer.theta, verbose=1)

    # create convergence plot
    for name, eval in evals.items():
        toplot = [e[key] for e in eval for key in e if 'loss' in key]
        plt.plot(xrange(len(toplot)), toplot, label=name)
    plt.legend()
    plt.title([key for key in eval[0].keys() if 'loss' in key][0])
    plt.savefig(os.path.join(args['out_dir'], 'comparisonConvergence.png'))
Code Example #8
def nn(nfeature, nclass, nunits, lamb, seed, learning_rate, beta1, beta2,
       epsilon):
    np.random.seed(seed)
    model = NeuralNetwork.NeuralNetwork(nfeature)
    model.add_layer(nunits, "relu", lamb)
    model.add_layer(nclass, "softmax", lamb)
    optimizer = Optimizer.Adam(learning_rate, beta1, beta2, epsilon)
    model.compile("crossentropy", optimizer)
    return model
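A hedged usage sketch for the factory above; the argument values are illustrative assumptions (they mirror the Adam settings used in the MNIST drivers elsewhere on this page), not a call taken from the original project:

# Illustrative call only; all hyperparameter values here are assumptions.
model = nn(nfeature=784, nclass=10, nunits=128, lamb=0.0,
           seed=10, learning_rate=0.02, beta1=0.9, beta2=0.999, epsilon=1e-7)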
Code Example #9
File: run.py Project: drbh/PYSTUDENTSTANDING
def do_all(EMPLID, ACAD_PLAN, ENROLL_YEAR):
    #     EMPLID       = 1203975040
    #     ACAD_PLAN    = 'BAMKTBS'
    #     ENROLL_YEAR  = 2011

    matcher = mm.MatchMachine(st.Student(EMPLID),
                              mj.Major(ACAD_PLAN, ENROLL_YEAR))
    opt = oz.Optimizer(matcher)
    result = [opt.out, opt.missed_courses, opt.missed_requirments]
    return result
Code Example #10
def loso_with_best_models(features_dic):
    """!
    \brief This is the function you should call if you have a
    dictionary with keys as the speakers and each one has a
    corresponding 2D matrix and a 1d label vector for each one.
    Namely: features_dic[spkr]['x'] = X_2D
            features_dic[spkr]['y'] = y_list"""

    best_models = {}

    svm_opt_obj = Optimizer.ModelOptimizer(
        'svm', generate_speaker_dependent_folds(features_dic), {
            'C': [1, 3, 5, 7, 8, 10],
            'kernel': ['rbf']
        }, ['w_acc', 'uw_acc'])

    best_models["SVM Dependent"] = svm_opt_obj.optimize_model()

    svm_opt_obj = Optimizer.ModelOptimizer(
        'svm', generate_speaker_independent_folds(features_dic), {
            'C': [1, 3, 5, 7, 8, 10],
            'kernel': ['rbf']
        }, ['w_acc', 'uw_acc'])

    best_models["SVM Independent"] = svm_opt_obj.optimize_model()

    lr_opt_obj = Optimizer.ModelOptimizer(
        'lr', generate_speaker_dependent_folds(features_dic), {
            'C': [0.01, 0.05, 0.1, 0.3, 0.5, 1],
            'penalty': ['l2']
        }, ['w_acc', 'uw_acc'])

    best_models["LR Dependent"] = lr_opt_obj.optimize_model()

    lr_opt_obj = Optimizer.ModelOptimizer(
        'lr', generate_speaker_independent_folds(features_dic), {
            'C': [0.01, 0.05, 0.1, 0.3, 0.5, 1],
            'penalty': ['l2']
        }, ['w_acc', 'uw_acc'])

    best_models["LR Independent"] = lr_opt_obj.optimize_model()

    return best_models
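For reference, a minimal sketch of the features_dic layout the docstring above describes; the speaker keys, array shapes, and labels are illustrative assumptions only:

import numpy as np

# Hypothetical input for loso_with_best_models(); keys and shapes are made up.
features_dic = {
    'spkr_01': {'x': np.random.rand(120, 40),               # 2D feature matrix
                'y': ['neutral'] * 60 + ['angry'] * 60},    # one label per row
    'spkr_02': {'x': np.random.rand(90, 40),
                'y': ['neutral'] * 45 + ['angry'] * 45},
}

best_models = loso_with_best_models(features_dic)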
Code Example #11
    def compare(folder, start_time, run_time, interface):
        runid = '_compareall'
        run_description = 'Comparing performance of all masks in folder.'

        ### PARAMS ####
        numframes = 1
        zeromask = True

        zopt = Optimizer.Optimizer(args, interface)

        # update zernike bestmask.txt for all zopt folders in dir.
        for root, dirs, files in os.walk(folder):
            zdirs = [
                os.path.join(root, d) for d in dirs if 'compare' not in d
                and any([zz in d for zz in ['zopt', 'zspace']])
            ]
            for zd in zdirs:
                zd += '/optimized_zmodes.txt'
                if os.path.isfile(zd):
                    zopt.save_zernike_mask(zd)
                    print('UPDATED bestmask.txt: ', zd)

        # create or open compare_list.txt
        if not os.path.isfile(folder + '/compare_list.txt'):
            maskfiles = Optimizer.get_mask_compare_list(folder)
        else:
            f = open(folder + '/compare_list.txt')
            maskfiles = [x.strip() for x in f]
            f.close()
        for x in maskfiles:
            print(x)

        zopt.run_compare_masks(start_time,
                               run_time,
                               numframes,
                               folder,
                               maskfiles,
                               runid,
                               run_description,
                               zeromask,
                               cmasks=None,
                               mask_labels=None)
Code Example #12
    def train(self, train, test, validate):
        if self.n_dimensions == 0:
            self.labels = np.unique(train[1])
            n_classes = len(self.labels)
            # was np.shape(x)[1]: x is undefined here; train[0] is the feature matrix
            n_dimensions = np.shape(train[0])[1]
            self.initialize_parameters(n_dimensions, n_classes)

        opti = Optimizer.Optimizer(1000, "SGD", 1, 2000, 0.13, 2000 * 0.99)
        opti.set_datasets(train, test, validate)
        opti.set_functions(self.negative_log_likelihood, self.set_training_data,
                           self.classify, self.callback, self.learn, None, None)
        opti.run()
Code Example #13
def nn(nfeature, seed, learning_rate, beta):
    np.random.seed(seed)
    model = NeuralNetwork.NeuralNetwork(nfeature)
    model.add_layer(15, "tanh")
    model.add_layer(11, "tanh")
    model.add_layer(8, "tanh")
    model.add_layer(4, "tanh")
    model.add_layer(1, "sigmoid")
    optimizer = Optimizer.Momentum(learning_rate, beta)
    model.compile("binarycrossentropy", optimizer)
    return model
Code Example #14
def nn(nfeature, nclass, seed, learning_rate, beta):
    np.random.seed(seed)
    model = NeuralNetwork.NeuralNetwork(nfeature)
    model.add_layer(11, "tanh")
    model.add_layer(9, "tanh")
    model.add_layer(6, "tanh")
    model.add_layer(3, "tanh")
    model.add_layer(nclass, "softmax")
    optimizer = Optimizer.Momentum(learning_rate, beta)
    model.compile("crossentropy", optimizer)
    return model
Code Example #15
    def train(self, train, test, validate):
        """The main training function: initializes the optimizer
        and starts the training process."""
        self.labels = np.unique(train[1])
        # initialize the optimizer
        opti = Optimizer.Optimizer(1000, "SGD", 1, 200, 0.13, 200 * 0.001)
        # set the training, testing and validation datasets
        opti.set_datasets(train, test, validate)
        # set the cost and callback functions used during training
        opti.set_functions(self.logRegressionLayer.negative_log_likelihood,
                           self.set_training_data, self.classify,
                           self.callback, self.learn, None, None)
        opti.run()
Code Example #16
    def train(self, train, test, validate):
        if self.n_dimensions == 0:
            self.labels = np.unique(train[1])
            n_classes = len(self.labels)
            # was np.shape(x)[1]: x is undefined here; train[0] is the feature matrix
            n_dimensions = np.shape(train[0])[1]
            self.initialize_parameters(n_dimensions, n_classes)

        opti = Optimizer.Optimizer(200, "SGD", 1, 1, 0.013)
        # signature: (self, maxiter=1000, method="SGD", validation_iter=1, batch_size=600, learning_rate=0.13)
        opti.set_datasets(train, test, validate)
        opti.set_functions(self.negative_log_likelihood, self.set_training_data,
                           self.classify, self.gradients, self.get_params, self.callback)
        opti.run()
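Given the constructor signature quoted in the comment above, the positional call maps onto keyword arguments as follows (a readability sketch, not code from the source project):

# Keyword form of the call above, per the commented default signature.
opti = Optimizer.Optimizer(maxiter=200, method="SGD", validation_iter=1,
                           batch_size=1, learning_rate=0.013)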
Code Example #17
    def optimize(self):
        """
        Runs the schedule optimizer.
        First: verifies that we have all the necessary data.
        Second: checks the save location.
        """

        still_needed = []
        # Verify the required information
        if self.preference_input is None:
            still_needed.append("Preferences")
        if self.LP_input is None:
            still_needed.append("LP_input")
        if self.teacher_file is None:
            still_needed.append("Teacher File (secondary)")

        s = ""
        if len(still_needed) > 0:
            s = str(still_needed)
            messagebox.showerror("Error",
                                 "You are missing the following\n\n" + s)
            return

        GAP = self.slider.get()
        print(GAP)

        print("All good")

        # optimize_schedule(self.preference_input, self.LP_input, None,
        # 	self.teacher_file, GAP, self.requirements, self.save_location)
        # The none is for Grades, as I just hard coded that in the opt code for now

        # Create optimization instance
        O = Optimizer(self.preference_input, self.LP_input, None,
                      self.teacher_file, GAP, self.requirements,
                      self.save_location)
        O.optimize()
Code Example #18
def do_all(EMPLID, ACAD_PLAN, ENROLL_YEAR):

    start_time = time.time()
    matcher = mm.MatchMachine(st.Student(EMPLID),
                              mj.Major(ACAD_PLAN, ENROLL_YEAR))
    print("--- %s seconds ---" % (time.time() - start_time))

    start_time = time.time()
    opt = oz.Optimizer(matcher)
    print("--- %s seconds ---" % (time.time() - start_time))

    start_time = time.time()
    result = opt.out, opt.missed_courses, opt.missed_requirments
    print("--- %s seconds ---" % (time.time() - start_time))

    return result
Code Example #19
   def doIt(self, ArchitectureType):
      ReturnedTuple = self.transformObjectsIntoCode(self.Root, 0, 0, [])

      if ReturnedTuple[0]:
         print("Compilation successfull")
      else:
         print("Compilation Error")
         print(ReturnedTuple[1])

      print("\n\n")

      print(self.ImmediateCodeObj.debugImmediateCode(self.ImmediateCodeObj.ImmCodeData))

      self.ImmediateCode = self.ImmediateCodeObj.ImmCodeData

      OptimizerObj = Optimizer()
      OptimizerObj.ImmediateInstructions = self.ImmediateCode
      OptimizerObj.Variables = self.ImmediateCodeObj.Variables


      OptimizeAddSubToIncDec = True

      if ArchitectureType == Generator.EnumArchitetureType.PARALLEL:
         OptimizeAddSubToIncDec = False


      OptimizerObj.doOptimization(OptimizeAddSubToIncDec)


      self.ImmediateCodeOptimized = OptimizerObj.ImmediateInstructions
      self.Variables              = OptimizerObj.Variables
      
      print("\n\n")
      
      print(self.ImmediateCodeObj.debugImmediateCode(self.ImmediateCodeOptimized))

      if   ArchitectureType == Generator.EnumArchitetureType.PARALLEL:
         self.ArchOutput.ImmediateCodeOptimized = self.ImmediateCodeOptimized
         self.ArchOutput.VariablesOptimized     = self.Variables

         (CalleeSuccess, CalleeMessage) = self.ArchOutput.generateParallelDesign()

         if not CalleeSuccess:
            print("Error: " + CalleeMessage)


      elif ArchitectureType == Generator.EnumArchitetureType.SERIAL:
         pass
      else:
         print("Internal error")
         return False
Code Example #20
def main(argv):

    Tb_no = int(argv[1])

    File_names = []

    for i in range(0, Tb_no):
        File_names.append(argv[2 + i])

    # Parses the vhdl files
    Signals, Process_Set = P.Parser(File_names)

    # Gets the optimized lightweight Kernel
    kernel = OP.Optimizer(Signals, Process_Set)

    Max_Time = int(argv[2 + Tb_no])

    #Simulates the files
    Sim.Simulation(kernel, Max_Time)
Code Example #21
# imports inferred from the calls below
import load_mnist
import metrics
import NeuralNetwork
import numpy as np
import Optimizer
import plot_results
import time

# (1) Set up data
ntrain = 6000
nvalid = 1000
nclass = 10
Xtrain,Ytrain,Xvalid,Yvalid = load_mnist.load_mnist(ntrain,nvalid)
# (2) Define model
nfeature = Xtrain.shape[0]
np.random.seed(10)
lamb = 0.0
model = NeuralNetwork.NeuralNetwork(nfeature)
model.add_layer(128,"relu",lamb)
model.add_layer(nclass,"softmax",lamb)
# (3) Compile model
optimizer = Optimizer.Adam(0.02,0.9,0.999,1e-7)
model.compile("crossentropy",optimizer)
model.summary()
# (4) Train model
epochs = 40
time_start = time.time()
history = model.fit(Xtrain,Ytrain,epochs,batch_size=ntrain,validation_data=(Xvalid,Yvalid))
time_end = time.time()
print("Train time: {}".format(time_end - time_start))
# (5) Predictions and plotting
# confusion matrix
print("Metrics for Validation Dataset")
Yvalid_pred = model.predict(Xvalid)
metrics.confusion_matrix(Yvalid,Yvalid_pred,nclass)
# plot loss, accuracy, and animation of results
plot_results.plot_results_history(history,["loss","valid_loss"])
Code Example #22
import Dicretizer
import Optimizer

print('unsupervised discretization on weatherLong.csv')
discretizer = Dicretizer.Unsupervised()
result = discretizer.discretize("weatherLong.csv", '$temp')
print(result[1])

print('unsupervised discretization on auto.csv')
discretizer = Dicretizer.Unsupervised()
result = discretizer.discretize("auto.csv", '$horsepower')
print(result[1])

optimizer1 = Optimizer.Dom()

print("domination score of weatherLong.csv")
result = optimizer1.getScore("weatherLong.csv")
print(result[1])

print("domination score of auto.csv")
result = optimizer1.getScore("auto.csv")
print(result[1])
Code Example #23
'''
train_dataset, test_dataset = Data_Reader.Mnist.Mnist_dataset().get_dataset()
loader_train = Data_Reader.get_dataloader(dataset=train_dataset,
                                          batch_size=param['batch_size'])
loader_test = Data_Reader.get_dataloader(dataset=test_dataset,
                                         batch_size=param['test_batch_size'])

'''
Build the model
The model is already defined in model.py; we simply load it here
'''
modelpath = './train3_AdvT.pkl'
net = Models.Lenet5.Lenet5()  # instantiate the model
net = Models.load_state_dict(net, modelpath)
base.enable_cuda(net)  # move the model to CUDA
num_correct, num_samples, acc = Optimizer.test(net, loader_test)  # measure the initial accuracy
print('[Start] right predict:(%d/%d) ,pre test_acc=%.4f%%' % (num_correct, num_samples, acc))

'''
Train the model
'''
net.train()  # switch to training mode
criterion = nn.CrossEntropyLoss()  # loss function
optimizer = torch.optim.RMSprop(net.parameters(), lr=param['learning_rate'],
                                weight_decay=param['weight_decay'])  # optimizer: learning rate, regularization, etc.

'''Adversarial attack'''
#adversary = Adversary.FGSM.FGSM(net, param['epsilon'])  # attack method
adversary = Adversary.LinfPGD.LinfPGDAttack(net, param['epsilon'])

for epoch in range(param['num_epochs']):
Code Example #24
def opti_mom(model):
    return Optimizer.SGD(model.param(), lr = lr, momentum = True, mu = mu)
Code Example #25
def opti(model):
    return Optimizer.SGD(model.param(),lr = lr)
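Both helpers read lr (and, for opti_mom, mu) from the enclosing scope. A minimal sketch of the assumed surrounding setup, with illustrative values:

# Assumed module-level hyperparameters; the values are illustrative only.
lr = 0.01  # learning rate read by opti() and opti_mom()
mu = 0.9   # momentum coefficient read by opti_mom()

# 'model' stands for any object exposing param(), as in the calls above.
optimizer = opti_mom(model)   # SGD with momentum
# optimizer = opti(model)     # plain SGD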
Code Example #26
    def initialDictionary(self):
        # if there is not a single negative value in the basic coefficients, then it is not necessary to
        # run the initialization phase
        if all(i >= 0 for i in self.b):
            return self
        else:
            if DEBUG:
                print("Original Dictionary")
                print(self)

            # get the dictionary with the objective function changed
            newObjDict = self.newObjectiveForInitializationPhase()

            if DEBUG:
                print("Dictionary with New Objective Function")
                print(newObjDict)

            # get the dual of the new dictionary
            dualDict = newObjDict.dual()

            if DEBUG:
                print("Dual Dictionary with Objective Changed")
                print(dualDict)

            # optimize the dual of the new dictionary
            opt = Optimizer()
            steps, dualOptmized, status = opt.solveLinearProgrammingRelaxation(
                dualDict)

            # if the optimization phase results in an Unbounded dictionary,
            # then the original dictionary is Infeasible
            if status == Dictionary.UNBOUNDED:
                self.statuscode = Dictionary.INFEASIBLECODE
                self.status = Dictionary.INFEASIBLE
                return self
            else:
                # mount the primal dictionary from the optimized dual
                if DEBUG:
                    print("Dual After Initialization Phase")
                    print(dualOptmized)

                # first, get the raw primal
                primalDictionary = dualOptmized.dual()

                if DEBUG:
                    print("Primal Dictionary After Initialization")
                    print(primalDictionary)

                # second, change the objective function to the original objective
                A_aux = primalDictionary._getAuxMatrix()
                # build C_aux as an object ndarray so the += below adds values
                # element-wise instead of extending a Python list
                C_aux = np.array([mp.mpf(str(x)) for x in np.zeros(len(self.c) + 1)])

                # compute the new value of the C vector according to the original objective function (original c vector)
                for i in range(len(self.nonBasicIdx)):
                    idx_aux = np.nonzero(
                        primalDictionary.basicIdx == self.nonBasicIdx[i])[0]

                    # if the variable is one of the basic variables of the primal,
                    # then add the associated row times the coefficient in the original dictionary
                    if len(idx_aux) > 0:
                        idx_aux = idx_aux[0]
                        C_aux += self.c[i] * A_aux[idx_aux, :]
                    else:
                        # if the variable is one of the non basic variables of the primal,
                        # then add the coefficient in the original dictionary to the related column in the primal
                        idx_aux = np.nonzero(primalDictionary.nonBasicIdx ==
                                             self.nonBasicIdx[i])[0]

                        if len(idx_aux) > 0:
                            idx_aux = idx_aux[0]
                            C_aux[idx_aux + 1] += self.c[
                                i]  # index shifted by +1 because of the Z value

                primalDictionary.z = C_aux[0]
                primalDictionary.c = C_aux[1:primalDictionary.n + 1]
                primalDictionary.status = Dictionary.STANDARD

                if DEBUG:
                    print(
                        "Primal Dictionary After Initialization with Original Objective"
                    )
                    print(primalDictionary)

                return primalDictionary
Code Example #27
    def preProcess(self):
        optimizer1 = Optimizer.Dom()
        self.filteredData = optimizer1.getScore(self.csvFile)[0]
        self.table = Rows.TableLoader(self.csvFile)
        self.table.loadTableWithGenerator()
Code Example #28
y_test = one_hot[50000:60000]
x_test = x_train[50000:60000]
x_train = x_train[:50000]

# initialize parameters
num_classes = y_train.shape[1]
hidden = 50
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
print(x_train.shape)

model = Model()
model.addlayer(ConvLayer(32, (3, 3)), name='conv1')
model.addlayer(Layers.ReLU(), name='relu1')
model.addlayer(Layers.Dropout(), name='dropout1')
model.addlayer(Layers.Flatten(), name='flatten')
model.addlayer(Layers.MulLayer(10), name="w1")
model.addlayer(Layers.AddLayer(10), name='b1')
model.addlayer(Layers.ReLU(), name='relu3')
model.addlayer(Layers.Dropout(0.5), name='dropout3')
model.addlayer(Layers.SoftmaxLayer(), name='softmax')

optimizer = Optimizer.Adam(batch_size=32)
model.train(x_train, y_train, optimizer, 10000, 0.01)

model.save()
print("--TRAIN EVAL--")
model.eval(x_train, y_train)
print("--TEST EVAL--")
model.eval(x_test, y_test)
Code Example #29
def main(argv):
  
  separator = ' '
  
  # read m(# of constraints) and n(# of variables)
  m, n = toInt(readLine(sys.stdin, separator))
  
  if DEBUG:
    sys.stderr.write(str(m)+"\n")
    sys.stderr.write(str(n)+"\n")
  
  # read the list of basic indices
  basicIdx = toInt(readLine(sys.stdin, separator))
  
  if DEBUG:
    sys.stderr.write(str(basicIdx)+"\n")
  
  # read the list of non-basic indices
  nonBasicIdx = toInt(readLine(sys.stdin, separator))
  
  if DEBUG:
    sys.stderr.write(str(nonBasicIdx)+"\n")
  
  # read the list of values for the basic indices (at the same order they are declared)
  # remember: Ax <= b, b is the list being read
  basicValues = toFloat(readLine(sys.stdin, separator))
  
  if DEBUG:
    sys.stderr.write(str(basicValues)+"\n")
  
  # read the matrix A (from problem Ax <= b)
  coeffMatrix = []
  for i in range(m):
    row = toFloat(readLine(sys.stdin, separator))
    coeffMatrix.append(row)
  
  coeffMatrix = np.array(coeffMatrix)
  
  if DEBUG:
    sys.stderr.write(str(coeffMatrix)+"\n")
  
  # read the current objective value and the current objective row coefficients
  coeffObjs = toFloat(readLine(sys.stdin, separator))
  z = coeffObjs[0]
  coeffObj = coeffObjs[1:]
  
  dictionary = Dictionary(m, n, basicIdx, nonBasicIdx, basicValues, coeffMatrix, coeffObj, z, mp.power(10,-100))

  if DEBUG:
    sys.stderr.write("Original Dictionary\n")
    sys.stderr.write(str(dictionary)+"\n")
  
  # Initialization Phase
  newDictionary = dictionary.initialDictionary()

  if DEBUG:
    sys.stderr.write("Dictionary After Initialization Phase\n")
    sys.stderr.write(str(newDictionary)+"\n")

  sys.stdout.flush()

  if newDictionary.statuscode == Dictionary.INFEASIBLECODE:
    sys.stdout.write(newDictionary.status)
    return

  opt = Optimizer()
  cuts, optmizedDict, status = opt.solveLinearProgrammingRelaxation(newDictionary)

  if DEBUG:
    sys.stderr.write("Ootimized Dictionary\n")
    sys.stderr.write(str(optmizedDict)+"\n")
  
  if optmizedDict.statuscode < 0:
    sys.stdout.write(status)
  else:
    print "%.6f" % optmizedDict.z
Code Example #30
File: mlp.py Project: azheng92/IEMS_490_FINAL
    learning_rate = 0.01
    L1_reg = 0.00
    L2_reg = 0.0001
    n_epochs = 2
    dataset = 'mnist.pkl.gz'
    batch_size = 20
    n_hidden = 500
    n_out = 10

    (xtrain,ytrain), (xvalid,yvalid), (xtest, ytest) = Pickle.load(open('mnist.pkl'))
    print '... building the model'

    classifier = MLP( n_in=28 * 28,
                      n_out=10,
                      layers = [LayerData(n_out=n_hidden, activation=T.tanh)],
                      L1_reg = L1_reg,
                      L2_reg = L2_reg)

    print '... training'
    start = time.clock()
    opt.gradient_descent(NN = classifier,
                        x_train = xtrain,
                         y_train = ytrain,
                         learning_rate = learning_rate,
                         n_epochs = n_epochs,
                         batch_size = batch_size,
                         x_valid = xvalid,
                         y_valid = yvalid)

Code Example #31
        low=-np.sqrt(6. / (784 + 500)),
        high=np.sqrt(6. / (784 + 500)),
        size=(784, 500))
    
    W_init = th.shared(np.asarray(random_arr, dtype=th.config.floatX), 'W', borrow = True)

    # Autoencoder - NO NOISE
    AE = AutoEncoder(n_in = 784,
                     layers = [NN.LayerData(n_out = 500, W = W_init)],
                     rng = rng)

    print '... Training Autoencoder'
    opt.gradient_descent(AE,
                         x_train = xtrain,
                         y_train = xtrain,
                         learning_rate = 0.1,
                         batch_size = 20,
                         n_epochs = 20,
                         x_valid = xvalid,
                         y_valid = xvalid)
                         
    # Autoencoder - 30% NOISE
    DAE = DenoisingAutoEncoder(n_in = 784,
                                layers = [NN.LayerData(n_out = 500, W = W_init)],
                                corruption_level = 0.3,
                                rng = rng)


    print '... Training Autoencoder'
    opt.gradient_descent(DAE,
                         x_train = xtrain,
                         y_train = xtrain,
Code Example #32
  def initialDictionary(self):
    # if there is not a single negative value in the basic coefficients, then it is not necessary to
    # run the initialization phase
    if all(i >= 0 for i in self.b):
      return self
    else:
      if DEBUG:
        print("Original Dictionary")
        print(self)

      # get the dictionary with the objective function changed
      newObjDict = self.newObjectiveForInitializationPhase()

      if DEBUG:
        print("Dictionary with New Objective Function")
        print(newObjDict)

      # get the dual of the new dictionary
      dualDict = newObjDict.dual()

      if DEBUG:
        print("Dual Dictionary with Objective Changed")
        print(dualDict)
      
      # optimize the dual of the new dictionary
      opt = Optimizer()
      steps, dualOptmized, status = opt.solveLinearProgrammingRelaxation(dualDict)
      
      # if the optimization phase results in an Unbounded dictionary, 
      # then the original dictionary is Infeasible
      if status == Dictionary.UNBOUNDED:
        self.statuscode = Dictionary.INFEASIBLECODE
        self.status = Dictionary.INFEASIBLE
        return self
      else:
        # mount the primal dictionary from the optimized dual
        if DEBUG:
          print("Dual After Initialization Phase")
          print(dualOptmized)

        # first, get the raw primal
        primalDictionary = dualOptmized.dual()

        if DEBUG:
          print("Primal Dictionary After Initialization")
          print(primalDictionary)
        
        # second, change the objective function to the original objective
        A_aux = primalDictionary._getAuxMatrix()
        # build C_aux as an object ndarray so the += below adds values
        # element-wise instead of extending a Python list
        C_aux = np.array([mp.mpf(str(x)) for x in np.zeros(len(self.c) + 1)])
        
        # compute the new value of the C vector according to the original objective function (original c vector)
        for i in range(len(self.nonBasicIdx)):
          idx_aux = np.nonzero(primalDictionary.basicIdx == self.nonBasicIdx[i])[0]
          
          # if the variable is one of the basic variables of the primal, 
          # then add the associated row times the coefficient in the original dictionary
          if len(idx_aux) > 0:
            idx_aux = idx_aux[0]
            C_aux += self.c[i]*A_aux[idx_aux, :]
          else:
            # if the variable is one of the non basic variables of the primal, 
            # then add the coefficient in the original dictionary to the related column in the primal
            idx_aux = np.nonzero(primalDictionary.nonBasicIdx == self.nonBasicIdx[i])[0]
            
            if len(idx_aux) > 0:
              idx_aux = idx_aux[0]
              C_aux[idx_aux+1] += self.c[i] # index shifted by +1 because of the Z value
        
        primalDictionary.z = C_aux[0]
        primalDictionary.c = C_aux[1:primalDictionary.n+1]
        primalDictionary.status = Dictionary.STANDARD
        
        if DEBUG:
          print("Primal Dictionary After Initialization with Original Objective")
          print(primalDictionary)
        
        return primalDictionary
Code Example #33
    print('Match file:', opt_file)
    matches = pickle.load(open(opt_file, "rb"))
elif os.path.isfile(source_file):
    print('Match file:', source_file)
    matches = pickle.load(open(source_file, "rb"))
else:
    print("Cannot find a matches file to load... aborting")
    quit()

print('Match features:', len(matches))

# load the group connections within the image set
groups = Groups.load(args.project)
print('Main group size:', len(groups[0]))

opt = Optimizer.Optimizer(args.project)
opt.setup(proj, groups[0], matches, optimized=args.refine)
cameras, features, cam_index_map, feat_index_map, fx_opt, fy_opt, cu_opt, cv_opt, distCoeffs_opt = opt.run(
)

# mark all the optimized poses as invalid
for image in proj.image_list:
    opt_cam_node = image.node.getChild('camera_pose_opt', True)
    opt_cam_node.setBool('valid', False)

for i, cam in enumerate(cameras):
    image_index = cam_index_map[i]
    image = proj.image_list[image_index]
    ned_orig, ypr_orig, quat_orig = image.get_camera_pose()
    print('optimized cam:', cam)
    rvec = cam[0:3]
Code Example #34
# driver_logisticregression.py

import LRegression
import example_classification
import matplotlib.pyplot as plt
import numpy as np
import Optimizer
import plot_results

# (1) Set up data
nfeature = 2
m = 1000
case = "linear"
nclass = 2
X, Y = example_classification.example(nfeature, m, case, nclass)
# (2) Define model
lamb = 0.01
model = LRegression.LRegression(nfeature, "sigmoid", lamb)
# (3) Compile model
optimizer = Optimizer.GradientDescent(0.5)
model.compile("binarycrossentropy", optimizer)
# (4) Train model
epochs = 100
history = model.fit(X, Y, epochs)
# (5) Results
# plot loss and accuracy
plot_results.plot_results_history(history, ["loss"])
plot_results.plot_results_history(history, ["accuracy"])
# plot heatmap in x0-x1 plane
plot_results.plot_results_classification((X, Y), model, nclass)
plt.show()
Code Example #35
# Change random seed to get different random numbers: seed (integer)
# Change number of training data samples: ntrain up to 60000
# Change number of validation data samples: nvalid up to 10000
# Change learning rate for optimization: learning_rate >0
# Change number of iterations: niteration
seed = 10
ntrain = 6000
nvalid = 1000
learning_rate = 0.02
niteration = 40
# (1) Set up data
nclass = 10
Xtrain, Ytrain, Xvalid, Yvalid = load_mnist.load_mnist(ntrain, nvalid)
# (2) Define model
nfeature = Xtrain.shape[0]
np.random.seed(seed)
model = NeuralNetwork.NeuralNetwork(nfeature)
model.add_layer(128, "relu")
model.add_layer(nclass, "softmax")
# (3) Compile model
optimizer = Optimizer.Adam(learning_rate, 0.9, 0.999, 1e-7)
model.compile("crossentropy", optimizer)
model.summary()
# (4) Train model
history = model.fit(Xtrain, Ytrain, niteration)
# (5) Predictions and plotting
# plot data, loss, and animation of results
Yvalid_pred = model.predict(Xvalid)
plot_results.plot_data_mnist(Xtrain, Ytrain)
plot_results.plot_results_history(history, ["loss"])
plot_results.plot_results_mnist_animation(Xvalid, Yvalid, Yvalid_pred, 25)
Code Example #36
def main(argv):
  
  separator = ' '
  
  # read m(# of constraints) and n(# of variables)
  m, n = toInt(readLine(sys.stdin, separator))
  
  if DEBUG:
    sys.stderr.write(str(m)+"\n")
    sys.stderr.write(str(n)+"\n")
  
  # read the list of basic indices
  basicIdx = toInt(readLine(sys.stdin, separator))
  
  if DEBUG:
    sys.stderr.write(str(basicIdx)+"\n")
  
  # read the list of non-basic indices
  nonBasicIdx = toInt(readLine(sys.stdin, separator))
  
  if DEBUG:
    sys.stderr.write(str(nonBasicIdx)+"\n")
  
  # read the list of values for the basic indices (at the same order they are declared)
  # remember: Ax <= b, b is the list being read
  basicValues = toFloat(readLine(sys.stdin, separator))
  
  if DEBUG:
    sys.stderr.write(str(basicValues)+"\n")
  
  # read the matrix A (from problem Ax <= b)
  coeffMatrix = []
  for i in range(m):
    row = toFloat(readLine(sys.stdin, separator))
    coeffMatrix.append(row)
  
  coeffMatrix = np.array(coeffMatrix)
  
  if DEBUG:
    sys.stderr.write(str(coeffMatrix)+"\n")
  
  # read the current objective value and the current objective row coefficients
  coeffObjs = toFloat(readLine(sys.stdin, separator))
  z = coeffObjs[0]
  coeffObj = coeffObjs[1:]
  
  dictionary = Dictionary(m, n, basicIdx, nonBasicIdx, basicValues, coeffMatrix, coeffObj, z)

  if DEBUG:
    sys.stderr.write("Original Dictionary\n")
    sys.stderr.write(str(dictionary)+"\n")
  
  opt = Optimizer()
  cuts, optmizedDict, status = opt.solveIntegerLinearProgrammingWithCuttingPlane(dictionary)

  if DEBUG:
    sys.stderr.write("Ootimized Dictionary\n")
    sys.stderr.write(str(dictionary)+"\n")
  
  #sys.stdout.flush()
  if status == Dictionary.INFEASIBLE:
    print(status)
  elif status == Dictionary.UNBOUNDED:
    print(status)
  else:
    print "%.6f" % optmizedDict.z
Code Example #37
    '/home/philipp/alzheimers/neurodegeneration-forecast/Network_Data/Control/Average/adj_threshold_6')

from openpyxl import load_workbook

wb = load_workbook(
    '/home/philipp/alzheimers/neurodegeneration-forecast/Neurodeg_Data_SUVR/SUVRs_New_Atlas_Modeling.xlsx')
ws = wb['SUVRs_New_Atlas']
concentrations = np.array([[i.value for i in j] for j in ws['B2':'UZ21']])
concentrations = concentrations[np.argsort(np.sum(concentrations, axis=1))]

# params = {"lowerInit": 0.3,
#          "upperInit": 0.6}  # 2149.3

# params = {'ConcentrationLinear': 2.6920626035910398, 'ProductionConstant': 0.7433074157266951, 'ConcentrationSigmoidA': 3.166102288632473, 'ConcentrationSigmoidB': 0.6100809472161937, 'ConcentrationSigmoidC': 7.449624824119648, 'WD-LinearA': 0.35120110860618303, 'WD-SigmoidA': 4.39411213859124, 'WD-SigmoidB': 7.652702177481957, 'WD-SigmoidC': 1.5091546379416079, 'lowerInit': 1.005106421821866, 'upperInit': 1.2086002841534664} #2052.006795851523
#params = {'lowerInit': 0.8, 'upperInit': 1.0}
params = {'ConcentrationLinear': 0.07, 'ProductionConstant': 0.01, 'ConcentrationSigmoidA': 1.0, 'ConcentrationSigmoidB': 1.0, 'ConcentrationSigmoidC': 2, 'WD-LinearA': 0.05, 'WD-SigmoidA': 0.1, 'WD-SigmoidB': 0.8194874373805783, 'WD-SigmoidC': 5, 'lowerInit': 0.9, 'upperInit': 1.20}


bsm = BrainsphereModel.BrainsphereModel(connectivity, concentrations, params=params, euclideanAdjacency=adjacency)

import Optimizer

o = Optimizer.Optimizer(bsm)
o.optimize()

# import timeit

# print(timeit.timeit('bsm.gradient()',number=10,globals=globals()))  # 96.5
# print(timeit.timeit('bsm.gradient4()',number=10,globals=globals())) # 99.0
# print(bsm.gradient4())
Code Example #38
File: main.py Project: SundongCandy/lift-js
    output_file = args['--output']
    ast = None
    try:
        with open(input_file, 'r') as f:
            Parser.build("Program")
            source = f.read()
            ast = Parser.parse(source)
    except IOError:
        print 'Error opening file %s. Please check the file or ' \
              'the directory.' % input_file
        sys.exit(1)

    if ast is None:
        error_list = list(Parser.error_list.keys())
        error_list.sort()
        for key in error_list:
            sys.stdout.write(Parser.error_list[key])
        sys.stdout.flush()
        sys.exit(-1)

    if args['-O']:
        ast = Optimizer.optimize(ast)

    try:
        with open(output_file, 'w') as f:
            f.write(Generator.generate(ast))
    except IOError:
        print 'Error writing to file %s. Please check the file or ' \
              'the directory.' % output_file
        sys.exit(1)