Example #1
    def fit(self, data, labels, l1_weight_cost=0., l2_weight_cost=0.):
        if self.optimizer == "lbfgs":
            from scipy.optimize import minimize
            res = minimize(self.f_and_g,
                           self.params.copy(),
                           (data, labels, l1_weight_cost, l2_weight_cost),
                           method="L-BFGS-B",
                           jac=True,
                           options={"ftol": 1E-4})
            p = res.x
        elif self.optimizer == "minimize_cg":
            max_n_line_search = np.inf
            p, g, n_line_searches = minimize(
                self.params.copy(),
                (data, labels, l1_weight_cost, l2_weight_cost),
                self.f_and_g,
                True,
                maxnumlinesearch=max_n_line_search,
                verbose=self.verbose)
        else:
            raise ValueError("Unknown optimizer setting {}".format(
                self.optimizer))

        if self.verbose:
            print("Training complete!")
        self.update_params(p)
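A minimal, self-contained sketch of the L-BFGS-B branch above, assuming a toy quadratic objective in place of the model's real f_and_g (which returns the cost and its gradient together); data and l1_weight_cost are unused in the toy:

import numpy as np
from scipy.optimize import minimize

def f_and_g(params, data, labels, l1_weight_cost, l2_weight_cost):
    # Toy stand-in: squared distance to `labels` plus an L2 penalty;
    # returns (cost, gradient) so it can be used with jac=True.
    diff = params - labels
    cost = 0.5 * np.sum(diff ** 2) + 0.5 * l2_weight_cost * np.sum(params ** 2)
    grad = diff + l2_weight_cost * params
    return cost, grad

x0 = np.zeros(3)
res = minimize(f_and_g, x0,
               args=(None, np.array([1., 2., 3.]), 0., 0.1),
               method="L-BFGS-B", jac=True, options={"ftol": 1e-4})
print(res.x)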
Example #2
def main(args):
    mainfile = args[0]
    mainname = os.path.splitext(os.path.basename(mainfile))[0]
    mainsrc = open(mainfile).read()

    num_statements = counter.get_statements(mainsrc)

    testfile = args[1]
    testname = os.path.splitext(os.path.basename(testfile))[0]
    testsrc = open(testfile).read()
    test_code = import_code(testsrc, testname)

    def mytest(lst_locations):
        try:
            mutant_src = mu.gen_mutant(mainsrc, lst_locations)
            mutant = import_code(mutant_src, mainname)
            r = evalmutant(mainname, mutant, test_code)
            #if not r:
            #    with open(mainfile + '_mutant_' + '_'.join([str(i) for i in lst_locations]) + '.py', 'w+') as a:
            #        print(mutant_src,file=a)
            return r
        except SyntaxError:
            print('Syntax!', lst_locations)
            return True

    mutate_lst = sorted(list(range(1, num_statements + 1)))
    r = m.minimize(mutate_lst, mytest)
    composite_list = [r[x:x + 10] for x in range(0, len(r), 10)]
    for i in composite_list:
        print(i)
    print('Total executions: ', nexecutions, ' for ', len(mutate_lst),
          ' muscore = ',
          len(r) / len(mutate_lst))
Example #3
def Module(name, filename, munge_globals=True):
    with open(filename, "rb" if p.PY2 else "r") as f:
        code = f.read()
    if args.minimize:
        # in modules only locals are worth optimizing
        code = minimize.minimize(code, True, args.obfuscate and munge_globals, args.obfuscate, args.obfuscate)
    return p.Module(name, code, False)
Example #4
 def step(self, *args):
     from minimize import minimize
     updateparams(self.model, minimize(\
                self.model.params.copy(),self.cost,self.grad,\
                    args=args,maxnumfuneval=self.maxfuneval,
                    verbose=False)[0].copy())
     Trainer.step(self, *args)
Example #5
 def _fit_with_minimize(self, learning_rate=0.1, weight_decay=0, momentum=0, verbose = True, max_lr_iter = 5, isnorm = True):
     big_weight = weight_extend(self)
     big_weight, _,_ = minimize.minimize(big_weight, helper_func_eval, (self, isnorm), maxnumlinesearch=3, verbose = False)
     weight_compress(big_weight, self)
     if verbose:
         self.feed_forward()
         return self.empirical_error()
Example #6
def trainNN(inputSize, hid1Size, hid2Size, numClasses, lambda_, inputData, labels, n_iterations=100, displ=True):
    if displ:
       sel = np.random.permutation(inputData.shape[1])
       sel = sel[0:100]
       rbm.displayData(inputData[:, sel].T)
    T1 = debugInitializeWeights(hid1Size, inputSize)
    T2 = debugInitializeWeights(hid2Size, hid1Size)
    T3 = debugInitializeWeights(numClasses, hid2Size)
    b1 = np.zeros((hid1Size, 1))
    b2 = np.zeros((hid2Size, 1))
    b3 = np.zeros((numClasses, 1))
    T = np.concatenate((T1.reshape(len(T1.flatten(1)), 1), 
                        T2.reshape(len(T2.flatten(1)), 1), 
                        T3.reshape(len(T3.flatten(1)), 1),
                        b1, b2, b3))

    NNCost = lambda p: CostFunction(p, inputSize, hid1Size, hid2Size, numClasses, inputData, labels, lambda_)
    T, cost, iteration = minimize.minimize(NNCost, T, n_iterations)

    T1 = T[0:(hid1Size*inputSize)].reshape(hid1Size,inputSize)
    T2 = T[(hid1Size*inputSize):(hid1Size*inputSize)+(hid2Size*hid1Size)].reshape(hid2Size,hid1Size)
    T3 = T[(hid1Size*inputSize)+(hid2Size*hid1Size):(hid1Size*inputSize)+(hid2Size*hid1Size)+(
         hid2Size*numClasses)].reshape(numClasses,hid2Size)

    pred = predict(T1, T2, T3, inputData)
    return pred
Example #7
def Module(name, filename, munge_globals=True):
    with open(filename, "rb" if p.PY2 else "r") as f:
        code = f.read()
    if args.minimize:
        # in modules only locals are worth optimizing
        code = minimize.minimize(code, True, args.obfuscate and munge_globals, args.obfuscate, args.obfuscate)
    return p.Module(name, code)
Example #8
    def fit_nce(self, X, k=1, mu_noise=None, L_noise=None,
                mu0=None, L0=None, c0=None, method='minimize',
                maxnumlinesearch=None, maxnumfuneval=None, verbose=False):
        _class = self.__class__
        D, Td = X.shape
        self._init_params(D, mu_noise, L_noise, mu0, L0, c0)

        noise = self._params_noise
        Y = mvn.rvs(noise.mu, noise.L, k * Td).T

        maxnumlinesearch = maxnumlinesearch or DEFAULT_MAXNUMLINESEARCH
        obj = lambda u: _class.J(X, Y, noise.mu, noise.L, *vec_to_params(u))
        grad = lambda u: params_to_vec(
            *_class.dJ(X, Y, noise.mu, noise.L, *vec_to_params(u)))

        t0 = params_to_vec(*self._params_nce)
        if method == 'minimize':
            t_star = minimize(t0, obj, grad,
                              maxnumlinesearch=maxnumlinesearch,
                              maxnumfuneval=maxnumfuneval, verbose=verbose)[0]
        else:
            t_star = sp_minimize(obj, t0, method='BFGS', jac=grad,
                                 options={'disp': verbose,
                                          'maxiter': maxnumlinesearch}).x
        self._params_nce = GaussParams(*vec_to_params(t_star))
        return (self._params_nce, Y)
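For comparison, a tiny self-contained sketch of the else branch above (SciPy BFGS with a separate gradient callable); obj and grad below are toy stand-ins for _class.J and _class.dJ:

import numpy as np
from scipy.optimize import minimize as sp_minimize

obj = lambda u: np.sum((u - 1.0) ** 2)      # toy objective
grad = lambda u: 2.0 * (u - 1.0)            # its gradient

t0 = np.zeros(4)
t_star = sp_minimize(obj, t0, method='BFGS', jac=grad,
                     options={'disp': False, 'maxiter': 100}).x
print(t_star)  # close to a vector of ones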
Example #9
 def step(self,*args):
     from minimize import minimize
     updateparams(self.model, minimize(\
                  self.model.params.copy(),self.cost,self.grad,\
                  args=args,maxnumfuneval=self.maxfuneval,
                  verbose=False)[0].copy())
     Trainer.step(self,*args)
Example #10
def process_data(inputs, values): # function that runs the neural network itself
    _beta = 2 # penalty on data sparsity; limits the model's dispersion
    _lambda = 1e-4 # limits the variation of the weights (weight decay)
    _epsilon = 0.1 # avoids eigenvalues equal to zero in the matrix
    _sparsityParam = 0.6 # desired average activation of each neuron, between 0 and 1
    num_iter = 5000 # maximum number of iterations

    inputSize = inputs.shape[0] # number of input variables, 6 in this case
    m = inputs.shape[1] # number of training cases
    hiddenSize = 180 # number of hidden neurons, hidden because what they do is not well understood
    outputSize = 1 # output dimensions, 1 in this case because it is a regression problem

    theta = initializeParameters(outputSize, hiddenSize, inputSize) # initializes the network's weights and biases
    # and returns a vector of dimension hidden*input + hidden*output + hidden + output
    inputs, meanInput, ZCAWhite = preProcess(inputs, _epsilon) # parameter initialization
    # returns random numbers as a first approximation
    costF = lambda p: cost.sparseLinearNNCost(p, inputSize, hiddenSize, outputSize, _lambda, _sparsityParam, _beta, inputs, values) # defines the cost function, which takes the parameter vector theta as its argument

    optTheta, costV, i = minimize.minimize(costF, theta, maxnumlinesearch=num_iter)
    pred = cost.predict(inputs, optTheta, inputSize, hiddenSize, outputSize)

    diff = np.linalg.norm(pred-values)/np.linalg.norm(pred+values) # weight of the parameters

    print "RMSE: %g" % (diff)
    

    np.savez('parameters.npz', optTheta = optTheta, meanInput = meanInput, ZCAWhite = ZCAWhite)
Example #11
def minimize(text):
    try:
        import jsmin
        return jsmin.jsmin(text)
    except Exception, e:
        import minimize
        return minimize.minimize(text)
Example #12
def SPGP_train(X, Y, num_pseudo_inputs, num_starts=1):
    """
    Trains a sparse Gaussian process on the input data.
    X -- DataFrame with training data (n x dim)  
    Y -- Labels for training data (n x 1)
    num_pseudo_inputs -- number of points used to fill sparse model
    num_starts -- number of attempts at minimization. Increases runtime linearly.
    
    Returns:
    xb -- pseudo-inputs as ndarray (m x dim)
    hyperparams -- tuple containing GP parameters
    
    Translated to python from Edward Snelson's matlab code by Mitchell McIntire.
    """

    (n, dim) = X.shape
    m = np.min([num_pseudo_inputs, n])

    # center data
    mu_y = np.mean(Y)
    y0 = Y - mu_y

    min_lik = np.inf
    for i in range(num_starts):
        # randomly choose initial points
        #   should randomly sample, but hacking this in for the ACR since
        #   the pandas version is older
        #xb_init = np.array(X.sample(m))
        xb_init = np.array(X.iloc[:m, :])

        # initialize hyperparameters
        hyp_ARD = np.array([-2 * np.log((X.max() - X.min() + 0.1) / 2)])
        hyp_coeff = np.array([[np.log(Y.var() + 0.1)]])
        hyp_noise = np.array([[np.log(Y.var() / 4 + 0.01)]])
        hyperparams = pack_hyps(xb_init, hyp_ARD, hyp_coeff, hyp_noise)

        # minimize neg. log likelihood
        # min_result = minimize(SPGP_likelihood, hyperparams, args=(y0,np.array(X),m), method='BFGS', jac=True)
        #iter_res = np.reshape(min_result.x, (1,(m+1)*dim + 2))
        #lik = SPGP_likelihood(iter_res,y0,np.array(X),m,compute_deriv=False)
        #st = time.time()
        (iter_res, lik, i) = minimize(hyperparams,
                                      SPGP_likelihood,
                                      args=(y0, np.array(X), m),
                                      maxnumfuneval=200)
        #print(time.time() - st)
        if (lik[0] < min_lik):
            min_lik = lik[0]
            opt_res = iter_res

    # extract minimizing hyperparameters
    (xb, hyp_ARD, hyp_coeff, hyp_noise) = unpack_hyps(opt_res, m, dim)

    hyperparams = (hyp_ARD, hyp_coeff, hyp_noise)

    return xb, hyperparams  #, mu_y
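The num_starts loop above is a standard multi-start strategy; below is a small self-contained sketch of that pattern with SciPy and a toy objective standing in for SPGP_likelihood:

import numpy as np
from scipy.optimize import minimize

def neg_log_lik(w):
    # toy stand-in for the sparse-GP negative log likelihood
    return np.sum((w ** 2 - 1.0) ** 2)

min_lik, opt_res = np.inf, None
for start in range(3):                 # num_starts attempts
    w0 = np.random.randn(2)            # random initial hyperparameters
    res = minimize(neg_log_lik, w0, method='BFGS')
    if res.fun < min_lik:              # keep the best run
        min_lik, opt_res = res.fun, res.x
print(min_lik, opt_res)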
Example #13
def conjgrad(im, maxnumlinesearch=10, imshape=styleimage.shape):
    import minimize
    im_flat, fs, numlinesearches = minimize.minimize(
        im.flatten(),
        lambda x: cost(x.reshape(imshape)),
        lambda x: grad(x.reshape(imshape)).flatten(),
        args=[],
        maxnumlinesearch=maxnumlinesearch,
        verbose=False)
    return im_flat.reshape(imshape)
Example #14
def runMinVec(X, Y, w0, W, V, k, reg_weight=0.05):
    n, m = X.shape
    H = compress(w0, W, V, m, k)
    [newH, fx, i] = minimize(H,
                             lossH,
                             gradHVec, (X, Y, m, k, reg_weight),
                             maxnumlinesearch=8,
                             verbose=False)
    H = newH

    return extract(H, m, k)
Example #15
    def minimizeLayer3(self, inputData, targets, max_iter):
        layer2out = self.recognize012(inputData)

        #### Flatten all of our parameters into a 1-D array
        (VV, Dim) = multiFlatten(( self.W[3], self.hB[3] ))

        (X, fX, iters) = cg.minimize(VV, backprop_only3, (Dim, layer2out, targets), max_iter)

        #### Un-Flatten all of our parameters from the 1-D array
        matrices = multiUnFlatten(X, Dim)
        self.W[3]  = matrices[0]
        self.hB[3] = matrices[1]
Example #16
def optimize_gp_with_minimize( gp, params ):
  objective_function = progapy.gp.gp_neglogposterior_using_free_params
  grad_function      = progapy.gp.gp_neglogposterior_grad_wrt_free_params
  
  best_p, v, t = minimize( gp.get_free_params(), \
                          objective_function, \
                          grad_function, \
                          [gp], \
                          maxnumlinesearch=params["maxnumlinesearch"] \
                         )
  print best_p
  gp.set_free_params( best_p )
Example #17
    def minimizeLayer3(self, inputData, targets, max_iter):
        layer2out = self.recognize012(inputData)

        #### Flatten all of our parameters into a 1-D array
        (VV, Dim) = multiFlatten((self.W[3], self.hB[3]))

        (X, fX, iters) = cg.minimize(VV, backprop_only3,
                                     (Dim, layer2out, targets), max_iter)

        #### Un-Flatten all of our parameters from the 1-D array
        matrices = multiUnFlatten(X, Dim)
        self.W[3] = matrices[0]
        self.hB[3] = matrices[1]
Example #18
    def train_cg(self, features, labels, weightcost, maxnumlinesearch=numpy.inf, verbose=False):
        """Train the model using conjugate gradients.
  
           Like train() but faster. Uses minimize.py for the optimization. 
        """

        from minimize import minimize
        p, g, numlinesearches = minimize(self.params.copy(), 
                                         self.f, 
                                         self.g, 
                                         (features, labels, weightcost), maxnumlinesearch, verbose=verbose)
        self.updateparams(p)
        return numlinesearches
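If minimize.py is not available, roughly the same call can be sketched with SciPy's conjugate-gradient routine; f and g below are toy stand-ins for self.f and self.g, and the tuple plays the role of (features, labels, weightcost):

import numpy as np
from scipy.optimize import fmin_cg

def f(params, features, labels, weightcost):
    return 0.5 * np.sum((params - labels) ** 2) + weightcost * np.sum(params ** 2)

def g(params, features, labels, weightcost):
    return (params - labels) + 2.0 * weightcost * params

p0 = np.zeros(4)
p = fmin_cg(f, p0, fprime=g, args=(None, np.ones(4), 0.01),
            maxiter=50, disp=False)
print(p)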
Example #19
    def completeStats(self,savePrefix='outStats',**kwargs):
        """ Completed statistics. 
        Inputs:
            savePrefix (='outStats'):   If not None, save output of stat completion (see below), along with self.a, self.b, self.Re, self.N as attributes in a hdf5 file.
            kwargs:     Optional keyword arguments. Can include key 'savePath' 
        Outputs:
            Output is a dict with keys:
            X: Completed covariance matrix
            Z: RHS of the Lyapunov equation
            Y1, Y2: Show up in the AMA algorithm. Not important.
            flag: Convergence flag
            steps: Number of steps at exit of AMA
            funPrimalArr, funDualArr: evaluations of the primal and dual residual functions at each step
            dualGapArr: Expected difference between primal and dual formulation."""
        kwargs['rankPar'] = kwargs.get('rankPar',200.)
        print("Parameters of the statComp instance are:")
        print("a:%.2g, b:%.2g, Re:%d, N:%d, rankPar:%.1g"%(self.a, self.b, self.Re, self.N, kwargs['rankPar']))
        A , C, B = self.makeSystem()
        Aadj, Cadj, Badj = self.makeAdjSystem()
        statsOut = minimize.minimize( A,
                outMat = C, structMat = self.structMat,
                covMat = self.covMat, outMatAdj = Cadj, dynMatAdj = Aadj, **kwargs)
        if savePrefix is not None:
            fName = kwargs.get('savePath','') + savePrefix
            a0 = 0.25; b0 = 2./3.
            fName = fName + 'R%dN%da%02db%02d.hdf5'%(self.Re, self.N, self.a//a0, self.b//b0)
            try:
                with h5py.File(fName,"w") as outFile:
                    outStats = outFile.create_dataset("outStats",data=statsOut['X'],compression='gzip')

                    for key in self.__dict__.keys():
                        # Saving all attributes of the class instance for later regeneration
                        if isinstance(self.__dict__[key], np.ndarray):
                            outFile.create_dataset(key, data=self.__dict__[key],compression='gzip')
                        else:
                            outStats.attrs[key] = self.__dict__[key]
                    for key in statsOut.keys():
                        # Saving output statistics
                        if isinstance(statsOut[key], np.ndarray):
                            outFile.create_dataset(key, data=statsOut[key],compression='gzip')
                        else:
                            outStats.attrs[key] = statsOut[key]

                print("saved statistics to ",fName)
            except:
                print("Could not save output stats for whatever reason..")

        return statsOut
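A self-contained sketch of the HDF5 saving pattern in the try block above (assuming h5py is installed): array-valued results become datasets, scalar results become attributes of the main dataset:

import numpy as np
import h5py

stats_out = {'X': np.eye(3), 'flag': 0, 'steps': 42}
with h5py.File('outStats_example.hdf5', 'w') as out_file:
    out_stats = out_file.create_dataset('outStats', data=stats_out['X'],
                                        compression='gzip')
    for key, val in stats_out.items():
        if isinstance(val, np.ndarray):
            out_file.create_dataset(key, data=val, compression='gzip')
        else:
            out_stats.attrs[key] = val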
Example #20
def run(b, args):
    if not args.skip:
        create_tmp_annotfile(b)

    # re-run iodine with the new annot file
    b2 = b.with_annotfile(TMP_ANNOTFILE)
    rc = b2.run_iodine(stdout=subprocess.DEVNULL)
    if rc != 0:
        print("ERROR: Iodine rejected {} !".format(TMP_ANNOTFILE),
              file=sys.stderr)
        sys.exit(1)

    if args.minimize:
        af = minimize(b, TMP_ANNOTFILE)
        with open(TMP_ANNOTFILE, "w") as f:
            f.write(af.dump())
Example #21
def learn(shape_theta, shape_x, y, r, reg_lambda, n_iter):
    num_movies = y.shape[0]
    num_users = y.shape[1]

    # Normalize Ratings
    y_mean = (y.sum(axis=1)/r.sum(axis=1)).reshape((-1, 1))
    y = y - y_mean.dot(np.ones((1, num_users)))

    param_0 = np.random.randn(np.product(shape_theta) + np.product(shape_x))

    # optimize
    opt, cost, i = minimize(lambda dna: cost_function(dna, shape_theta, shape_x, y, r, reg_lambda),
                            param_0,
                            n_iter)

    theta, x = fold(opt, shape_theta, shape_x)
    return theta, x, y_mean
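The normalization step above computes each movie's mean rating over rated entries only (assuming r marks which cells of y were actually rated); a small sketch under that assumption:

import numpy as np

y = np.array([[5., 0., 4.],
              [0., 3., 0.]])          # ratings, 0 where unrated
r = np.array([[1., 0., 1.],
              [0., 1., 0.]])          # 1 where a rating exists
y_mean = (y.sum(axis=1) / r.sum(axis=1)).reshape((-1, 1))
y_centered = y - y_mean.dot(np.ones((1, y.shape[1])))
print(y_mean.ravel())                 # [4.5 3.]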
Example #22
    def minimizeAllLayers(self, inputData, targets, max_iter):
        #### Flatten all of our parameters into a 1-D array
        (VV, Dim) = multiFlatten(
            (self.W[0], self.hB[0], self.W[1], self.hB[1], self.W[2],
             self.hB[2], self.W[3], self.hB[3]))

        (X, fX, iters) = cg.minimize(VV, backprop, (Dim, inputData, targets),
                                     max_iter)

        #### Un-Flatten all of our parameters from the 1-D array
        matrices = multiUnFlatten(X, Dim)
        self.W[0] = matrices[0]
        self.hB[0] = matrices[1]
        self.W[1] = matrices[2]
        self.hB[1] = matrices[3]
        self.W[2] = matrices[4]
        self.hB[2] = matrices[5]
        self.W[3] = matrices[6]
        self.hB[3] = matrices[7]
Example #23
    def minimizeAllLayers(self, inputData, targets, max_iter):
        #### Flatten all of our parameters into a 1-D array
        (VV, Dim) = multiFlatten((  self.W[0], self.hB[0],
                                    self.W[1], self.hB[1],
                                    self.W[2], self.hB[2],
                                    self.W[3], self.hB[3]  ))

        (X, fX, iters) = cg.minimize(VV, backprop, (Dim, inputData, targets), max_iter)

        #### Un-Flatten all of our parameters from the 1-D array
        matrices = multiUnFlatten(X, Dim)
        self.W[0]  = matrices[0]
        self.hB[0] = matrices[1]
        self.W[1]  = matrices[2]
        self.hB[1] = matrices[3]
        self.W[2]  = matrices[4]
        self.hB[2] = matrices[5]
        self.W[3]  = matrices[6]
        self.hB[3] = matrices[7]
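A hedged sketch of the flatten/unflatten pattern above, using hypothetical stand-ins for multiFlatten and multiUnFlatten: pack several weight matrices and bias vectors into one 1-D vector for the optimizer, then restore their shapes afterwards:

import numpy as np

def multi_flatten(arrays):
    dims = [a.shape for a in arrays]
    vec = np.concatenate([a.ravel() for a in arrays])
    return vec, dims

def multi_unflatten(vec, dims):
    out, start = [], 0
    for shape in dims:
        size = int(np.prod(shape))
        out.append(vec[start:start + size].reshape(shape))
        start += size
    return out

W, hB = np.random.randn(4, 3), np.zeros((4, 1))
vv, dim = multi_flatten((W, hB))
W2, hB2 = multi_unflatten(vv, dim)
assert np.allclose(W, W2) and np.allclose(hB, hB2)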
Example #24
    def train_cg(self,
                 features,
                 labels,
                 weightcost,
                 maxnumlinesearch=numpy.inf,
                 verbose=False):
        """Train the model using conjugate gradients.
  
           Like train() but faster. Uses minimize.py for the optimization. 
        """

        from minimize import minimize
        p, g, numlinesearches = minimize(self.params.copy(),
                                         self.f,
                                         self.g,
                                         (features, labels, weightcost),
                                         maxnumlinesearch,
                                         verbose=verbose)
        self.updateparams(p)
        return numlinesearches
Example #25
    def train(self, x, y, reg_lambda, n_iter):
        """
        ues optimization algorithm to learn a good set of parameters from the training data x and answer y
        """
        # initiate gradient and gradient entries
        grad = np.zeros_like(self.dna)
        for layer in self.layers:
            # for each layer, set the entry of gradient, through which the gradient will be updated
            layer.grad = grad[layer.pointer: layer.pointer+layer.theta.size].reshape(layer.theta.shape)

        # optimize
        opt, cost, i = minimize(lambda dna: (self.learn(dna, x, y, reg_lambda), np.array(grad)), self.dna, n_iter)
        # TODO optimize.fmin_cg implementation
        # opt = optimize.fmin_cg(f=lambda dna: self.learn(dna, x, y, reg_lambda),  # cost function
        #                        x0=self.dna,  # initial set of parameters
        #                        fprime=lambda t: (np.array(grad),)[0],  # gradient
        #                        maxiter=n_iter)  # number of iteration

        # update dna
        self.dna[:] = opt
Example #26
def manifold_traversal(F,N,M,weights,max_iter=5,rbf_var=1e4,verbose=True,checkgrad=True,checkrbf=True):
  # returns two arrays, xpr and r
  #   xpr is optimized x+r
  #   r is optimized r
  # multiply by F to get latent space vector
  if verbose:
    print('manifold_traversal()')
    print('F',F.shape,F.dtype,F.min(),F.max())
    print('N',N)
    print('M',M)
    print('weights',weights)

  xpr_result=[]
  r_result=[]
  r=np.zeros(len(F))
  x=np.zeros(len(F))
  FFT=F.dot(F.T) # K x K
  x[-1]=1
  for weight in weights:

    if checkgrad:
      def f(*args):
        return witness_fn2(*args)[0]
      def g(*args):
        return witness_fn2(*args)[1]
      print('Checking gradient ...')
      err=scipy.optimize.check_grad(f,g,r,*(x,FFT,N,M,rbf_var,weight,False,True))
      print('gradient error',err)
      assert err<1e-5

    r_opt,loss_opt,iter_opt=minimize.minimize(r,witness_fn2,(x,FFT,N,M,rbf_var,weight,verbose,checkrbf),maxnumlinesearch=50,maxnumfuneval=None,red=1.0,verbose=True)
    if verbose:
      print('r_opt',r_opt.shape,r_opt.dtype,r_opt.min(),r_opt.max(),np.linalg.norm(r_opt))
      print('r_opt values',r_opt[:5],'...',r_opt[N:N+5],'...',r_opt[-1])
    xpr_result.append(x+r_opt)
    r_result.append(r_opt)
    r=r_opt
  return np.asarray(xpr_result),np.asarray(r_result)
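The gradient check above follows a common pattern; here is a self-contained sketch with a toy loss standing in for witness_fn2: compare the analytic gradient against finite differences before starting the optimizer.

import numpy as np
import scipy.optimize

def loss(r, weight):
    return weight * np.sum(r ** 2)

def grad(r, weight):
    return 2.0 * weight * r

r0 = np.random.randn(5)
err = scipy.optimize.check_grad(loss, grad, r0, 0.5)
print('gradient error', err)
assert err < 1e-4  # finite-difference error should be tiny for a quadratic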
Example #27
def match_distribution(x,P,Q,weights,max_iter=5,rbf_var=1e4):
  print('match_distribution()')
  print('x',x.shape,x.dtype,x.min(),x.max())
  print('P',P.shape,P.dtype)
  print('Q',Q.shape,Q.dtype)
  print('weights',weights)

  # z score
  F=np.concatenate((P,Q),axis=0)
  print('F',F.shape,F.dtype,F.min(),F.max())
  sigma=F.std()
  loc=F.mean()
  print('sigma',sigma)
  print('loc',loc)
  assert sigma>0
  x=(x-loc)/sigma
  P=(P-loc)/sigma
  Q=(Q-loc)/sigma
  x_0=x*sigma+loc
  print('x',x.shape,x.dtype,x.min(),x.max())
  print('P',P.shape,P.dtype)
  print('Q',Q.shape,Q.dtype)
  print('x_0',x_0.shape,x_0.dtype,x_0.min(),x_0.max())
  
  x_result=[]
  r_result=[]

  checkgrad=True
  parallel=10
  for weight in weights:
    r=np.zeros_like(x)

    # SciPy optimizers don't work
    #solver_type='BFGS'
    #solver_type='CG'
    #print('solver_type',solver_type)
    ##solver_param={'maxiter': max_iter, 'iprint': -1, 'gtol': 1e-7}
    #solver_param={'gtol': 1e-5}
    #r_opt=scipy.optimize.minimize(witness_fn,r,args=(x,P,Q,rbf_var,weight),method=solver_type,jac=True,options=solver_param).x
    #r_opt=scipy.optimize.fmin_cg(witness_fn_loss,r,fprime=witness_fn_grad,args=(x,P,Q,rbf_var,weight))
    if checkgrad:
      def f(*args):
        return witness_fn(*args)[0]
      def g(*args):
        return witness_fn(*args)[1]
      print('Checking gradient ...')
      print(scipy.optimize.check_grad(f,g,r[:10],*(x[:10],P[:10,:10],Q[:10,:10],rbf_var,weight)))
    if parallel>1:
      assert (len(P) % parallel)==0
      assert (len(Q) % parallel)==0
      def witness_fn_parallel(r,x,P,Q,rbf_var,weight):
        result=threadparallel.unordered_parallel_call([witness_fn]*parallel,[(r,x,P[i*len(P)//parallel:(i+1)*len(P)//parallel],Q[i*len(Q)//parallel:(i+1)*len(Q)//parallel],rbf_var,weight) for i in range(parallel)],None)
        loss=sum(x[0] for x in result)
        grad=sum(x[1] for x in result)
        return loss,grad
      r_opt,loss_opt,iter_opt=minimize.minimize(r,witness_fn_parallel,(x,P,Q,rbf_var,weight,'rbf'),maxnumlinesearch=50,maxnumfuneval=None,red=1.0,verbose=True)
    else:
      r_opt,loss_opt,iter_opt=minimize.minimize(r,witness_fn,(x,P,Q,rbf_var,weight),maxnumlinesearch=50,maxnumfuneval=None,red=1.0,verbose=True)
    print('r_opt',r_opt.shape,r_opt.dtype,r_opt.min(),r_opt.max(),np.linalg.norm(r_opt))
    print(r_opt[:10])
    x_result.append((x+r_opt)*sigma+loc)
    r_result.append(r_opt*sigma)
  return x_0,np.asarray(x_result),np.asarray(r_result)
Example #28
  weight=1e-2
  r=np.zeros(len(F))
  x=np.zeros(len(F))
  x[-1]=1
  FFT=F.dot(F.T) # K x K
  K=N+M+L+1
  P=np.eye(N,K)
  Q=np.concatenate([np.zeros((M,N)),np.eye(M,M+L+1)],axis=1)
  BP=FFT[:,:N] # FFT.dot(P.T) # K x N
  BQ=FFT[:,N:N+M] # FFT.dot(Q.T) # K x M
  CP=np.array([FFT[i,i] for i in range(N)]) # np.array([P[i].dot(FFT).dot(P[i].T) for i in range(N)])
  CQ=np.array([FFT[N+i,N+i] for i in range(M)]) # np.array([Q[i].dot(FFT).dot(Q[i].T) for i in range(M)])
  def f(*args):
    return witness_fn3(*args)[0]
  def g(*args):
    return witness_fn3(*args)[1]
  print('Checking gradient ...')
  err=scipy.optimize.check_grad(f,g,r,*(x,FFT,BP,BQ,CP,CQ,N,M,L,rbf_var,weight,False,True))
  print('gradient error',err)
  assert err<1e-5
  r_opt,loss_opt,iter_opt=minimize.minimize(r,witness_fn3,(x,FFT,BP,BQ,CP,CQ,N,M,L,rbf_var,weight,False,True),maxnumlinesearch=25,maxnumfuneval=None,red=1.0,verbose=True)
  print('r P',r_opt[:N],r_opt[:N].var())
  print('r Q',r_opt[N:N+M],r_opt[N:N+M].var())
  print('r T',r_opt[N+M:N+M+L],r_opt[N+M:N+M+L].var())
  print('r X',r_opt[-1])
  xhat=(x+r_opt).dot(F)
  print('xhat',xhat)
  assert sum(xhat<0)>sum(xhat>0)

  # TODO test a multimodal Q
Example #29
    set_params(model_ft.models_stack[-1], tmp)
    return result

fun_grad = theano.function(
    [model_ft.varin, model_ft.models_stack[-1].vartruth],
    T.grad(model_ft.models_stack[-1].cost() + model_ft.models_stack[-1].weightdecay(weightdecay),
           model_ft.models_stack[-1].params)
)
def return_grad(test_params, input_x, truth_y):
    tmp = get_params(model_ft.models_stack[-1])
    set_params(model_ft.models_stack[-1], test_params)
    result = numpy.concatenate([numpy.array(i).flatten() for i in fun_grad(input_x, truth_y)])
    set_params(model_ft.models_stack[-1], tmp)
    return result
p, g, numlinesearches = minimize(
    get_params(model_ft.models_stack[-1]), return_cost, return_grad,
    (train_x.get_value(), train_y.get_value()), logreg_epc, verbose=False
)
set_params(model_ft.models_stack[-1], p)
save_params(model_ft, 'ZLIN_4000_1000_4000_1000_4000_1000_4000_10_normhid_nolinb_cae1_dropout.npy')
print "***error rate: train: %f, test: %f" % (
    train_set_error_rate(), test_set_error_rate()
)

#############
# FINE-TUNE #
#############

"""
print "\n\n... fine-tuning the whole network"
truth = T.lmatrix('truth')
trainer = GraddescentMinibatch(
Example #30
            configFilename = config_file + ".cfg"

    if output_file:
        outputFilename = output_file

    print "Merging libraries."
    if use_compressor == "closure":
        sourceFiles = mergejs.getNames(sourceDirectory, configFilename)
    else:
        merged = mergejs.run(sourceDirectory, None, configFilename)

    print "Compressing using %s" % use_compressor
    if use_compressor == "jsmin":
        minimized = jsmin.jsmin(merged)
    elif use_compressor == "minimize":
        minimized = minimize.minimize(merged)
    elif use_compressor == "closure_ws":
        if len(merged) > 1000000: # The maximum file size for this web service is 1000 KB.
            print "\nPre-compressing using jsmin"
            merged = jsmin.jsmin(merged)
        print "\nIs being compressed using Closure Compiler Service."
        try:
            minimized = closure_ws.minimize(merged)
        except Exception, E:
            print "\nAbnormal termination."
            sys.exit("ERROR: Closure Compilation using Web service failed!\n%s" % E)
        if len(minimized) <= 2:
            print "\nAbnormal termination due to compilation errors."
            sys.exit("ERROR: Closure Compilation using Web service failed!")
        else:
            print "Closure Compilation using Web service has completed successfully."
Example #31
outputFilename = "OpenLayers.js"

if len(sys.argv) > 1:
    configFilename = sys.argv[1]
    extension = configFilename[-4:]

    if extension != ".cfg":
        configFilename = sys.argv[1] + ".cfg"

if len(sys.argv) > 2:
    outputFilename = sys.argv[2]

print "Merging libraries."
merged = mergejs.run(sourceDirectory, None, configFilename)
if have_compressor == "jsmin":
    print "Compressing using jsmin."
    minimized = jsmin.jsmin(merged)
elif have_compressor == "minimize":
    print "Compressing using minimize."
    minimized = minimize.minimize(merged)
else:  # fallback
    print "Not compressing."
    minimized = merged
print "Adding license file."
minimized = file("license.txt").read() + minimized

print "Writing to %s." % outputFilename
file(outputFilename, "w").write(minimized)

print "Done."
Example #32
def filtering(path, folder_out, path_figures):

########################################################################
#   FILENAME
########################################################################
    
    
    
    
    path_filt = folder_out #relative path to folder with slash at end
    
    #loading the data set
    data = np.genfromtxt(path, delimiter='\t', skip_header=1)
    
    #setting the ip window gating
    mdelay = data[0,29]
    gates = data[0,30:50]
    ipw = (mdelay+np.cumsum(data[0,30:50]))-data[0,30:50]/2
    

    #starting values for coefficients of models
    p0_linear = [1, 1]
    p0_pow2 = [1, 0, 1]
    #vectors for rms values
    rms_1, rms_2 = np.ones((len(data),1)), np.ones((len(data),1))
    #vectors for xcorr values
    xc1, xc2 = np.ones((len(data),1)), np.ones((len(data),1))
    #vector for dev(fit/mean) and dev(measured/mean)
    rmsfm, rmsmm = np.zeros((len(data),1)), np.zeros((len(data),1))
    #vector for rms misfit between data (mi) and fit
    rms_misfit = np.ones((len(data),1))
    #array for linear regression parameter (slope)
    linrg = np.ones((len(data),1))
    #array for rms value between measured decay and master decay
    rmsmmaster = np.ones((len(data),1))
    #array for rms values between fit on measured decay and master decay
    rmsfmaster = np.ones((len(data),1))
    #array for resistance misfit
    dev_res = np.zeros((len(data), 1))
    #array for chargeability misfit (single fit)
    dev_pha_sf = np.zeros((len(data), 1))
    #array for chargeability misfit (all fit)
    dev_pha_af = np.zeros((len(data), 1))
    
    #array for curve fit parameters: 3 parameters per fit and 3 fits + 
    #1 identifier wich model was fitted (0 -> pow2 / 1 -> pow2m)
    fit_param = np.zeros((len(data), 10))
    
    #array for window-wise misfit of measured and fitted decay
    #20 windows -> 20 columns
    ipw_misfit = np.zeros((len(data), 20))
    
    #array for window-wise misfit of mean decay and measured decay (minimized)
    ipw_misfit_mm = np.zeros((len(data), 20))
    
   

    #loop over data set to derive error parameters and do the fitting
    for line in range(len(data)):
        mi = data[line,9:29]
        
        try:            
            #fit using all sample points
            p, c = curve_fit(pow2, ipw, mi, p0_pow2, maxfev=100000)
            #fit using only even points
            pe, ce = curve_fit(pow2, ipw[0::2], mi[0::2], p0_pow2, maxfev=100000)
            #fit using only odd points
            po, co = curve_fit(pow2, ipw[1::2], mi[1::2], p0_pow2, maxfev=100000)
            
            if ((p[0]>0 and p[1]>0 and p[2]<0) and
                (pe[0]>0 and pe[1]>0 and pe[2]<0) and
                (po[0]>0 and po[1]>0 and po[2]<0)):
                    
                #fitting with pow2 is ok -> eval fit on window mids
                f = pow2(ipw, p[0], p[1], p[2])
                fe = pow2(ipw, pe[0], pe[1], pe[2])
                fo = pow2(ipw, po[0], po[1], po[2])
                #fitting for resistance misfit (model voltage)
                vm = pow2(7, p[0], p[1], p[2])*100
                #write the fit parameters to file
                fit_param[line, 0:3] = p
                fit_param[line, 3:6] = pe
                fit_param[line, 6:9] = po
            else:
                try:
                        
                    #fitting pow2m model
                    #use whole data for fitting
                    p, c = curve_fit(pow2m, ipw, mi, p0_pow2, maxfev=100000)
                    #only use even points
                    pe, ce = curve_fit(pow2m, ipw[0::2], mi[0::2], p0_pow2, maxfev=100000)
                    #only use odd points
                    po, co = curve_fit(pow2m, ipw[1::2], mi[1::2], p0_pow2, maxfev=100000)
                                    
                except RuntimeError:
                    p = p0_pow2
                    pe = p0_pow2
                    po = p0_pow2
                    
                f = pow2m(ipw, p[0], p[1], p[2])
                fe = pow2m(ipw, pe[0], pe[1], pe[2])
                fo = pow2m(ipw, po[0], po[1], po[2])
                #fitting for resistance misfit (model voltage)
                vm = pow2m(7, p[0], p[1], p[2])*100
                #write the fit parameters to file
                fit_param[line, 0:3] = p
                fit_param[line, 3:6] = pe
                fit_param[line, 6:9] = po
                fit_param[line, 9] = 1
           
    
            #compute rmse and cross correlation between fit1 and fit2, fit1 and fit3
            
            #misfit between fit and measured data (goodness of fit)
            rms_misfit[line] = np.sqrt(np.mean((mi-f)**2))
                
            #normalize parameters to get xcorr in range -1, 1
            fn = (f - np.mean(f)) / (np.std(f) * len(f))
            fen = (fe - np.mean(fe)) /  np.std(fe)
            fon = (fo - np.mean(fo)) /  np.std(fo)
                    
            #assign xcor values to vector  
            xc1[line] = max(np.correlate(fn, fen, 'full'))
            xc2[line] = max(np.correlate(fn, fon, 'full'))
            
            #assign rmse values to vector
            rms_1[line] = np.sqrt(np.mean((f - fe)**2))
            rms_2[line] = np.sqrt(np.mean((f - fo)**2))
            
            #compute linear regression parameters, only use pl[0] (slope)
            pl, cl = curve_fit(linear, ipw, mi, p0_linear, maxfev=100000) 
            linrg[line] = pl[0] 
            
########################################################################
# COMPUTE DEVIATION OF RESISTANCE FOR ~T=0 OF MODELED VOLTAGE (FIT) 
######################################################################## 
    
            #get the voltage in vicinity to t=0 (*100 is needed)
            #evaluate the fit function for the given parameters
            # -> is done in fitting section
            
            #get the current of the measurement
            current = data[line, 8]
            
            #compute the resistance for the fit
            resm = vm/current
            
            #calculate the misfit between measured resistance and the modeled one
            res_real = data[line, 4]
            dev_res[line] = res_real - resm
        
########################################################################
# COMPUTE MISFIT OF GLOBAL CHARGEABILITY 
######################################################################## 
    
            #measured global chargeability
            gc_meas = data[line, 5]
            
            #compute global chargeability of fit
            gc_mod_sf = np.mean(f)
            gc_mod_af = (np.mean(f) + np.mean(fo) + np.mean(fe))/3
            
            #get the misfit by subtracting measured and modelled values
            dev_pha_sf[line] = gc_meas - gc_mod_sf
            dev_pha_af[line] = gc_meas - gc_mod_af

########################################################################
# GET WINDOW-WISE MISFIT (MEASURED/FITTED) 
######################################################################## 
    
            ipw_misfit[line] = mi - f
    
######################################################################## 
    #if even the fitting of a robust model isn't possible set all the errors 
    #to nonsense values
        except RuntimeError:
            fit_param[line, 0:3] = 9999
            fit_param[line, 3:6] = 9999
            fit_param[line, 6:9] = 9999
            
            rms_misfit[line] = 9999
            
            xc1[line] = 0
            xc2[line] = 0
        
            rms_1[line] = 9999
            rms_2[line] = 9999
            
            linrg[line] = 9999
            
            dev_res[line] = 9999
            
            dev_pha_sf[line] = 9999
            dev_pha_af[line] = 9999
            
            ipw_misfit[line] = 9999
        
########################################################################
########################################################################

########################################################################
# COMPUTE THE MEAN DECAY OF DATA SET
########################################################################

    #in order to compute a mean decay of the data set the set is filtered 
    #before the calculation
    
    #array of filter indices (boolean)
    ind = np.ones((len(data),1), dtype=bool)
    
    #filter for slope
    ind[linrg>0] = 0
    
    #filter for negative chargeabilities
    ind[data[:,5]<=0] = 0
    
    #filter for rms misfit
    ind[rms_misfit>0.04] = 0
    
    #apply filter
    data_c = data[ind[:,0]]
    
    a, b = np.shape(data_c)
    
    if a <= 20:
        
        ind = np.ones((len(data),1), dtype=bool)
    
        #filter for slope
        ind[linrg>0] = 0
        
        #filter for negative chargeabilities
        ind[data[:,5]<=0] = 0
        
        #filter for rms misfit
        ind[rms_misfit>np.percentile(rms_misfit, 25)] = 0
        
        #apply filter
        data_c = data[ind[:,0]]
        
    
    #compute 3 master curves of data set
    masters = np.zeros((3, 20))
    medge = np.zeros((4, 20))
    n_b = 3
    
    #bin the gate values of all curves at every gate
    for gate in range(20):
        bmd, bed, c = stats.binned_statistic(np.sort(data_c[:,9+gate], axis=0),
        np.sort(data_c[:,9+gate], axis=0), statistic=np.median, bins=n_b)
        masters[:,gate] = bmd
        medge[:,gate] = bed
    
    #compute the integral chargeability of the master curves -> used for next steps   
    masters_ints = np.zeros((3,1))
    for ints in range(3):
        masters_ints[ints] = np.mean(masters[ints,:])
    
    #with filtered data, compute mean decay curve of data set
    m_mean = np.zeros(20)
    ms = data_c[:,9:29]
    for ll in range(len(m_mean)):
        m_mean[ll] = np.median(ms[:,ll])
            
    #compute deviation of single decay to mean decay, before calculate fit    
    for line in range(len(data)):
        mi = data[line,9:29]    
        if fit_param[line, -1] == 0:
            f = pow2(ipw, fit_param[line, 0],  fit_param[line, 1], fit_param[line, 2])
        else:
            f = pow2m(ipw, fit_param[line, 0],  fit_param[line, 1], fit_param[line, 2])
        
            
        #compute rms between mean decay and fit on data    
        rmsfm[line], nn = mz.minimize(m_mean, ipw, f)
    
        #compute rms between mean decay and measured data  
        rmsmm[line], ipw_misfit_mm[line] = mz.minimize(m_mean, ipw, mi)
        
        #compute distances of measured intregal chargeability to the master curves
        #in order to find the nearest master curve
        dists = np.zeros((3,1))
        for dist in range(3):
            dists[dist] = abs(np.mean(mi)-masters_ints[dist])
            
        #get index of shortest distance    
        idx = np.argmin(dists) 
        
        #compute rms between measured decay and nearest master curve
        rmsmmaster[line], x = mz.minimize(masters[idx], ipw, mi)
        
        #compute rms between fit on measured decay and nearest master curve
        rmsfmaster[line], x = mz.minimize(masters[idx], ipw, f)
            
    #storing rms/deviation values
    mean_xc = (xc1 + xc2)/2
    mean_rms = (rms_1 + rms_2)/2
    error = np.concatenate( \
    (rms_1, rms_2, mean_rms, xc1, xc2, mean_xc, rmsfm, rms_misfit,
    linrg, rmsmmaster, dev_res, rmsfmaster, dev_pha_af, rmsmm, ipw_misfit, ipw_misfit_mm),
    axis=1)


    frags = path.split('/')
    lid = frags[-1][:-4]
    
    #write error parameters to file
    np.savetxt(path_filt + lid + '_error_t.dat',
    error, fmt='%3.6f',
    delimiter='\t',
    header='rms1 rms2 mean_rms xc1 xc2 mean_xc rmsfm rms_misfit slope_lrg rmsmmaster dev_res rmsfmaster dev_pha_af rmsmm ipw_misfit ipw_misfit_mm',
    comments='')    
    
    #write fit parameters to file
    np.savetxt(path_filt + lid + '_fit_param.dat',
    fit_param, fmt='%3.6f',
    delimiter='\t',
    header='p_1    p_2    p_3    pe_1    pe_2    pe_3    po_1    po_2    po_3    id',
    comments='') 
    
    #plotting
    plt.ioff()
    fig = plt.figure(figsize=(8, 5))
    figManager = plt.get_current_fig_manager()
    figManager.window.showMaximized()
    
    
    for line in range(len(data)):
        plt.plot(ipw, data[line, 9:29], 'b', linewidth=0.5)
        plt.hold('True')
    for line in range(len(data_c)):
        plt.plot(ipw, data_c[line, 9:29], 'k', linewidth=1)
    for iter in range(3):
        plt.plot(ipw, masters[iter, :], 'r', linewidth=2)
    
    plt.ylim([0, np.max(masters)+5])
    plt.xlim([ipw[0]-50, ipw[-1]+50])
    plt.xlabel('Time [ms]', fontsize=14)
    plt.ylabel('Apparent Chargability [mV/V]', fontsize=14)
    plt.title('Mastercurves (red), strictly filtered data (black)', fontsize=16)
    
    fig.savefig(path_figures + lid + '_masters.png', bbox_inches='tight', dpi=200)
    plt.close()
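The even/odd cross-validation in the routine above relies on a normalized cross-correlation whose peak lies in [-1, 1]; a small self-contained sketch of that normalization with two stand-in decay curves:

import numpy as np

f = np.exp(-np.linspace(0, 2, 20))           # fit on all points (stand-in)
fe = f + np.random.normal(0, 0.01, f.shape)  # fit on even points (stand-in)

fn = (f - np.mean(f)) / (np.std(f) * len(f))
fen = (fe - np.mean(fe)) / np.std(fe)
print(max(np.correlate(fn, fen, 'full')))    # close to 1 for similar curves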
Example #33
def Exec(code):
    if args.minimize:
        # In exec, we should always munge globals
        code = minimize.minimize(code, True, True, args.obfuscate,
                                 args.obfuscate)
    return p.Exec(code)
Example #34
        model_ft.models_stack[-1].weightdecay(weightdecay),
        model_ft.models_stack[-1].params))


def return_grad(test_params, input_x, truth_y):
    tmp = get_params(model_ft.models_stack[-1])
    set_params(model_ft.models_stack[-1], test_params)
    result = numpy.concatenate(
        [numpy.array(i).flatten() for i in fun_grad(input_x, truth_y)])
    set_params(model_ft.models_stack[-1], tmp)
    return result


p, g, numlinesearches = minimize(get_params(model_ft.models_stack[-1]),
                                 return_cost,
                                 return_grad,
                                 (train_x.get_value(), train_y.get_value()),
                                 logreg_epc,
                                 verbose=False)
set_params(model_ft.models_stack[-1], p)
save_params(
    model_ft,
    'ZLIN_4000_1000_4000_1000_4000_1000_4000_10_normhid_nolinb_cae1_dropout.npy'
)
print "***error rate: train: %f, test: %f" % (train_set_error_rate(),
                                              test_set_error_rate())

#############
# FINE-TUNE #
#############
"""
print "\n\n... fine-tuning the whole network"
Example #35
import numpy as np
import time
import os
import sys
from scipy.stats import poisson, binom
from scipy.special import erf as erf
from minimize import minimize
import multiprocessing

Q = [0.15, 0.17, 0.13, 0.19, 0.11, 0.21, 0.09, 0.23, 0.07, 0.25]
i = int(sys.argv[1])
pmt = i // len(Q)
ind = i % len(Q)
q = Q[ind]
minimize(pmt, q, i)
Example #36
hyp_init = loadtxt('hyp_init', dtype=float, delimiter=',')
hyp_init = array(hyp_init.T)
w_init = append(reshape(xb_init, [M * dim, -1], order='F'),
                reshape(hyp_init, [len(hyp_init), -1], order='F'))
w_init = reshape(w_init, [len(w_init), -1])
y_mean = mean(data_temp)
y0 = data_temp - y_mean
# start_time = time.time()
# print sense_loc.shape
# print y0
start_time = time.time()
# for i in range(1,15000):
# print(spgp_lik(w_init,y0,sense_loc,M))
[w, f, i] = minimize(w_init,
                     spgp_lik,
                     args=[y0, sense_loc, M],
                     maxnumfuneval=-funcEval,
                     verbose=True)
xb = reshape(w[0:-dim - 2], (M, dim), order='F')
hyp = reshape(w[-dim - 2:], (dim + 2, 1), order='F')
# print(w_init)
# print(w)
# print(hyp_init)
# print(hyp)
# print(exp(hyp))

# savetxt('xb_iter_x',xb[:,0],delimiter=',')
# savetxt('xb_iter_y',xb[:,1],delimiter=',')
# savetxt('hyp_learn',hyp,delimiter=',')
x = linspace(0, 200, 200)
y = linspace(0, 200, 200)
Example #37
import numpy as np
import time
import os
import sys
from scipy.stats import poisson, binom
from scipy.special import erf as erf
from minimize import minimize
import multiprocessing

pmts=[0,1,4,7,8,14]


Rec=np.recarray(1, dtype=[
    ('Q', 'f8', len(pmts)),
    ('w', 'f8'),
    ('mu', 'f8')
    ])
#
# Rec[0]=([0,0,0,0,0,0], 13.7)
#
# p=minimize(Rec)

minimize()
Example #38
from minimize import minimize
import multiprocessing

pmts = [0, 1, 4, 7, 8, 14]

Rec = np.recarray(1,
                  dtype=[
                      ('Q', 'f8', len(pmts)),
                      ('T', 'f8', len(pmts)),
                      ('St', 'f8', len(pmts)),
                      ('mu', 'f8', 1),
                      ('W', 'f8', 1),
                      ('F', 'f8', 1),
                      ('Tf', 'f8', 1),
                      ('Ts', 'f8', 1),
                      ('R', 'f8', 1),
                      ('a', 'f8', 1),
                      ('dl', 'f8', 1),
                  ])

Rec[0] = ([
    0.28609523, 0.21198892, 0.1661045, 0.23595573, 0.2543458, 0.46767996
], [
    42.43727439, 42.48680044, 42.48223214, 42.61715417, 42.97131299,
    42.35603571
], [1.14722701, 0.82496347, 0.71858647, 1.61434698, 1.48554624,
    1.03053529], 2.57341188, 13.7, 0.11035399, 0.94339727, 34.3602973,
          0.5760872, 0.36124252, 0.05)

p = minimize(Rec)
Example #39
def TAFKAP_decode(samples=None, p={}):
    def fun_negLL_norm(params, getder=True):
        if getder:
            minll, minder = fun_LL_norm(params, getder)
            minder *= -1
        else:
            minll = fun_LL_norm(params, getder)
        minll *= -1
        if getder:
            return minll, minder
        else:
            return minll

    def fun_LL_norm(params, getder=True):
        #Computes the log likelihood of the noise parameters. Also returns
        #the partial derivatives of each of the parameters (i.e. the
        #gradient), which are required by minimize.m and other efficient
        #optimization algorithms.

        input_type = 'tensor'
        if type(params) is np.ndarray:
            input_type = 'numpy'
            params = torch.tensor(params)
        elif type(params) is list:
            input_type = 'list'
            params = torch.tensor(params)

        nvox = noise.shape[1]
        ntrials = noise.shape[0]
        tau = params[0:-2]
        sig = params[-2]
        rho = params[-1]

        omi, NormConst = invSNC(W[:, 0:p['nchan']], tau, sig, rho, True)

        XXt = torch.mm(noise.t(), noise)
        negloglik = 0.5 * (MatProdTrace(XXt, omi) + ntrials * NormConst)
        negloglik = negloglik.item()

        if iscomplex(negloglik):
            negloglik = inf  #If we encounter a degenerate solution (indicated by complex-valued likelihoods), make sure that the likelihood goes to infinity.

        if (torch.cat((tau, sig.unsqueeze(-1)), 0) < 0.001).any():
            negloglik = inf
        if rho.abs() > 0.999999: negloglik = inf

        loglik = -negloglik

        if getder:
            der = torch.empty_like(params)

            ss = sqrt(ntrials)
            U = torch.mm(omi, noise.t()) / ss

            dom = torch.mm(
                omi,
                torch.eye(nvox) - torch.mm(((1 / ntrials) * XXt), omi))

            JI = 1 - torch.eye(nvox)
            R = torch.eye(nvox) * (1 - rho) + rho
            der[0:-2] = torch.mm(2 * (dom * R), tau.unsqueeze(-1)).squeeze()
            der[-1] = (dom * (tau.unsqueeze(-1) * tau.unsqueeze(0)) * JI).sum()

            der[-2] = 2 * sig * MatProdTrace(
                torch.mm(W[:, 0:p['nchan']].t(), omi), W[:, 0:p['nchan']]
            ) - sqrt(2 * sig) * (torch.mm(U.t(), W[:, 0:p['nchan']])**2).sum()

            der *= -0.5 * ntrials

            if input_type == 'numpy':
                der = der.numpy()
            elif input_type == 'list':
                der = der.tolist()

            return loglik, der
        else:
            return loglik

    def estimate_W(samples=None,
                   C=None,
                   do_boot=False,
                   test_samples=None,
                   test_C=None):
        N = C.shape[0]
        if do_boot:
            idx = torch.randint(N, (N, ))
        else:
            idx = torch.arange(0, N)

        if p['prev_C']:
            sol = torch.linalg.lstsq(
                torch.cat((C[idx, p['nchan']:], torch.ones(N, 1)), 1),
                samples[idx, :])
            W_prev = sol[0].t()
            W_prev = W_prev[:, 0:-1]
            samples -= torch.mm(C[:, p['nchan']:], W_prev.t())
            C = C[:, 0:p['nchan']]
        else:
            W_prev = torch.empty(0)

        sol = torch.linalg.lstsq(C[idx, :], samples[idx, :])
        W_curr = sol[0].t()
        W = torch.cat((W_curr, W_prev), 1)

        noise = samples[idx, :] - torch.mm(C[idx, :], W_curr.t())
        if not test_samples == None:
            test_noise = test_samples - torch.mm(test_C, W.t())
        else:
            test_noise = None

        return W, noise, test_noise

    def estimate_cov(X, lambda_var, lamb, W):
        n, pp = X.shape[:]
        W = W[:, 0:p['nchan']]

        vars = (X**2).mean(0)
        medVar = vars.median()

        t = torch.ones((pp, ) * 2).tril(-1) == 1
        samp_cov = torch.mm(X.t(), X) / n

        WWt = torch.mm(W, W.t())
        rm = torch.cat((WWt[t].unsqueeze(-1), torch.ones(t.sum(), 1)), 1)
        sol = torch.linalg.lstsq(rm, samp_cov[t].unsqueeze(-1))
        coeff = sol[0]

        # coeff = torch.matmul(samp_cov[t].view(batch_size, -1, 1).transpose(-2,-1), rm.pinv().transpose(-2,1))

        target_diag = lambda_var * medVar + (1 - lambda_var) * vars
        target = coeff[0] * WWt + torch.ones((pp, ) * 2) * coeff[1]
        target[torch.eye(pp) == 1] = target_diag

        C = (1 - lamb) * samp_cov + lamb * target
        try:
            torch.linalg.cholesky(
                C
            )  #Cholesky decomp seems to be faster than eigendecomp, so assuming we mostly don't fail this test, it's faster this way
        except:
            eigvals, eigvecs = torch.linalg.eigh(C)
            min_eigval = eigvals.min()
            print(
                'WARNING: Non-positive definite covariance matrix detected. Lowest eigenvalue: '
                + str(min_eigval.item()) +
                '. Finding a nearby PD matrix by thresholding eigenvalues at 1e-10.'
            )
            eigvals = eigvals.clamp(1e-10)
            eigvals = torch.diag(eigvals)
            C = torch.mm(torch.mm(eigvecs, eigvals), eigvecs.t())

        return C

    def find_lambda(cvInd, lambda_range):

        cv_folds = cvInd.unique()
        K = cv_folds.shape[0]
        assert K > 1, 'Must have at least two CV folds'

        W_cv, est_noise_cv, val_noise_cv = [], [], []

        def visit(intern_lamb):
            loss = 0
            for cv_iter2 in range(K):
                estC = estimate_cov(est_noise_cv[cv_iter2], intern_lamb[0],
                                    intern_lamb[1], W_cv[cv_iter2])
                vncv = val_noise_cv[cv_iter2]
                valC = torch.mm(vncv.t(), vncv) / vncv.shape[
                    0]  #sample covariance of validation data
                loss += fun_norm_loss(estC, valC)

            if loss.is_complex():
                loss = torch.Tensor(inf)

            return loss

        # Pre-compute tuning weights and noise values to use in each cross-validation split
        for cv_iter in range(K):
            val_trials = cvInd == cv_folds[cv_iter]
            est_trials = val_trials.logical_not()
            est_samples = train_samples[est_trials, :]
            val_samples = train_samples[val_trials, :]
            this_W_cv, this_est_noise_cv, this_val_noise_cv = estimate_W(
                est_samples, Ctrain[est_trials, :, 0], False, val_samples,
                Ctrain[val_trials, :, 0])
            W_cv.append(this_W_cv)
            est_noise_cv.append(this_est_noise_cv)
            val_noise_cv.append(this_val_noise_cv)

        # Grid search
        s = [x.numel() for x in lambda_range]
        Ngrid = [
            min(max(2, ceil(sqrt(x))), x) for x in s
        ]  #Number of values to visit in each dimension (has to be at least 2, except if there is only 1 value for that dimension)

        grid_vec = [
            torch.linspace(0, y - 1, x).int() for x, y in zip(Ngrid, s)
        ]
        grid_x, grid_y = torch.meshgrid(grid_vec[0], grid_vec[1])
        grid_l1, grid_l2 = torch.meshgrid(lambda_range[0], lambda_range[1])
        grid_l1, grid_l2 = grid_l1.flatten(), grid_l2.flatten()

        sz = s.copy()
        sz.reverse()

        print('--GRID SEARCH--')
        losses = torch.empty(grid_x.numel(), 1)
        for grid_iter in range(grid_x.numel()):
            this_lambda = torch.Tensor(
                (lambda_range[0][grid_x.flatten()[grid_iter]],
                 lambda_range[1][grid_y.flatten()[grid_iter]]))
            losses[grid_iter] = visit(this_lambda)
            print(
                "{:02d}/{:02d} -- lambda_var: {:3.2f}, lambda: {:3.2f}, loss: {:g}"
                .format(grid_iter, grid_x.numel(), *this_lambda,
                        losses[grid_iter].item()))

        visited = sub2ind(sz,
                          grid_y.flatten().tolist(),
                          grid_x.flatten().tolist())
        best_loss, best_idx = losses.min(0)
        best_idx = visited[best_idx]

        best_lambda_gridsearch = (grid_l1[best_idx], grid_l2[best_idx])
        print(
            'Best lambda setting from grid search: lambda_var = {:3.2f}, lambda = {:3.2f}, loss = {:g}'
            .format(*best_lambda_gridsearch, best_loss.item()))

        # Pattern search
        print('--PATTERN SEARCH--')
        step_size = int(
            2**floor(log2(torch.diff(grid_y[0][0:2]) / 2))
        )  #Round down to the nearest power of 2 (so we can keep dividing the step size in half
        while True:
            best_y, best_x = ind2sub(sz, best_idx)
            new_x = best_x + torch.Tensor((-1, 1, -1, 1)).int() * step_size
            new_y = best_y + torch.Tensor((-1, -1, 1, 1)).int() * step_size
            del_idx = torch.logical_or(
                torch.logical_or(new_x < 0, new_x >= lambda_range[0].numel()),
                torch.logical_or(new_y < 0, new_y >= lambda_range[1].numel()))
            new_x = new_x[del_idx.logical_not()]
            new_y = new_y[del_idx.logical_not()]
            new_idx = sub2ind(sz, new_y.tolist(), new_x.tolist())
            not_visited = [x not in visited for x in new_idx]
            new_idx = [i for (i, v) in zip(new_idx, not_visited) if v]
            if len(new_idx) > 0:
                this_losses = torch.empty(len(new_idx))
                for ii in range(len(new_idx)):
                    this_lambda = torch.Tensor(
                        (grid_l1[new_idx[ii]], grid_l2[new_idx[ii]]))
                    this_losses[ii] = visit(this_lambda)
                    print(
                        "Step size: {:d}, lambda_var: {:3.2f}, lambda: {:3.2f}, loss: {:g}"
                        .format(step_size, *this_lambda,
                                this_losses[ii].item()))
                visited.extend(new_idx)
                # visited = torch.cat((visited, torch.tensor(new_idx)),0)
                losses = torch.cat((losses, this_losses.unsqueeze(-1)), 0)

            if (this_losses < best_loss).any():
                best_loss, best_idx = losses.min(0)
                best_idx = visited[best_idx]
            elif step_size > 1:
                step_size = int(step_size / 2)
            else:
                break

        best_lambda = torch.Tensor((grid_l1[best_idx], grid_l2[best_idx]))
        print(
            "Best setting found: lambda_var = {:3.2f}, lambda = {:3.2f}, loss: {:g}"
            .format(*best_lambda, best_loss.item()))

        return best_lambda

    torch.set_default_dtype(torch.float64)
    # torch.set_default_tensor_type(torch.cuda.DoubleTensor)

    defaults = {  #Default settings for parameters in 'p'
        'Nboot': int(5e4),  #Maximum number of bootstrap iterations
        'precomp_C': 4,  #How many sets of channel basis functions to precompute (one is picked at random on each bootstrap iteration) - irrelevant for PRINCE
        'randseed': 1234,  #Seed for the (pseudo-)random number generator, so that the algorithm, although stochastic, reproduces identical results whenever it is run with the same input
        'prev_C': False,  #Regress out the contribution of the previous stimulus to current-trial voxel responses?
        'dec_type': 'TAFKAP',  #'TAFKAP' or 'PRINCE'
        'stim_type': 'circular',  #'circular' or 'categorical'. Also controls what type of data is simulated, in case no data is provided.
        'DJS_tol': 1e-8,  #Stop collecting bootstrap samples (before Nboot is reached) once the Jensen-Shannon divergence between the new likelihoods and the previous values drops below this number. Set to a negative value to disable this early termination.
        'nchan': 8,  #Number of "channels", i.e. orientation basis functions used to fit voxel tuning curves
        'chan_exp': 5,  #Exponent to which basis functions are raised (higher = narrower)
        'precision': 'double'  #Floating-point precision ('double' or 'single') used when converting likelihoods to circular estimates below
    }

    p = setdefaults(defaults, p)
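    # Illustrative usage sketch (not part of the original code): a caller would
    # typically pass only the settings it wants to override, e.g.
    #   p = {'dec_type': 'PRINCE', 'randseed': 42}
    #   est, unc, liks, hypers = TAFKAP_decode(samples, p)
    # The enclosing function name `TAFKAP_decode` is assumed here; any key not
    # supplied falls back to the corresponding value in `defaults` via setdefaults.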

    if samples is None:
        print('--SIMULATING DATA--')
        Ntraintrials = 200
        Ntesttrials = 20
        Ntrials = Ntraintrials + Ntesttrials
        nclasses = 4  #Only relevant when simulating categorical stimuli

        samples, sp = makeSNCData({
            'nvox': 500,
            'ntrials': Ntrials,
            'taumean': 0.7,
            'ntrials_per_run': Ntesttrials,
            'Wstd': 0.3,
            'sigma': 0.3,
            'randseed': p['randseed'],
            'shuffle_oris': 1,
            'sim_stim_type': p['stim_type'],
            'nclasses': nclasses
        })

        # Derive the required trial information from the simulation parameters
        p['train_trials'] = torch.arange(Ntrials) < Ntraintrials
        p['test_trials'] = torch.logical_not(p['train_trials'])

        p['stimval'] = sp['stimval']
        if p['stim_type'] == 'circular': p['stimval'] /= (pi / 90)
        p['runNs'] = sp['run_idx']

    assert 'stimval' in p and 'train_trials' in p and 'test_trials' in p and 'runNs' in p, 'Must specify stimval, train_trials, test_trials and runNs'

    torch.manual_seed(p['randseed'])
    np.random.seed(p['randseed'])
    random.seed(p['randseed'])
    # torch.use_deterministic_algorithms(True)

    train_samples = samples[p['train_trials'], :]
    test_samples = samples[torch.logical_not(p['train_trials']), :]
    Ntraintrials = train_samples.shape[0]
    Ntesttrials = test_samples.shape[0]
    Nvox = train_samples.shape[1]
    train_stimval = p['stimval'][p['train_trials']]
    if p['stim_type'] == 'circular': train_stimval /= (90 / pi)

    del samples
    """  Pre-compute variables to speed up computation
    To speed up computation, we discretize the likelihoods into 100 equally
    spaced values (this value is hardcoded but can be changed as desired).
    This allows us to precompute the channel responses (basis function
    values) for these 100 stimulus values (orientations). 

    For categorical stimulus variables, likelihoods are discrete by
    definition, and evaluated only for the M classes that the data belong to.
    """

    if p['stim_type'] == 'circular':
        s_precomp = torch.linspace(0, 2 * pi, 101)
        s_precomp = (s_precomp[0:-1]).view(100, 1)
        ph = torch.linspace(0, 2 * pi / p['nchan'], p['precomp_C'] + 1)
        ph = ph[0:-1]
        classes = None
    elif p['stim_type'] == 'categorical':
        classes = torch.unique(p['stimval'])
        assert (
            classes == classes.int()).all(), 'Class labels must be integers'
        classes = classes.int()
        Nclasses = classes.numel()
        p['nchan'] = Nclasses
        ph = torch.zeros(1)
        p['precomp_C'] = 1
        s_precomp = classes.view(Nclasses, 1)

    C_precomp = torch.empty(s_precomp.shape[0], p['nchan'], p['precomp_C'])
    Ctrain = torch.empty(Ntraintrials, p['nchan'], p['precomp_C'])
    for i in range(p['precomp_C']):
        C_precomp[:, :, i] = fun_basis(s_precomp - ph[i], p['nchan'],
                                       p['chan_exp'], classes)
        Ctrain[:, :, i] = fun_basis(train_stimval - ph[i], p['nchan'],
                                    p['chan_exp'], classes)

    Ctest = torch.empty(Ntesttrials, p['nchan'], p['precomp_C'])
    if p['prev_C']:
        Ctrain_prev = torch.cat(
            (torch.empty(1, p['nchan'], p['precomp_C']), Ctrain[0:-1, :, :]),
            0)
        train_runNs = p['runNs'][p['train_trials']]
        sr_train = train_runNs == torch.cat(
            (torch.empty(1), train_runNs[0:-1]), 0)
        Ctrain_prev[sr_train.logical_not(), :, :] = 0
        Ctrain = torch.cat((Ctrain, Ctrain_prev), 1)
        test_runNs = p['runNs'][p['test_trials']]
        sr_test = test_runNs == torch.cat((torch.empty(1), test_runNs[0:-1]),
                                          0)

        test_stimval = p['stimval'][p['test_trials']]
        if p['stim_type'] == 'circular': test_stimval /= (90 / pi)
        for i in range(p['precomp_C']):
            Ctest[:, :, i] = fun_basis(test_stimval - ph[i], p['nchan'],
                                       p['chan_exp'], classes)
        Ctest_prev = torch.cat(
            (torch.empty(1, p['nchan'], p['precomp_C']), Ctest[0:-1, :, :]), 0)
        Ctest_prev[sr_test.logical_not(), :, :] = 0

    cnt = torch.zeros(Ntesttrials, s_precomp.shape[0])

    # Find best hyperparameter values (using inner CV-loop within the training data)
    if p['dec_type'] == 'TAFKAP':
        print('--PERFORMING HYPERPARAMETER SEARCH--')
        lvr = torch.linspace(0, 1, 50)
        lr = torch.linspace(0, 1, 50)
        lr = lr[1:]
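        # Illustrative note (not in the original): the two hyperparameters searched
        # over here are the shrinkage weights printed as 'lambda_var' (lvr, in [0, 1])
        # and 'lambda' (lr, in (0, 1]); find_lambda first evaluates them on a coarse
        # grid and then refines the best grid point with the pattern search defined above.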
        hypers = find_lambda(p['runNs'][p['train_trials']], (lvr, lr))
    elif p['dec_type'] == 'PRINCE':
        # No hyperparameter search necessary for PRINCE
        p['Nboot'] = 1
        hypers = None
    else:
        raise Exception('Invalid decoder type specified')

    # Bootstrap loop (run only once for PRINCE)

    for i in range(p['Nboot']):
        ## Bootstrap sample of W & covariance
        # Resample train trials with replacement and estimate W and the
        # covariance matrix on this resampled training data. For PRINCE, don't
        # bootstrap, but just estimate W and covariance matrix once for the
        # unresampled training data.
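        # Illustrative sketch (not in the original; the actual resampling is assumed
        # to happen inside estimate_W when its third argument is True):
        #   idx = torch.randint(Ntraintrials, (Ntraintrials,))
        #   boot_samples, boot_C = train_samples[idx], Ctrain[idx, :, pc_idx]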

        if p['dec_type'] == 'TAFKAP':
            print('Bootstrap iteration: {:d}'.format(i))

            if p['precomp_C'] > 1:
                pc_idx = random.randint(0, p['precomp_C'] - 1)
            else:
                pc_idx = 0

            W, noise, _ = estimate_W(train_samples, Ctrain[:, :, pc_idx], True)
            cov_est = estimate_cov(noise, hypers[0], hypers[1], W)

            prec_mat = chol_invld(cov_est)
            if not torch.is_tensor(prec_mat):
                print(
                    'WARNING: Covariance estimate wasn\'t positive definite. Trying again with another bootstrap sample.'
                )
                continue

        else:
            print('--ESTIMATING PRINCE GENERATIVE MODEL PARAMETERS--')
            W, noise, _ = estimate_W(train_samples, Ctrain[:, :, 0], False)
            init_losses = torch.ones(100) * inf
            while init_losses.isinf().all():
                inits = [torch.rand(Nvox + 2) for x in range(100)]
                # inits[-1] = torch.cat((torch.ones(Nvox)*0.7, torch.ones(1)*0.3, torch.ones(1)*0.05),0)
                init_losses = torch.tensor(
                    [fun_negLL_norm(x, False) for x in inits])

            _, min_idx = init_losses.min(0)

            sol, _, _ = minimize(inits[min_idx].numpy(),
                                 fun_negLL_norm,
                                 maxnumlinesearch=1e4)
            # savemat('tmp.mat', {'W':W.numpy(), 'noise':noise.numpy(), 'init':inits[min_idx].numpy(), 'sol':sol})
            sol = torch.tensor(sol)
            prec_mat = invSNC(W[:, 0:p['nchan']], sol[0:-2], sol[-2], sol[-1],
                              False)
            pc_idx = 0

        # Compute likelihoods on test-trials given model parameter sample

        pred = C_precomp[:, :, pc_idx] @ W[:, 0:p['nchan']].t()

        if (i + 1) % 100 == 0: old_cnt = cnt.clone()

        # The following lines are a bit different (and more elegant/efficient) in Python+Pytorch than in Matlab
        res = test_samples
        if p['prev_C']:
            # Out-of-place subtraction so test_samples is not modified across bootstrap iterations
            res = res - torch.matmul(Ctest_prev[:, :, pc_idx],
                                     W[:, p['nchan']:].t().unsqueeze(0)).squeeze()

        res = res.unsqueeze(1) - pred.unsqueeze(0)
        ps = -0.5 * ((res @ prec_mat) * res).sum(-1)
        ps = (ps - ps.amax(1, True)).softmax(1)
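        # Illustrative note (not in the original): broadcasting makes `res` a
        # (Ntesttrials, n_grid, Nvox) array of residuals; the quadratic form with
        # prec_mat gives each test trial's log-likelihood over the stimulus grid
        # (up to a constant), and the row-wise softmax turns these into the
        # normalized likelihoods `ps`.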

        cnt += ps

        if (i + 1) % 100 == 0:
            mDJS = fun_DJS(old_cnt / old_cnt.sum(1, True),
                           cnt / cnt.sum(1, True)).amax()
            print(
                'Max. change in likelihoods (JS-divergence) in last 100 iterations: {:g}'
                .format(mDJS))
            if mDJS < p['DJS_tol']: break

    liks = cnt / cnt.sum(1, True)  #(Normalized) likelihoods (= posteriors, assuming a flat prior)
    if p['stim_type'] == 'circular':
        if p['precision'] == 'double':
            pop_vec = liks.type(torch.complex128) @ (1j * s_precomp).exp()
        elif p['precision'] == 'single':
            pop_vec = liks.type(torch.complex64) @ (1j * s_precomp).exp()
        est = (pop_vec.angle() / pi * 90) % 180  #Stimulus estimate (likelihood/posterior means)
        unc = (-2 * pop_vec.abs().log()).sqrt() / pi * 90  #Uncertainty (defined here as circular SDs of likelihoods/posteriors)
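        # Equivalently (illustrative note, not in the original): with
        # R_j = sum_s liks[j, s] * exp(i*s) the resultant vector for test trial j,
        #   est_j = angle(R_j) * 90 / pi (mod 180)   -- circular mean, in degrees
        #   unc_j = sqrt(-2 * ln|R_j|) * 90 / pi     -- circular SD, in degrees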
    elif p['stim_type'] == 'categorical':
        _, est = liks.max(1)
        est = classes[est]  #Convert back to original class labels
        tmp = -liks * liks.log()
        tmp[liks == 0] = 0
        unc = tmp.sum(1)  #Uncertainty (defined as the entropy of the distribution)

    return est, unc, liks, hypers
Exemple #40
0
def conjgrad(im, maxnumlinesearch=10, imshape=styleimage.shape):
    import minimize
    im_flat, fs, numlinesearches = minimize.minimize(
        im.flatten(),
        lambda x: cost(x.reshape(imshape)),
        lambda x: grad(x.reshape(imshape)).flatten(),
        args=[],
        maxnumlinesearch=maxnumlinesearch,
        verbose=False)
    return im_flat.reshape(imshape)
Exemple #41
0
def softmaxTrain(inputSize, numClasses, lambda_, inputData, labels, n_iterations=100):
    theta = 0.005 * np.random.randn(numClasses * inputSize, 1)
    softmaxCostF = lambda p: softmaxCost(p, numClasses, inputSize, lambda_, inputData, labels)
    optTheta, cost, iteration = minimize.minimize(softmaxCostF, theta, n_iterations)
    optTheta = optTheta.reshape(numClasses, inputSize, order='F')
    return optTheta
Exemple #42
0
def Exec(code):
    if args.minimize:
        # In exec, we should always munge globals
        code = minimize.minimize(code, True, True, args.obfuscate, args.obfuscate)
    return p.Exec(code)
Exemple #43
0
import minimize as mi
# ID: %#6429ee5728
import check_grad as cg
import numpy as np

min = np.zeros(4)

r_val = mi.minimize(min, cg.test_f, cg.test_grad, [], maxnumlinesearch=80)
min = r_val[0]

print(min)
print(r_val[1])
Exemple #44
0
optDict = {}
optDict['rankPar'] = 10.
optDict['stepSize'] = 10.
optDict['tolPrimal'] = 1.0e-06
optDict['tolDual'] = 1.0e-06
optDict['iterMax'] = int(1.0e05)

X0 = solve_lyapunov( dynMat, -Istate)
Z0 = Istate
X0 = np.asmatrix(X0); Z0 = np.asmatrix(Z0)
Y10 = solve_lyapunov( dynMat.H, X0 )
Y10 = optDict['rankPar'] * Y10 / np.linalg.norm(Y10,ord=2)
Y20 = np.identity( outMat.shape[0] )
Y10 = np.asmatrix(Y10); Y20 = np.asmatrix(Y20)

optDict['X0'] = X0
optDict['Z0'] = Z0
optDict['Y10'] = Y10
optDict['Y20'] = Y20


outDict = minimize.minimize( dynMat, outMat=outMat, structMat=structMat, covMat=covMat, optDict=optDict,printIter=500)

#-----------------------------------------------------------------------------
# Verify Z = BH* + HB* decomposition
Z = outDict['Z']
B,H,S = ops.decomposeZ(Z)


Exemple #45
0
import numpy as np
import time
import os
import sys
from scipy.stats import poisson, binom
from scipy.special import erf as erf
from minimize import minimize
import multiprocessing

pmts = [0, 1, 4, 7, 8, 14]

Rec = np.recarray(1, dtype=[
    ('Q', 'f8', len(pmts)),
])

Rec[0] = ([0, 0, 0, 0, 0, 0], )
ind = 1

for i, q in enumerate(np.linspace(0.2, 0.3, 5)):
    p = minimize(Rec, q, ind, 'Q{}_{}'.format(ind, i))
Exemple #46
0
def backprop(VISHID, VISBIASES, PENRECBIASES, PENRECBIASES2, HIDRECBIASES,
             HIDPEN, HIDPEN2, HIDGENBIASES, HIDGENBIASES2, HIDTOP,
             TOPRECBIASES, TOPGENBIASES):
    # def backprop():
    import numpy as np
    import scipy.io as sio
    from makebatches import makebatches
    from mnistdisp import mnistdisp
    from minimize import minimize
    from CG_MNIST import CG_MNIST

    MAX_EPOCH = 200
    print('Fine-tuning deep autoencoder by minimizing cross entropy error.')
    print('60 batches of 1000 cases each.')

    BATCH_DATA_ = sio.loadmat("batchdata_py.mat",
                              verify_compressed_data_integrity=False)
    BATCHDATA = BATCH_DATA_['batchdata']
    TEST_BATCH_DATA_ = sio.loadmat("testbatchdata_py.mat",
                                   verify_compressed_data_integrity=False)
    TESTBATCHDATA = TEST_BATCH_DATA_['testbatchdata']

    VISHID_ = sio.loadmat("vishid_mnistvh.mat",
                          verify_compressed_data_integrity=False)
    VISHID = VISHID_['vishid_']

    W1 = np.append(VISHID, HIDRECBIASES.reshape(1, -1), axis=0)
    W2 = np.append(HIDPEN, PENRECBIASES.reshape(1, -1), axis=0)
    W3 = np.append(HIDPEN2, PENRECBIASES2.reshape(1, -1), axis=0)
    W4 = np.append(HIDTOP, TOPRECBIASES.reshape(1, -1), axis=0)
    W5 = np.append(HIDTOP.T, TOPGENBIASES.reshape(1, -1), axis=0)
    W6 = np.append(HIDPEN2.T, HIDGENBIASES2.reshape(1, -1), axis=0)
    W7 = np.append(HIDPEN.T, HIDGENBIASES.reshape(1, -1), axis=0)
    W8 = np.append(VISHID.T, VISBIASES.reshape(1, -1), axis=0)

    L1 = W1.shape[0] - 1
    L2 = W2.shape[0] - 1
    L3 = W3.shape[0] - 1
    L4 = W4.shape[0] - 1
    L5 = W5.shape[0] - 1
    L6 = W6.shape[0] - 1
    L7 = W7.shape[0] - 1
    L8 = W8.shape[0] - 1
    L9 = L1

    TEST_ERR = []
    TRAIN_ERR = []

    for epoch in range(1, MAX_EPOCH):
        ERR = 0
        NUM_CASES, NUM_DIMS, NUM_BATCHES = BATCHDATA.shape
        N = NUM_CASES
        for batch in range(1, NUM_BATCHES):
            data = BATCHDATA[:, :, batch]
            data = np.append(data, np.ones((N, 1)), axis=1)

            W1_PROBS = 1.0 / (1 + np.exp(np.matmul(-data, W1)))
            W1_PROBS = np.append(W1_PROBS, np.ones((N, 1)), axis=1)
            W2_PROBS = 1.0 / (1 + np.exp(np.matmul(-W1_PROBS, W2)))
            W2_PROBS = np.append(W2_PROBS, np.ones((N, 1)), axis=1)
            W3_PROBS = 1.0 / (1 + np.exp(np.matmul(-W2_PROBS, W3)))
            W3_PROBS = np.append(W3_PROBS, np.ones((N, 1)), axis=1)
            W4_PROBS = np.matmul(W3_PROBS, W4)
            W4_PROBS = np.append(W4_PROBS, np.ones((N, 1)), axis=1)
            W5_PROBS = 1.0 / (1 + np.exp(np.matmul(-W4_PROBS, W5)))
            W5_PROBS = np.append(W5_PROBS, np.ones((N, 1)), axis=1)
            W6_PROBS = 1.0 / (1 + np.exp(np.matmul(-W5_PROBS, W6)))
            W6_PROBS = np.append(W6_PROBS, np.ones((N, 1)), axis=1)
            W7_PROBS = 1.0 / (1 + np.exp(np.matmul(-W6_PROBS, W7)))
            W7_PROBS = np.append(W7_PROBS, np.ones((N, 1)), axis=1)
            DATAOUT = 1.0 / (1 + np.exp(np.matmul(-W7_PROBS, W8)))
            ERR += 1 / N * np.sum(
                np.sum(np.square(data[:, :-1] - DATAOUT), axis=0), axis=0)
        TRAIN_ERR = ERR / NUM_BATCHES

        print(
            'Displaying in figure 1: Top row - real data, Bottom row -- reconstructions'
        )
        OUTPUT = np.array([])
        for ii in range(15):
            A = np.append(data[ii, :-1].T, DATAOUT[ii, :].T)
            A = A.reshape(784, 2)
            OUTPUT = np.append(OUTPUT, A)
        # mnistdisp(OUTPUT)
        # plt.show()

        TESTNUMCASES, TESTNUMDIMS, TESTNUMBATCHES = TESTBATCHDATA.shape
        N = TESTNUMCASES
        ERR = 0
        for batch in range(1, TESTNUMBATCHES):
            data = TESTBATCHDATA[:, :, batch]
            data = np.append(data, np.ones((N, 1)), axis=1)

            W1_PROBS = 1.0 / (1 + np.exp(np.matmul(-data, W1)))
            W1_PROBS = np.append(W1_PROBS, np.ones((N, 1)), axis=1)
            W2_PROBS = 1.0 / (1 + np.exp(np.matmul(-W1_PROBS, W2)))
            W2_PROBS = np.append(W2_PROBS, np.ones((N, 1)), axis=1)
            W3_PROBS = 1.0 / (1 + np.exp(np.matmul(-W2_PROBS, W3)))
            W3_PROBS = np.append(W3_PROBS, np.ones((N, 1)), axis=1)
            W4_PROBS = np.matmul(W3_PROBS, W4)
            W4_PROBS = np.append(W4_PROBS, np.ones((N, 1)), axis=1)
            W5_PROBS = 1.0 / (1 + np.exp(np.matmul(-W4_PROBS, W5)))
            W5_PROBS = np.append(W5_PROBS, np.ones((N, 1)), axis=1)
            W6_PROBS = 1.0 / (1 + np.exp(np.matmul(-W5_PROBS, W6)))
            W6_PROBS = np.append(W6_PROBS, np.ones((N, 1)), axis=1)
            W7_PROBS = 1.0 / (1 + np.exp(np.matmul(-W6_PROBS, W7)))
            W7_PROBS = np.append(W7_PROBS, np.ones((N, 1)), axis=1)
            DATAOUT = 1.0 / (1 + np.exp(np.matmul(-W7_PROBS, W8)))
            ERR += 1 / N * np.sum(
                np.sum(np.square(data[:, :-1] - DATAOUT), axis=0), axis=0)
        TEST_ERR = ERR / TESTNUMBATCHES
        print('Before epoch {} Train squared error: {} Test squared error: {}'.
              format(epoch, TRAIN_ERR, TEST_ERR))

        TT = 0
        for batch in range(int(NUM_BATCHES / 10)):
            print('epoch {} batch {}'.format(epoch, batch))
            TT += 1
            data = np.empty((0, 784))
            for kk in range(10):
                data = np.append(data,
                                 BATCHDATA[:, :, ((TT - 1) * 10 + kk)],
                                 axis=0)

            MAX_ITER = 3
            VV = np.concatenate(
                (W1.reshape(1, -1), W2.reshape(1, -1), W3.reshape(1, -1),
                 W4.reshape(1, -1), W5.reshape(1, -1), W6.reshape(
                     1, -1), W7.reshape(1, -1), W8.reshape(1, -1)),
                axis=1)
            DIM = np.array([L1, L2, L3, L4, L5, L6, L7, L8,
                            L9]).reshape(1, -1).T

            f, df = CG_MNIST(VV, DIM, data)
            X, fX, i = minimize(VV, f, df, MAX_ITER, DIM, data, 1.0, True)

            W1 = X[0][0:(L1 + 1) * L2].reshape(L1 + 1, L2)
            X3 = (L1 + 1) * L2
            W2 = X[0][X3:X3 + (L2 + 1) * L3].reshape(L2 + 1, L3)
            X3 = X3 + (L2 + 1) * L3
            W3 = X[0][X3:X3 + (L3 + 1) * L4].reshape(L3 + 1, L4)
            X3 = X3 + (L3 + 1) * L4
            W4 = X[0][X3:X3 + (L4 + 1) * L5].reshape(L4 + 1, L5)
            X3 = X3 + (L4 + 1) * L5
            W5 = X[0][X3:X3 + (L5 + 1) * L6].reshape(L5 + 1, L6)
            X3 = X3 + (L5 + 1) * L6
            W6 = X[0][X3:X3 + (L6 + 1) * L7].reshape(L6 + 1, L7)
            X3 = X3 + (L6 + 1) * L7
            W7 = X[0][X3:X3 + (L7 + 1) * L8].reshape(L7 + 1, L8)
            X3 = X3 + (L7 + 1) * L8
            W8 = X[0][X3:X3 + (L8 + 1) * L9].reshape(L8 + 1, L9)
    return ERR


# if __name__ == "__main__":
#     backprop()
Exemple #47
0
import numpy as np
import time
import os
import sys
from scipy.stats import poisson, binom
from scipy.special import erf as erf
from minimize import minimize
import multiprocessing

pmts = [0, 1, 4, 7, 8, 14]
minimize(-1, 0, 0)
Exemple #48
0
def build(config_file=None, output_file=None, options=None):
    have_compressor = []
    try:
        import jsmin
        have_compressor.append("jsmin")
    except ImportError as E:
        print("No jsmin (%s)" % E)
    try:
        # tools/closure_library_jscompiler.py from:
        #       http://code.google.com/p/closure-library/source/browse/trunk/closure/bin/build/jscompiler.py
        import closure_library_jscompiler as closureCompiler
        have_compressor.append("closure")
    except Exception as E:
        print("No closure (%s)" % E)
    try:
        import closure_ws
        have_compressor.append("closure_ws")
    except ImportError as E:
        print("No closure_ws (%s)" % E)

    try:
        import minimize
        have_compressor.append("minimize")
    except ImportError as E:
        print("No minimize (%s)" % E)

    try:
        import uglify_js
        uglify_js.check_available()
        have_compressor.append("uglify-js")
    except Exception as E:
        print("No uglify-js (%s)" % E)

    use_compressor = None
    if options.compressor and options.compressor in have_compressor:
        use_compressor = options.compressor

    sourceDirectory = "../lib"
    configFilename = "full.cfg"
    outputFilename = "OpenLayers.js"

    if config_file:
        configFilename = config_file
        extension = configFilename[-4:]

        if extension != ".cfg":
            configFilename = config_file + ".cfg"

    if output_file:
        outputFilename = output_file

    print("Merging libraries.")
    try:
        if use_compressor == "closure" or use_compressor == 'uglify-js':
            sourceFiles = mergejs.getNames(sourceDirectory, configFilename)
        else:
            merged = mergejs.run(sourceDirectory, None, configFilename)
    except mergejs.MissingImport as E:
        print("\nAbnormal termination.")
        sys.exit("ERROR: %s" % E)

    if options.amdname:
        options.amdname = "'" + options.amdname + "',"
    else:
        options.amdname = ""

    if options.amd == 'pre':
        print("\nAdding AMD function.")
        merged = "define(%sfunction(){%sreturn OpenLayers;});" % (
            options.amdname, merged)

    print("Compressing using %s" % use_compressor)
    if use_compressor == "jsmin":
        minimized = jsmin.jsmin(merged)
    elif use_compressor == "minimize":
        minimized = minimize.minimize(merged)
    elif use_compressor == "closure_ws":
        if len(
                merged
        ) > 1000000:  # The maximum file size for this web service is 1000 KB.
            print("\nPre-compressing using jsmin")
            merged = jsmin.jsmin(merged)
        print("\nIs being compressed using Closure Compiler Service.")
        try:
            minimized = closure_ws.minimize(merged).decode()
        except Exception as E:
            print("\nAbnormal termination.")
            sys.exit(
                "ERROR: Closure Compilation using Web service failed!\n%s" % E)
        if len(minimized) <= 2:
            print("\nAbnormal termination due to compilation errors.")
            sys.exit("ERROR: Closure Compilation using Web service failed!")
        else:
            print(
                "Closure Compilation using Web service has completed successfully."
            )
    elif use_compressor == "closure":
        jscompilerJar = "../tools/closure-compiler.jar"
        if not os.path.isfile(jscompilerJar):
            print("\nNo closure-compiler.jar; read README.txt!")
            sys.exit(
                "ERROR: Closure Compiler \"%s\" does not exist! Read README.txt"
                % jscompilerJar)
        minimized = closureCompiler.Compile(
            jscompilerJar,
            sourceFiles,
            [
                "--externs",
                "closure-compiler/Externs.js",
                "--jscomp_warning",
                "checkVars",  # To enable "undefinedVars"
                "--jscomp_error",
                "checkRegExp",  # Also necessary to enable "undefinedVars"
                "--jscomp_error",
                "undefinedVars"
            ]).decode()
        if minimized is None:
            print("\nAbnormal termination due to compilation errors.")
            sys.exit(
                "ERROR: Closure Compilation failed! See compilation errors.")
        print("Closure Compilation has completed successfully.")
    elif use_compressor == "uglify-js":
        minimized = uglify_js.compile(sourceFiles)
        if (sys.version_info > (3, 0)):
            minimized = minimized.decode()
        if minimized is None:
            print("\nAbnormal termination due to compilation errors.")
            sys.exit(
                "ERROR: Uglify JS compilation failed! See compilation errors.")

        print("Uglify JS compilation has completed successfully.")

    else:  # fallback
        minimized = merged

    if options.amd == 'post':
        print("\nAdding AMD function.")
        minimized = "define(%sfunction(){%sreturn OpenLayers;});" % (
            options.amdname, minimized)

    if options.status:
        print("\nAdding status file.")
        minimized = "// status: " + open(options.status).read() + minimized

    print("\nAdding license file.")
    minimized = open("license.txt").read() + minimized

    print("Writing to %s." % outputFilename)
    open(outputFilename, "w").write(minimized)

    print("Done.")
Exemple #49
0
def manifold_traversal2(FFT,N,M,L,weights,max_iter=5,rbf_var=1e4,verbose=False,checkgrad=True,checkrbf=True,maxnumlinesearch=25,initialize_KQ=None):
  # returns two arrays, xpr and r
  #   xpr is optimized x+r
  #   r is optimized r
  # multiply by F to get latent space vector
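  # Illustrative usage sketch (not part of the original): with FFT = F.dot(F.T)
  # (K x K, where K = N + M + L + 1 and the last row/column corresponds to the
  # query point), something like
  #   xpr, r = manifold_traversal2(FFT, N, M, L, weights=[1e-6, 1e-5])
  # returns one traversal per weight (arrays of shape (len(weights), K)), and
  # F.T.dot(xpr[k]) is assumed to map the k-th result back into the latent space.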
  if verbose:
    print('manifold_traversal2()')
    print('FFT',FFT.shape,FFT.dtype,FFT.min(),FFT.max())
    print('N',N)
    print('M',M)
    print('L',L)
    print('weights',weights)

  #FFT=F.dot(F.T) # K x K
  xpr_result=[]
  r_result=[]
  r=np.zeros(len(FFT))
  x=np.zeros(len(FFT))
  x[-1]=1
  K=N+M+L+1
  P=np.eye(N,K)
  Q=np.concatenate([np.zeros((M,N)),np.eye(M,M+L+1)],axis=1)
  BP=FFT[:,:N] # FFT.dot(P.T) # K x N
  BQ=FFT[:,N:N+M] # FFT.dot(Q.T) # K x M
  CP=np.array([FFT[i,i] for i in range(N)]) # np.array([P[i].dot(FFT).dot(P[i].T) for i in range(N)])
  CQ=np.array([FFT[N+i,N+i] for i in range(M)]) # np.array([Q[i].dot(FFT).dot(Q[i].T) for i in range(M)])

  if initialize_KQ is not None:
    assert initialize_KQ>0 and initialize_KQ<1
    KQ=witness_fn3_KQ(r,x,FFT,BQ,CQ,N,M,L,rbf_var)
    rbf_var*=math.log(KQ.mean())/math.log(initialize_KQ)
    if verbose:
      print('Setting sigma^2 = {}'.format(rbf_var))

  for weight in weights:

    if checkgrad and weight==weights[0]:
      def f(*args):
        return witness_fn3(*args)[0]
      def g(*args):
        return witness_fn3(*args)[1]
      print('Checking gradient ...')
      est_grad=scipy.optimize.approx_fprime(r,f,math.sqrt(np.finfo(float).eps),*(x,FFT,BP,BQ,CP,CQ,N,M,L,rbf_var,weight,False,False))
      #print('est. gradient',est_grad)
      fn_grad=g(r,x,FFT,BP,BQ,CP,CQ,N,M,L,rbf_var,weight,False,True)
      #print('gradient',fn_grad)
      #print('isclose',np.isclose(est_grad,fn_grad,rtol=1e-4,atol=1e-7))
      assert np.allclose(est_grad,fn_grad,rtol=1e-4,atol=1e-5)
      #err=scipy.optimize.check_grad(f,g,r,*(x,FFT,BP,BQ,CP,CQ,N,M,L,rbf_var,weight,False,False))
      #print('gradient error',err)
      #assert err<1e-5
      print('passed.')

    t0=time.time()
    r_opt,loss_opt,iter_opt=minimize.minimize(r,witness_fn3,(x,FFT,BP,BQ,CP,CQ,N,M,L,rbf_var,weight,verbose,checkrbf),maxnumlinesearch=maxnumlinesearch,maxnumfuneval=None,red=1.0,verbose=False)
    t1=time.time()
    if verbose:
      #print('r_opt',r_opt.shape,r_opt.dtype)
      print('r_opt mean P value',r_opt[:N].mean(),r_opt[:N].var())
      print('r_opt mean Q value',r_opt[N:N+M].mean(),r_opt[N:N+M].var())
      if L>0:
        print('r_opt mean T value',r_opt[N+M:N+M+L].mean(),r_opt[N+M:N+M+L].var())
      print('r_opt X value',r_opt[-1])
      print('Optimized in {} minutes.'.format((t1-t0)/60.0))
    xpr_result.append(x+r_opt)
    r_result.append(r_opt)
    r=r_opt
  return np.asarray(xpr_result),np.asarray(r_result)
Exemple #50
0
        plt.scatter(path[[0,-1],0], path[[0,-1],1])  # Indicate the initial and final points of the path
    plt.tight_layout()
    plt.show()


def multiple_paths(function, method=minimize.minimize, num_paths=10, initial_condition_box=1, **kwargs):
    """ Shows graph with a given number of random walk minimization paths. """
    paths = []

    for _ in range(num_paths):
        paths.append(method(function, initial_condition_box=initial_condition_box, **kwargs))

    show_graph(function, paths, box_size=1.2*initial_condition_box)

print("Function f:")
minimize.minimize(f, initial_condition=(2,0))
minimize.minimize_adaptive(f, initial_condition=(2,0))

print("Function g:")
minimize.minimize(g, initial_condition=(2,0))
minimize.minimize_adaptive(g, initial_condition=(2,0))

print("Function h:")
minimize.minimize(h, dimension=4)
minimize.minimize_adaptive(h, dimension=4)

print("Graphs:")
multiple_paths(f)
multiple_paths(f, method=minimize.minimize_adaptive)

multiple_paths(g)
Exemple #51
0
        pass

path = sys.argv[1]

print "JS Page Compressor - Modified for cartografur"
print "Path: " + path

for root, dirs, files in os.walk(path):
    for filename in files:
        if not filename.startswith("."):
            filepath = os.path.join(path, filename)
            filepath = filepath.replace("\\", "/")
            print 'File: ' + filepath

            data = file(filepath).read()

            if have_compressor == "jsmin":
                print "Compressing using jsmin."
                minimized = jsmin.jsmin(data)
            elif have_compressor == "minimize":
                print "Compressing using minimize."
                minimized = minimize.minimize(data)
            else: # fallback
                print "Not compressing, no compressor found."
                minimized = data

            print "Writing to %s." % filepath
            file(filepath, "w").write(minimized)

print "Done."
Exemple #52
0
def build(config_file = None, output_file = None, options = None):
    have_compressor = []
    try:
        import jsmin
        have_compressor.append("jsmin")
    except ImportError:
        print("No jsmin")
    try:
        # tools/closure_library_jscompiler.py from: 
        #       http://code.google.com/p/closure-library/source/browse/trunk/closure/bin/build/jscompiler.py
        import closure_library_jscompiler as closureCompiler
        have_compressor.append("closure")
    except Exception as E:
        print("No closure (%s)" % E)
    try:
        import closure_ws
        have_compressor.append("closure_ws")
    except ImportError:
        print("No closure_ws")
    
    try:
        import minimize
        have_compressor.append("minimize")
    except ImportError:
        print("No minimize")

    use_compressor = None
    if options.compressor and options.compressor in have_compressor:
        use_compressor = options.compressor

    sourceDirectory = "../lib"
    configFilename = "full.cfg"
    outputFilename = "OpenLayers.js"

    if config_file:
        configFilename = config_file
        extension = configFilename[-4:]

        if extension  != ".cfg":
            configFilename = config_file + ".cfg"

    if output_file:
        outputFilename = output_file

    print("Merging libraries.")
    try:
        if use_compressor == "closure":
            sourceFiles = mergejs.getNames(sourceDirectory, configFilename)
        else:
            merged = mergejs.run(sourceDirectory, None, configFilename)
    except mergejs.MissingImport as E:
        print("\nAbnormal termination.")
        sys.exit("ERROR: %s" % E)

    if options.amdname:
        options.amdname = "'" + options.amdname + "',"
    else:
        options.amdname = ""
        
    if options.amd == 'pre':
        print("\nAdding AMD function.")
        merged = "define(%sfunction(){%sreturn OpenLayers;});" % (options.amdname, merged)
    
    print("Compressing using %s" % use_compressor)
    if use_compressor == "jsmin":
        minimized = jsmin.jsmin(merged)
    elif use_compressor == "minimize":
        minimized = minimize.minimize(merged)
    elif use_compressor == "closure_ws":
        if len(merged) > 1000000: # The maximum file size for this web service is 1000 KB.
            print("\nPre-compressing using jsmin")
            merged = jsmin.jsmin(merged)
        print("\nIs being compressed using Closure Compiler Service.")
        try:
            minimized = closure_ws.minimize(merged)
        except Exception as E:
            print("\nAbnormal termination.")
            sys.exit("ERROR: Closure Compilation using Web service failed!\n%s" % E)
        if len(minimized) <= 2:
            print("\nAbnormal termination due to compilation errors.")
            sys.exit("ERROR: Closure Compilation using Web service failed!")
        else:
            print("Closure Compilation using Web service has completed successfully.")
    elif use_compressor == "closure":
        jscompilerJar = "../tools/closure-compiler.jar"
        if not os.path.isfile(jscompilerJar):
            print("\nNo closure-compiler.jar; read README.txt!")
            sys.exit("ERROR: Closure Compiler \"%s\" does not exist! Read README.txt" % jscompilerJar)
        minimized = closureCompiler.Compile(
            jscompilerJar, 
            sourceFiles, [
                "--externs", "closure-compiler/Externs.js",
                "--jscomp_warning", "checkVars",   # To enable "undefinedVars"
                "--jscomp_error",   "checkRegExp", # Also necessary to enable "undefinedVars"
                "--jscomp_error",   "undefinedVars"
            ]
        )
        if minimized is None:
            print("\nAbnormal termination due to compilation errors.")
            sys.exit("ERROR: Closure Compilation failed! See compilation errors.") 
        print("Closure Compilation has completed successfully.")
    else: # fallback
        minimized = merged 

    if options.amd == 'post':
        print("\nAdding AMD function.")
        minimized = "define(%sfunction(){%sreturn OpenLayers;});" % (options.amdname, minimized)
    
    if options.status:
        print("\nAdding status file.")
        minimized = "// status: " + file(options.status).read() + minimized
    
    print("\nAdding license file.")
    minimized = open("license.txt").read() + minimized

    print("Writing to %s." % outputFilename)
    open(outputFilename, "w").write(minimized)

    print("Done.")