Example #1
def make_weights(datasets, wtype, make_shared=False, sparse=False):
    """Build one weight matrix per dataset from its covariance."""
    weights = []
    for ds in datasets:
        if wtype == 'ichol':
            w = num.linalg.inv(ds.covariance.chol)
        elif wtype == 'icov_chol':
            w = ds.covariance.chol_inverse
        elif wtype == 'icov':
            w = ds.covariance.inverse
        else:
            raise NotImplementedError(
                'wtype "%s" not implemented!' % wtype)

        if make_shared:
            # wrap in a Theano shared variable, optionally converted to sparse CSC
            sw = shared(w)
            if sparse:
                sw = ts.csc_from_dense(sw)
            weights.append(sw)
        else:
            weights.append(w)

    return weights
Example #2
 def test_sparsevariable(self):
     ## Re-init counter
     Variable.__count__ = count(0)
     r1 = sparse.csc_matrix(name='x', dtype='float32')
     r2 = sparse.dense_from_sparse(r1)
     r3 = sparse.csc_from_dense(r2)
     assert r1.auto_name == "auto_0"
     assert r2.auto_name == "auto_1"
     assert r3.auto_name == "auto_2"
Example #3
 def test_sparsevariable(self):
     # Get counter value
     autoname_id = next(Variable.__count__)
     Variable.__count__ = count(autoname_id)
     r1 = sparse.csc_matrix(name="x", dtype="float32")
     r2 = sparse.dense_from_sparse(r1)
     r3 = sparse.csc_from_dense(r2)
     assert r1.auto_name == "auto_" + str(autoname_id)
     assert r2.auto_name == "auto_" + str(autoname_id + 1)
     assert r3.auto_name == "auto_" + str(autoname_id + 2)
Example #4
 def test_sparsevariable(self):
     ## Get counter value
     autoname_id = next(Variable.__count__)
     Variable.__count__ = count(autoname_id)
     r1 = sparse.csc_matrix(name='x', dtype='float32')
     r2 = sparse.dense_from_sparse(r1)
     r3 = sparse.csc_from_dense(r2)
     assert r1.auto_name == "auto_" + str(autoname_id)
     assert r2.auto_name == "auto_" + str(autoname_id + 1)
     assert r3.auto_name == "auto_" + str(autoname_id + 2)
Example #5
 def test_sparsevariable(self):
     # Get counter value
     if not sparse.enable_sparse:
         raise SkipTest('Optional package SciPy not installed')
     autoname_id = next(Variable.__count__)
     Variable.__count__ = count(autoname_id)
     r1 = sparse.csc_matrix(name='x', dtype='float32')
     r2 = sparse.dense_from_sparse(r1)
     r3 = sparse.csc_from_dense(r2)
     assert r1.auto_name == "auto_" + str(autoname_id)
     assert r2.auto_name == "auto_" + str(autoname_id + 1)
     assert r3.auto_name == "auto_" + str(autoname_id + 2)
Example #6
 def test0(self):
     a = tensor.as_tensor_variable(numpy.random.rand(5))
     s = csc_from_dense(a)
     val = eval_outputs([s])
     self.assertTrue(str(val.dtype) == "float64")
     self.assertTrue(val.format == "csc")
Example #7
 def test_sparse_from_dense(self):
     x = tensor.matrix()
     self._compile_and_check(
         [x], [csc_from_dense(x)], [numpy.random.randn(10, 40).astype(config.floatX)], csc_from_dense.__class__
     )
Example #8
x = T.matrix('x', dtype='float32')   # the data is presented as rasterized images
#y = T.matrix('y')
#x.tag.test_value = np.random.rand(50,50)
#x.flatten()

#weights1 = gkern2(filter_shape[2],sigma).reshape(filter_shape)


input_shape = (51, 5)
inp_filter_shape = (5, 5)
inp_filter_sigma = 7
L0_shape = (31, 1)
filter_shape = (9, 9)
L1_shape = (15, 15)

layer0_input = sparse.csc_from_dense(x)

#final_shape = (s2*s3)

sigma = 1

#i_file = 'Wi_' + str(input_shape) + 'x' + str(L0_shape) + '_' + str(filter_shape[0]) + 's' + str(sigma) + '.npy'
#r_file = 'test_Wr.npy'
LR = np.cast['float32'](0.000001)
#aux = np.array(0.01,dtype='float32')
delta = np.cast['float32'](0.000001)
Wmax = np.cast['float32'](0.9)
Wmin = np.cast['float32'](0.0)
awe = np.cast['float32'](-0.7) # LR * int(W(*)e), usually negative

global generate
Example #9
# csc_matrix and csr_matrix. These can be called with the usual name
# and dtype parameters, but no broadcastable flags are allowed. This is
# forbidden since the sparse package, as the SciPy sparse module, does not
# provide any way to handle a number of dimensions different from two. The
# set of all accepted dtype for the sparse matrices can be found in
# sparse.all_dtypes.
print(sparse.all_dtypes)
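
# For instance, both constructors take just a name and a dtype (a minimal
# sketch; csr_matrix is assumed to mirror the csc_matrix signature):
m_csc = sparse.csc_matrix(name='m_csc', dtype='float64')
m_csr = sparse.csr_matrix(name='m_csr', dtype='float64')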

# 2.1 To and Fro
# To move back and forth from a dense matrix to a sparse matrix
# representation, Theano provides the dense_from_sparse and csc_from_dense
# functions. No additional detail must be provided. Here is an example
# that performs a full cycle from sparse to sparse:
x = sparse.csc_matrix(name='x', dtype='float32')
y = sparse.dense_from_sparse(x)
z = sparse.csc_from_dense(y)

# 2.2 Properties and Construction
# Although sparse variables do not allow direct access to their properties,
# this can be accomplished using the csm_properties function. This will
# return a tuple of one-dimensional tensor variables that represent the
# internal characteristics of the sparse matrix.

# In order to reconstruct a sparse matrix from some properties, the
# functions CSC and CSR can be used. These will create the sparse matrix in
# the desired format. As an example, the following code converts a csc
# matrix into a csr one.
x = sparse.csc_matrix(name='x', dtype='int64')
data, indices, indptr, shape = sparse.csm_properties(x)
y = sparse.CSR(data, indices, indptr, shape)
f = theano.function([x], y)
Example #10
import numpy as np
import scipy.sparse as sp
import theano
from theano import sparse

# pylint: disable = bad-whitespace, invalid-name, no-member, bad-continuation, assignment-from-no-return

# if shape[0] > shape[1], use csr. Otherwise, use csc
# but, not all ops are available for both yet
# so use the one that has what you need
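
# A minimal sketch of that heuristic (pick_format is a hypothetical helper,
# not part of Theano):
def pick_format(shape):
    # tall matrices -> the csr constructor; wide or square -> csc
    return sparse.csr_matrix if shape[0] > shape[1] else sparse.csc_matrix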

# to and fro

x = sparse.csc_matrix(name='x', dtype='float32')
y = sparse.dense_from_sparse(x)
z = sparse.csc_from_dense(y)

# reconstruct a csc matrix as a csr one

x = sparse.csc_matrix(name='x', dtype='int64')
data, indices, indptr, shape = sparse.csm_properties(x)
y = sparse.CSR(data, indices, indptr, shape)
f = theano.function([x], y)
a = sp.csc_matrix(np.asarray([[0, 1, 1], [0, 0, 0], [1, 0, 0]]))
print(a.toarray())
print(f(a).toarray())

# "structured" operations
# act only on (originally) nonzero elements

x = sparse.csc_matrix(name='x', dtype='float32')
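
# A minimal sketch of a structured op, assuming sparse.structured_add is
# available (it adds a scalar to the stored nonzero entries only):
y = sparse.structured_add(x, 2)
f = theano.function([x], y)
b = sp.csc_matrix(np.asarray([[0, 1, 1], [0, 0, 0], [1, 0, 0]], dtype='float32'))
print(f(b).toarray())  # zeros stay zero; the original ones become 3.0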
Example #11
 def test0(self):
     a = tensor.as_tensor_variable(numpy.random.rand(5))
     s = csc_from_dense(a)
     val = eval_outputs([s])
     self.assertTrue(str(val.dtype) == 'float64')
     self.assertTrue(val.format == 'csc')
Example #12
  def __init__(self,fname,constants={},sparse=False):
    # parse model specification
    with open(fname,'r') as fid:
      mod = json.load(fid,object_pairs_hook=OrderedDict)
    self.mod = mod

    # constants
    self.con_dict = OrderedDict()
    for name in mod['constants']:
      value = constants[name]
      self.con_dict[name] = np.array(value) if type(value) is list else value

    # arguments
    self.arg_info = OrderedDict()
    self.arg_dict = OrderedDict()
    for (name,spec) in mod['arguments'].items():
      asize = spec['size']
      (amin,amax) = spec['range']
      agrid = np.linspace(amin,amax,asize)

      info = OrderedDict()
      info['size'] = asize
      info['grid'] = agrid

      self.arg_info[name] = info
      self.arg_dict[name] = agrid

    # parameters
    self.par_info = OrderedDict()
    self.par_sizes = []
    for (name,spec) in mod['parameters'].items():
      ptype = spec.get('type','scalar')
      psize = 1 if ptype == 'scalar' else spec['size']

      info = OrderedDict()
      info['type'] = ptype
      info['size'] = psize

      self.par_info[name] = info
      self.par_sizes.append(psize)

    # variables
    self.var_info = OrderedDict()
    self.var_sizes = []
    for (name,spec) in mod['variables'].items():
      vtype = spec['type']

      info = OrderedDict()
      info['type'] = vtype

      if vtype == 'scalar':
        vsize = 1
        self.var_sizes.append(vsize)
      elif vtype == 'vector':
        vsize = spec['size']
        self.var_sizes.append(vsize)
      elif vtype == 'function':
        vder = spec.get('derivs',[])
        nder = len(vder)
        args = spec['args']
        ainfo = [self.arg_info[arg] for arg in args]
        vsize = np.prod([ai['size'] for ai in ainfo])
        info['vder'] = vder
        info['nder'] = nder
        info['args'] = args
        info['shape'] = [self.arg_info[a]['size'] for a in args]
        if len(args) > 1:
          grids = np.meshgrid(*[self.arg_info[a]['grid'] for a in args])
          info['grid'] = [g.transpose().flatten() for g in grids]
        else:
          info['grid'] = [self.arg_info[args[0]]['grid']]
        self.var_sizes.append(vsize)
        self.var_sizes += sum(map(len,vder))*[vsize]

      info['size'] = vsize
      self.var_info[name] = info

    # totals
    self.n_pars = len(self.par_info)
    self.n_vars = len(self.var_info)

    self.sz_pars = np.sum(self.par_sizes)
    self.sz_vars = np.sum(self.var_sizes)

    # input vectors
    self.par_vec = T.dvector('parvec')
    self.var_vec = T.dvector('varvec')

    # unpack and map out variables
    self.par_dict = OrderedDict()
    piter = iter(split(self.par_vec,self.par_sizes))
    for (name,info) in self.par_info.items():
      ptype = info['type']
      par = next(piter)
      if ptype == 'scalar':
        par = par[0]
        par.name = name
        self.par_dict[name] = par
      else:
        par.name = name
        self.par_dict[name] = par

    self.var_dict = OrderedDict()
    self.der_dict = OrderedDict()
    viter = iter(split(self.var_vec,self.var_sizes))
    for (name,info) in self.var_info.items():
      var = next(viter)
      vtype = info['type']
      if vtype == 'scalar':
        var = var[0]
        var.name = name
        self.var_dict[name] = var
      elif vtype == 'vector':
        var.name = name
        self.var_dict[name] = var
      elif vtype == 'function':
        var.name = name
        self.var_dict[name] = var
        vder = info.get('vder',[])
        nder = len(vder)
        self.der_dict[var] = {'': var}
        for der in vder:
          for s in prefixes(der):
            dvar = next(viter)
            dvar.name = name+'_'+s
            self.der_dict[var][s] = dvar

    # define operators
    def diff(var,*args):
      name = ''.join([getkey(self.arg_dict,v) for v in args])
      return self.der_dict[var][name]
    def vslice(var,arg,point):
      var_name = var.name
      arg_name = getkey(self.arg_dict,arg)
      var_info = self.var_info[var_name]
      args = var_info['args']
      idx = next(i for (i, a) in enumerate(args) if a == arg_name)
      shape = var_info['shape']
      idx_list = slice_dim([point],idx,shape)
      return var[idx_list]
    def grid(var,arg):
      var_name = var.name
      arg_name = getkey(self.arg_dict,arg)
      var_info = self.var_info[var_name]
      args = var_info['args']
      idx = next(i for (i, a) in enumerate(args) if a == arg_name)
      return var_info['grid'][idx]
    def interp(var,arg,x):
      i = icut(arg,x)
      t = np.clip((arg[i+1]-x)/(arg[i+1]-arg[i]),0.0,1.0)
      return t*vslice(var,arg,i) + (1.0-t)*vslice(var,arg,i+1)
    self.func_dict = {'diff': diff, 'slice': vslice, 'grid': grid, 'interp': interp}

    # combine them all
    self.sym_dict = merge(op_dict,self.con_dict,self.par_dict,self.var_dict,self.func_dict,self.arg_dict)

    # evaluate
    self.equations = []

    # regular equations
    for eq in mod['equations']:
      self.equations.append(eval(eq,{},self.sym_dict))

    # derivative relations
    for (name,info) in self.var_info.items():
      if info['type'] == 'function':
        var = self.var_dict[name]
        size = info['size']

        # derivative relations - symmetric except at 0
        vder = info.get('vder','')
        args = info['args']
        shape = info['shape']
        for der in vder:
          v0 = '' # function value
          for v1 in prefixes(der):
            # collect argument info
            arg = v1[-1]
            adx = next(i for (i, a) in enumerate(args) if a == arg)
            s = shape[adx]
            grid = info['grid'][adx]

            # generate accessors
            zer_idx = slice_dim([0],adx,shape)
            one_idx = slice_dim([1],adx,shape)
            beg_idx = slice_dim(range(s-2),adx,shape)
            mid_idx = slice_dim(range(1,s-1),adx,shape)
            end_idx = slice_dim(range(2,s),adx,shape)

            # calculate derivatives
            d0 = self.der_dict[var][v0]
            d1 = self.der_dict[var][v1]
            self.equations.append(d0[one_idx]-d0[zer_idx]-(grid[one_idx]-grid[zer_idx])*d1[zer_idx])
            self.equations.append((d0[end_idx]-d0[beg_idx])-(grid[end_idx]-grid[beg_idx])*d1[mid_idx])

            # to next level
            v0 = v1

    # repack
    self.eqn_vec = T.join(0,*map(ensure_vector,self.equations))

    # jacobians
    self.par_jac = T.jacobian(self.eqn_vec,self.par_vec)
    self.var_jac = T.jacobian(self.eqn_vec,self.var_vec)

    # sparse?
    if sparse:
      self.par_jac = S.csc_from_dense(self.par_jac)
      self.var_jac = S.csc_from_dense(self.var_jac)
      self.linsolve = spsolve
    else:
      self.linsolve = np.linalg.solve

    # compile
    print('Compiling...')
    self.eqn_fun = theano.function([self.par_vec,self.var_vec],self.eqn_vec)
    self.parjac_fun = theano.function([self.par_vec,self.var_vec],self.par_jac)
    self.varjac_fun = theano.function([self.par_vec,self.var_vec],self.var_jac)

    # newtonian path
    t = T.dscalar('t')
    start = T.dvector('start')
    finish = T.dvector('finish')
    path = (1.0-t)*start + t*finish
    dpath = T.jacobian(path,t)
    self.path_fun = theano.function([start,finish,t],path)
    self.dpath_fun = theano.function([start,finish,t],dpath)