Example #1
    def __init__(self, input_shape, num_features=[12], itype='float64',use_conv=True,version='linear'):
        self.num_features, self.itype = num_features, itype
        nsite=np.prod(input_shape)
        eta=0.2
        super(WangLei, self).__init__(itype, do_shape_check=True)

        stride = nsite
        if not use_conv:
            num_feature_hidden=num_features[0]*nsite
            self.layers.append(Linear(input_shape, itype, weight=eta*typed_randn(self.itype, (num_feature_hidden, nsite)),
                    bias=eta*typed_randn(self.itype, (num_feature_hidden,))))
        else:
            self.layers.append(functions.Reshape(input_shape, itype=itype, output_shape=(1,)+input_shape))
            self.add_layer(SPConv, weight=eta*typed_randn(self.itype, (self.num_features[0], 1, nsite)),
                    bias=eta*typed_randn(self.itype, (num_features[0],)), boundary='P', strides=(stride,))
            self.add_layer(functions.Reshape, output_shape=(num_features[0], nsite//stride))
        self.add_layer(functions.Log2cosh)
        self.add_layer(functions.Sum, axis=-1)
        self.add_layer(functions.Exp)
        if version=='const-linear': 
            self.add_layer(Linear, weight=np.array([[-1,-1,1,1]], dtype=itype, order='F'),
                    bias=np.zeros((1,), dtype=itype), var_mask=(0,0))
        elif version=='linear':
            for nfi, nfo in zip(num_features, num_features[1:]+[1]):
                self.add_layer(Linear, weight=eta*typed_randn(self.itype, (nfo, nfi)),
                        bias=eta*typed_randn(self.itype, (nfo,)),var_mask=(1,1))
        elif version=='rbm':
            pass
        else:
            raise ValueError('version %s does not exist' % version)
        self.add_layer(functions.Reshape, output_shape=())
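A minimal numpy sketch of what this graph evaluates for use_conv=True, version='linear' and stride=nsite (so the periodic convolution has a single output window); the helper name and the dense form below are illustrative, not part of the library:

import numpy as np

def wanglei_amplitude_sketch(s, W0, b0, linears):
    # a single-window convolution collapses to a dense map; exp(log 2cosh(x)) == 2cosh(x)
    h = 2 * np.cosh(W0 @ s + b0)           # shape (num_features[0],)
    for W, b in linears:                   # the trailing Linear chain, ending in 1 feature
        h = W @ h + b
    return h.reshape(())                   # scalar wavefunction amplitude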
Example #2
    def __init__(self,
                 input_shape,
                 num_feature_hidden,
                 itype='complex128',
                 sign_func=None):
        self.num_feature_hidden, self.itype = num_feature_hidden, itype
        nsite = np.prod(input_shape)
        eta = 0.1
        super(RBM, self).__init__(itype, do_shape_check=False)

        self.layers.append(
            functions.Reshape(input_shape,
                              itype=itype,
                              output_shape=(1, ) + input_shape))
        self.add_layer(
            SPConv,
            weight=eta * typed_randn(self.itype,
                                     (self.num_feature_hidden, 1, nsite)),
            bias=eta * typed_randn(self.itype, (num_feature_hidden, )),
            boundary='P')
        self.add_layer(functions.Log2cosh)
        self.add_layer(functions.Reshape,
                       output_shape=(self.num_feature_hidden * nsite, ))
        self.add_layer(functions.Sum, axis=0)
        self.add_layer(functions.Exp)

        self._get_sign = sign_func
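The stack above (full-size periodic convolution, log2cosh, sum, exp) is the translation-invariant RBM amplitude; a self-contained numpy sketch, assuming SPConv with boundary='P' enumerates all nsite cyclic translations of the kernel:

import numpy as np

def rbm_amplitude_sketch(s, W, b):
    # theta[f, d] = sum_i W[f, i] * s[(i + d) % nsite] + b[f]
    nf, nsite = W.shape
    theta = np.array([[W[f] @ np.roll(s, -d) for d in range(nsite)]
                      for f in range(nf)]) + b[:, None]
    # exp(sum log 2cosh) is the product of 2*cosh over features and translations
    return np.exp(np.log(2 * np.cosh(theta)).sum())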
Example #3
    def __init__(self, input_shape, num_feature_hidden, with_linear=False, use_msr=False, theta_period=2):
        self.num_feature_hidden = num_feature_hidden
        self.use_msr = use_msr
        dtype = 'float64'
        self.with_linear = with_linear
        nsite=np.prod(input_shape)
        eta=0.1
        super(Roger, self).__init__(dtype, do_shape_check=False)

        self.layers.append(functions.Reshape(input_shape, itype=dtype, output_shape=(1,)+input_shape))
        self.add_layer(SPConv, weight=eta*typed_randn(dtype, (self.num_feature_hidden, 1, nsite)),
                bias=eta*typed_randn(dtype, (num_feature_hidden,)), boundary='P')
        self.add_layer(functions.Log2cosh)
        self.add_layer(functions.Reshape, output_shape=(num_feature_hidden, nsite) if with_linear else (num_feature_hidden*nsite,))
        self.add_layer(functions.Sum, axis=-1)
        self.add_layer(functions.Exp)
        if with_linear:
            self.add_layer(Linear, weight=eta*typed_randn(dtype, (128, self.num_feature_hidden)),
                    bias=0*typed_randn(dtype, (128,)))
            self.add_layer(functions.ReLU)
            self.add_layer(Linear, weight=eta*typed_randn(dtype, (64, 128)),
                    bias=0*typed_randn(dtype, (64,)))
            self.add_layer(functions.ReLU)
            self.add_layer(Linear, weight=eta*typed_randn(dtype, (32, 64)),
                    bias=0*typed_randn(dtype, (32,)))
            self.add_layer(functions.ReLU)
            self.add_layer(Linear, weight=eta*typed_randn(dtype, (1, 32)),
                    bias=0*typed_randn(dtype, (1,)))
            self.add_layer(functions.Reshape, output_shape=())

        if use_msr and theta_period!=2:
            raise ValueError('use_msr=True requires theta_period == 2')
        self.thnn = PSNN(input_shape, period=theta_period, batch_wise=False, output_mode='theta', use_msr=use_msr)
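functions.Log2cosh appears in most of these ansätze; if it has to be re-implemented for real inputs, a numerically stable form avoids overflowing cosh for large arguments (a sketch, not the library's code):

import numpy as np

def log2cosh(x):
    # log(2*cosh(x)) = |x| + log(1 + exp(-2|x|)); the right-hand side never overflows
    ax = np.abs(x)
    return ax + np.log1p(np.exp(-2 * ax))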
Example #4
    def __init__(self, input_shape, NF=4, K=2, num_features=[12], eta0=0.2, eta1=0.2,
            itype='complex128',version='linear', dtype0='complex128', dtype1='complex128', stride=None):
        self.num_features = num_features
        if stride is None:
            if any([n%4!=0 for n in input_shape]):
                stride=2
            else:
                stride=1
        self.stride = stride
        nsite=np.prod(input_shape)
        eta=eta0
        super(WangLei4, self).__init__()

        D = len(input_shape)
        ishape = (1,)+input_shape
        self.layers.append(functions.Reshape(input_shape, itype=itype, output_shape=ishape))

        eta=eta1
        dtype = dtype0
        self.add_layer(functions.Log)
        #self.add_layer(SPConv, weight=eta*typed_randn(self.itype, (NF,1)+(K,)*D),
        #        bias=eta*typed_randn(self.itype, (NF,)), boundary='P', strides=(stride,)*D)
        self.add_layer(SPConv, weight=eta*typed_randn(dtype, (NF,1)+(K,)*D),
                bias=eta*typed_randn(dtype, (NF,)), boundary='P', strides=(stride,)*D)
        self.add_layer(functions.Exp)

        dtype = dtype1
        if version=='linear':
            self.add_layer(functions.Reshape, output_shape=(np.prod(self.layers[-1].output_shape),))
            self.add_layer(Linear, weight=eta*typed_randn(dtype, (num_features[0], self.layers[-1].output_shape[-1])),
                    bias=eta*typed_randn(dtype, (num_features[0],)),var_mask=(1,1))
        elif version=='conv':
            stride= 1
            imgsize = self.layers[-1].output_shape[-D:]
            self.add_layer(SPConv, weight=eta*typed_randn(dtype, (self.num_features[0], NF)+imgsize),
                    bias=eta*typed_randn(dtype, (num_features[0],)), boundary='P', strides=(stride,)*D)
            self.add_layer(functions.Reshape, output_shape=(num_features[0], np.prod(imgsize)//stride**D))

            self.add_layer(functions.Power,order=3)
            #self.add_layer(functions.Log2cosh)
            self.add_layer(functions.Mean, axis=-1)
        if version=='const-linear': 
            self.add_layer(Linear, weight=np.array([[-1,-1,1,1]],dtype=itype, order='F'),
                    bias=np.zeros((1,),dtype=itype),var_mask=(0,0))
        elif version=='linear' or version=='conv':
            for i,(nfi, nfo) in enumerate(zip(num_features, num_features[1:]+[1])):
                if i!=0:
                    self.add_layer(functions.ReLU)
                self.add_layer(Linear, weight=eta*typed_randn(dtype, (nfo, nfi)),
                        bias=eta*typed_randn(dtype, (nfo,)),var_mask=(1,1))
        elif version=='rbm':
            pass
        else:
            raise ValueError('version %s does not exist' % version)
        self.add_layer(functions.Reshape, output_shape=())
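The Log, SPConv, Exp sandwich acts as a product layer: a weighted sum of logarithms exponentiates back into a product of powers. A small numpy check of that identity for positive inputs (the snippet itself uses complex dtypes, where the branch cut of the logarithm matters):

import numpy as np

x = np.array([0.5, 2.0, 1.5])
w = np.array([1.0, 2.0, -1.0])
b = 0.3
lhs = np.exp(w @ np.log(x) + b)       # what Log -> convolution -> Exp computes per window
rhs = np.exp(b) * np.prod(x ** w)     # the same value written as a product of powers
assert np.allclose(lhs, rhs)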
Example #5
    def __init__(self,
                 input_shape,
                 num_feature_hidden,
                 mlp_shape,
                 use_msr=False,
                 theta_period=2):
        self.num_feature_hidden = num_feature_hidden
        self.use_msr = use_msr
        dtype = 'float64'
        nsite = np.prod(input_shape)
        eta = 0.1
        super(RTheta_MLP_EXP, self).__init__(dtype, do_shape_check=False)

        self.layers.append(
            functions.Reshape(input_shape,
                              itype=dtype,
                              output_shape=(1, ) + input_shape))
        self.add_layer(SPConv,
                       weight=eta *
                       typed_randn(dtype, (self.num_feature_hidden, 1, nsite)),
                       bias=eta * typed_randn(dtype, (num_feature_hidden, )),
                       boundary='P')
        self.add_layer(functions.Log2cosh)
        self.add_layer(functions.Reshape,
                       output_shape=(num_feature_hidden, nsite))
        self.add_layer(functions.Sum, axis=-1)
        self.add_layer(functions.Exp)
        self.add_layer(Linear,
                       weight=eta *
                       typed_randn(dtype,
                                   (mlp_shape[0], self.num_feature_hidden)),
                       bias=0 * typed_randn(dtype, (mlp_shape[0], )))
        self.add_layer(functions.ReLU)
        for i in range(len(mlp_shape) - 1):
            self.add_layer(Linear,
                           weight=eta *
                           typed_randn(dtype,
                                       (mlp_shape[i + 1], mlp_shape[i])),
                           bias=0.1 * typed_randn(dtype, (mlp_shape[i + 1], )))
            self.add_layer(functions.ReLU)
        self.add_layer(Linear,
                       weight=eta * typed_randn(dtype, (1, mlp_shape[-1])),
                       bias=0 * typed_randn(dtype, (1, )))
        # self.add_layer(functions.ReLU)
        self.add_layer(functions.Exp)
        self.add_layer(functions.Reshape, output_shape=())
        if use_msr and theta_period != 2:
            raise ValueError('use_msr=True requires theta_period == 2')
        self.thnn = PSNN(input_shape,
                         period=theta_period,
                         batch_wise=False,
                         output_mode='theta',
                         use_msr=use_msr)
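The loop over mlp_shape builds an ordinary ReLU MLP that ends in a one-dimensional Linear layer followed by Exp; a numpy sketch of that head, with weights and biases standing in for the typed_randn initialisations above:

import numpy as np

def mlp_head_sketch(h, weights, biases):
    for W, b in zip(weights[:-1], biases[:-1]):
        h = np.maximum(W @ h + b, 0)        # Linear followed by ReLU
    out = weights[-1] @ h + biases[-1]      # final Linear down to one feature
    return np.exp(out).reshape(())          # functions.Exp, then scalar Reshape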
Example #6
    def __init__(self,
                 input_shape,
                 num_feature_hidden,
                 with_linear=False,
                 use_msr=False,
                 theta_period=2,
                 itype='float64'):
        self.num_feature_hidden = num_feature_hidden
        self.use_msr = use_msr
        self.with_linear = with_linear
        nsite = np.prod(input_shape)
        eta = 0.1
        super(WangLei2, self).__init__(itype, do_shape_check=False)

        self.layers.append(
            functions.Reshape(input_shape,
                              itype=itype,
                              output_shape=(1, ) + input_shape))
        self.add_layer(SPConv,
                       weight=eta *
                       typed_randn(itype, (self.num_feature_hidden, 1, nsite)),
                       bias=eta * typed_randn(itype, (num_feature_hidden, )),
                       boundary='P')
        self.add_layer(functions.Log2cosh)
        self.add_layer(functions.Reshape,
                       output_shape=(num_feature_hidden,
                                     nsite) if with_linear else
                       (num_feature_hidden * nsite, ))
        self.add_layer(functions.Sum, axis=-1)
        self.add_layer(functions.Exp)
        if with_linear:
            #self.add_layer(Linear, weight=eta*typed_randn(itype, (1, self.num_feature_hidden)),
            #        bias=0*typed_randn(itype, (1,)))
            self.add_layer(Linear,
                           weight=np.array([[1, 1, -1, -1]], dtype=itype),
                           bias=0 * typed_randn(itype, (1, )),
                           var_mask=(0, 0))
            self.add_layer(functions.Reshape, output_shape=())

        if use_msr and theta_period != 2:
            raise ValueError('use_msr=True requires theta_period == 2')
        if use_msr:
            self.thnn = psnn_msr(nsite=input_shape[0])
        else:
            self.thnn = PSNN(input_shape,
                             period=theta_period,
                             batch_wise=False,
                             output_mode='theta')
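With with_linear=True the trailing Linear layer is frozen (var_mask=(0, 0) presumably disables training of both weight and bias) and reduces the four hidden features to a fixed signed sum; in numpy terms:

import numpy as np

h = np.array([0.9, 1.1, 0.7, 1.3])               # exp(sum log2cosh(...)) per hidden feature
amplitude = (np.array([[1, 1, -1, -1]]) @ h)[0]  # = h[0] + h[1] - h[2] - h[3]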
Example #7
    def __init__(self, input_shape, nfs, itype='complex128'):
        assert (len(nfs) == 4)
        eta = 1
        nsite = np.prod(input_shape)
        self.nfs, self.itype = nfs, itype
        DIM = len(input_shape)

        POOLING_MODE = 'max-abs'

        super(ConvWF, self).__init__(itype=itype, do_shape_check=False)
        self.layers.append(
            functions.Reshape(input_shape,
                              itype=itype,
                              output_shape=(1, ) + input_shape))

        self.add_layer(SPConv,
                       weight=typed_randn(itype,
                                          (self.nfs[0], 1, nsite)) * eta,
                       bias=typed_randn(itype, (nfs[0], )) * eta,
                       boundary='P')
        self.add_layer(functions.Pooling,
                       kernel_shape=(2, ) * DIM,
                       mode=POOLING_MODE)
        self.add_layer(functions.ReLU)

        self.add_layer(
            SPConv,
            weight=typed_randn(itype, (self.nfs[1], self.nfs[0], nsite)) * eta,
            bias=typed_randn(itype, (nfs[1], )) * eta,
            boundary='P')
        self.add_layer(functions.Pooling,
                       kernel_shape=(2, ) * DIM,
                       mode=POOLING_MODE)
        self.add_layer(functions.ReLU)

        nf1_ = np.prod(self.layers[-1].output_shape)
        self.add_layer(functions.Reshape, output_shape=(nf1_, ))

        self.add_layer(Apdot,
                       weight=typed_randn(itype, (nfs[3], nfs[1])) * eta,
                       bias=typed_randn(itype, (nfs[3], )) * eta)
        self.add_layer(Linear,
                       weight=typed_randn(itype, (1, nfs[3])) * eta,
                       bias=typed_randn(itype, (1, )) * eta)

        self.add_layer(functions.Reshape, output_shape=())
        self._shapes = None
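POOLING_MODE = 'max-abs' is not defined in this snippet; a plausible reading, sketched here for a single feature row, is that every pooling window keeps its largest-magnitude entry with the sign preserved (an assumption about functions.Pooling, not documented behaviour):

import numpy as np

def max_abs_pool_1d(x, k=2):
    x = x[: (len(x) // k) * k].reshape(-1, k)    # split the row into windows of size k
    idx = np.abs(x).argmax(axis=1)               # position of the largest |value| per window
    return x[np.arange(len(x)), idx]             # keep that entry, sign included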
Example #8
    def __init__(self, input_shape, nonlinear_list, powerlist=None, num_features=[4,4,4], eta0=0.2, eta1=0.2, NP=1, NC=1, K=None,
            itype='complex128', dtype0='complex128', dtype1='complex128', momentum=0., isym=False,
            usesum=False, poly_order=10, do_BN=False, is_unitary=False, **kwargs):
        if K is None:
            K = np.prod(input_shape)
        self.num_features = num_features
        self.do_BN = do_BN
        self.poly_order = poly_order
        self.eta0, self.eta1 = eta0, eta1
        self.isym = isym
        nsite = np.prod(input_shape)
        D = len(input_shape)
        ishape = input_shape

        def _is_unitary(inl):
            if is_unitary is True:
                return True
            elif is_unitary is False:
                return False
            else:
                return is_unitary[inl]

        super(WangLei6, self).__init__(**kwargs)

        # preprocessing
        if powerlist is not None:
            plnn = ParallelNN(axis=0)
            for power in powerlist:
                plnn.layers.append(
                    functions.ConvProd(ishape,
                                       itype,
                                       powers=power,
                                       boundary='P',
                                       strides=(1, ) * D))
                if isym:
                    plnn.layers.append(
                        ANN(layers=[
                            functions.Reverse(ishape, itype, axis=-1),
                            functions.ConvProd(ishape,
                                               itype,
                                               powers=power,
                                               boundary='P',
                                               strides=(1, ) * D)
                        ]))
            self.layers.append(plnn)
            nfo = len(plnn.layers)
        else:
            nfo = 1

        inl = -1
        # product layers.
        dtype = dtype0
        eta = eta0
        if NP != 0: self.add_layer(functions.Log, otype='complex128')
        for inl in range(NP):
            nfi, nfo = nfo, num_features[inl]
            self.add_layer(SPConv,
                           weight=eta * typed_uniform(dtype,
                                                      (nfo, nfi) + (K, ) * D),
                           bias=eta * typed_uniform(dtype, (nfo, )),
                           boundary='P',
                           strides=(1, ) * D,
                           is_unitary=_is_unitary(inl))
            self.use_nonlinear(nonlinear_list[inl])
        if NP != 0: self.add_layer(functions.Exp)
        if self.num_layers == 0:
            self.layers.append(
                functions.Reshape(ishape,
                                  itype=itype,
                                  output_shape=(1, ) + ishape))
        # convolution layers.
        eta = eta1
        dtype = self.layers[-1].otype
        for nfi, nfo in zip([nfo] + num_features[NP:NP + NC - 1],
                            num_features[NP:NP + NC]):
            inl = inl + 1
            self.add_layer(SPConv,
                           weight=eta *
                           typed_uniform(dtype, (nfo, nfi) + input_shape),
                           bias=eta * typed_uniform(dtype, (nfo, )),
                           boundary='P',
                           strides=(1, ) * D,
                           is_unitary=_is_unitary(inl))
            self.use_nonlinear(nonlinear_list[inl])
        self.add_layer(functions.Filter, axes=(-1, ), momentum=momentum)
        inl = inl + 1
        self.use_nonlinear(nonlinear_list[inl])

        # linear layers.
        if usesum:
            self.add_layer(functions.Mean, axis=-1)
            inl = inl + 1
            self.use_nonlinear(nonlinear_list[inl])
        else:
            for i, (nfi, nfo) in enumerate(
                    zip(num_features[NP + NC - 1:],
                        num_features[NP + NC:] + [1])):
                self.add_layer(Linear,
                               weight=eta * typed_uniform(dtype, (nfo, nfi)),
                               bias=eta * typed_uniform(dtype, (nfo, )),
                               var_mask=(1, 1),
                               is_unitary=_is_unitary(inl))
                if do_BN:
                    self.add_layer(functions.BatchNorm,
                                   axis=None,
                                   label='BN-%s' % i)
                    self.add_layer(pfunctions.Poly,
                                   params=np.array([0, 1.], dtype=dtype1),
                                   kernel='polynomial',
                                   factorial_rescale=True)
                inl = inl + 1
                self.use_nonlinear(nonlinear_list[inl])
        print(check_numdiff(self))
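The powerlist preprocessing relies on functions.ConvProd, which plays the role of a spin-correlator layer here; a one-dimensional sketch of what it plausibly computes with boundary='P' and stride 1 (an assumption about the layer's convention):

import numpy as np

def conv_prod_1d(s, powers):
    # out[i] = prod_j s[(i + j) % n] ** powers[j]; powers=[1, 1] gives nearest-neighbour
    # products s_i * s_{i+1}, powers=[1, 0, 1] next-nearest-neighbour products
    n = len(s)
    return np.array([np.prod([s[(i + j) % n] ** p for j, p in enumerate(powers)])
                     for i in range(n)])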
Example #9
    def __init__(self,
                 input_shape,
                 itype,
                 powerlist,
                 num_features=[12],
                 fixbias=False,
                 version='conv',
                 stride=1,
                 eta=0.2,
                 usesum=False,
                 nonlinear='x^3',
                 momentum=0.,
                 poly_order=10,
                 with_exp=False,
                 factorial_rescale=False,
                 nonlinear_mask=[False, False],
                 **kwargs):
        self.num_features = num_features
        nsite = np.prod(input_shape)

        D = len(input_shape)
        ishape = (1, ) + input_shape
        super(WangLei3, self).__init__(layers=[
            functions.Reshape(input_shape, itype=itype, output_shape=ishape)
        ],
                                       **kwargs)

        NF = len(powerlist)
        plnn = ParallelNN(axis=1)
        for power in powerlist:
            plnn.layers.append(
                functions.ConvProd(input_shape=ishape,
                                   itype=itype,
                                   powers=power,
                                   boundary='P',
                                   strides=(stride, ) * D))
        self.layers.append(plnn)
        if version == 'linear':
            self.add_layer(functions.Reshape,
                           output_shape=(np.prod(
                               self.layers[-1].output_shape), ))
            self.add_layer(
                Linear,
                weight=eta * typed_uniform(
                    self.itype,
                    (num_features[0], self.layers[-1].output_shape[-1])),
                bias=(0 if fixbias else eta) *
                typed_uniform(self.itype, (num_features[0], )),
                var_mask=(1, 0 if fixbias else 1))
        elif version == 'conv':
            stride = 1
            imgsize = self.layers[-1].output_shape[-D:]
            self.add_layer(SPConv,
                           weight=eta *
                           typed_uniform(self.itype,
                                         (self.num_features[0], NF) + imgsize),
                           bias=(0 if fixbias else eta) *
                           typed_uniform(self.itype, (num_features[0], )),
                           boundary='P',
                           strides=(stride, ) * D,
                           var_mask=(1, 0 if fixbias else 1))
            self.add_layer(functions.Reshape,
                           output_shape=(num_features[0],
                                         np.prod(imgsize) // stride**D))

            if nonlinear == 'x^3':
                self.add_layer(functions.Power, order=3)
            elif nonlinear == 'x^5':
                self.add_layer(functions.Power, order=5)
            elif nonlinear == 'relu':
                self.add_layer(functions.ReLU)
            elif nonlinear == 'sinh':
                self.add_layer(functions.Sinh)
            elif nonlinear == 'log2cosh':
                self.add_layer(functions.Log2cosh)
            elif nonlinear == 'mobius':
                self.add_layer(layers.Mobius,
                               params=np.array([0, 1, 1e20], dtype=itype),
                               var_mask=[True, True, False])
            elif nonlinear == 'softmax':
                self.add_layer(functions.SoftMax, axis=-1)
            elif nonlinear == 'sin':
                self.add_layer(functions.Sin)
            elif nonlinear in layers.Poly.kernel_dict:
                self.add_layer(layers.Poly,
                               params=eta *
                               typed_uniform(itype, (poly_order, )),
                               kernel=nonlinear,
                               factorial_rescale=factorial_rescale)
            else:
                raise ValueError('nonlinear %s not supported' % nonlinear)
            #self.add_layer(functions.Filter, axes=(-1,), momentum=momentum)
            self.add_layer(functions.Mean, axis=-1)
            if nonlinear_mask[0]:
                self.add_layer(functions.Sinh)
                #self.add_layer(layers.Poly, params=eta*typed_uniform(itype, (poly_order,)), kernel='legendre', factorial_rescale=factorial_rescale)
        if version == 'const-linear':
            self.add_layer(Linear,
                           weight=np.array([[-1, -1, 1, 1]],
                                           dtype=itype,
                                           order='F'),
                           bias=np.zeros((1, ), dtype=itype),
                           var_mask=(0, 0))
        elif version == 'linear' or version == 'conv':
            if usesum:
                self.add_layer(functions.Mean, axis=-1)
            else:
                for i, (nfi, nfo) in enumerate(
                        zip(num_features, num_features[1:] + [1])):
                    self.add_layer(Linear,
                                   weight=eta *
                                   typed_uniform(self.itype, (nfo, nfi)),
                                   bias=(0 if fixbias else eta) *
                                   typed_uniform(self.itype, (nfo, )),
                                   var_mask=(1, 0 if fixbias else 1))
                    #self.add_layer(layers.Poly, params=eta*typed_uniform(itype,(10,)))
        elif version == 'rbm':
            pass
        else:
            raise ValueError('version %s does not exist' % version)
        if with_exp:
            self.add_layer(functions.Exp)
        self.add_layer(functions.Reshape, output_shape=())
        if nonlinear_mask[1]:
            self.add_layer(layers.Poly,
                           params=eta * typed_uniform(itype, (poly_order, )),
                           kernel='legendre',
                           factorial_rescale=factorial_rescale)
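The long if/elif chain that selects the nonlinearity could also be written as a dispatch table; a sketch with plain numpy callables standing in for the corresponding library layers (the layers.Poly and Mobius entries are left out since their kernels are not shown here):

import numpy as np

NONLINEAR = {
    'x^3':      lambda x: x ** 3,
    'x^5':      lambda x: x ** 5,
    'relu':     lambda x: np.maximum(x, 0),
    'sinh':     np.sinh,
    'sin':      np.sin,
    'log2cosh': lambda x: np.abs(x) + np.log1p(np.exp(-2 * np.abs(x))),
}
f = NONLINEAR['x^3']   # e.g. the default 'x^3' branch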
Example #10
    def __init__(self,
                 input_shape,
                 period,
                 kernel='cos',
                 nf=4,
                 batch_wise=False,
                 output_mode='theta',
                 use_msr=False):
        self.period = period
        self.batch_wise = batch_wise
        if batch_wise:
            num_batch = input_shape[0]
            site_shape = input_shape[1:]
        else:
            num_batch = 1
            site_shape = input_shape
        nsite = np.prod(site_shape)
        eta = 0.1
        super(PSNN, self).__init__('float64' if output_mode ==
                                   'theta' else 'complex128')

        dtype = 'float64'
        self.layers.append(
            functions.Reshape(input_shape,
                              itype='float64',
                              output_shape=(num_batch, 1) + site_shape))
        if use_msr:
            weight = np.array([[[np.pi / 2, 0]]])
            bias = np.array([np.pi / 2])
            var_mask = (0, 0)
        else:
            weight = eta * typed_randn('float64', (nf, 1, nsite))
            bias = eta * typed_randn('float64', (nf, ))
            var_mask = (1, 1)
        self.add_layer(SPConv,
                       weight=weight,
                       bias=bias,
                       strides=(period, ),
                       boundary='P',
                       var_mask=var_mask)
        self.add_layer(functions.Reshape,
                       output_shape=(num_batch, nf, nsite // period))
        #self.add_layer(functions.Cos)
        self.add_layer(functions.Sum, axis=-1)
        self.add_layer(functions.ReLU)
        self.add_layer(Linear,
                       weight=eta * typed_randn(dtype, (1, nf)),
                       bias=0 * typed_randn(dtype, (1, )))

        if output_mode != 'theta':
            if kernel == 'exp':
                self.add_layer(functions.TypeCast, otype='complex128')
                self.add_layer(functions.Mul, alpha=1j)
                self.add_layer(functions.Exp)
                self.add_layer(functions.TypeCast, otype='float64')
            elif kernel == 'cos':
                self.add_layer(functions.Cos)
        if output_mode == 'loss':
            self.add_layer(functions.SquareLoss)
            self.add_layer(functions.Mean, axis=0)

        if not batch_wise:
            self.add_layer(functions.Reshape, output_shape=())
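A numpy sketch of the output_mode='theta' pipeline: a periodic convolution strided by `period`, summed over windows, passed through ReLU and a one-dimensional Linear layer that returns the phase angle (the exact roll convention of SPConv is assumed):

import numpy as np

def psnn_theta_sketch(s, W, b, w_out, b_out, period=2):
    nf, _, nsite = W.shape
    h = np.array([[W[f, 0] @ np.roll(s, -d) for d in range(0, nsite, period)]
                  for f in range(nf)]) + b[:, None]
    h = np.maximum(h.sum(axis=-1), 0)        # Sum over windows, then ReLU
    return (w_out @ h + b_out).item()        # one-dimensional Linear -> theta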
Example #11
    def __init__(self,
                 input_shape,
                 num_features1=[12],
                 num_features2=[],
                 itype='float64',
                 version='basic',
                 eta=0.2,
                 use_conv=(False, False),
                 preprocessing=False):
        self.num_features1, self.num_features2 = num_features1, num_features2
        nsite = np.prod(input_shape)
        super(CaiZi, self).__init__()

        # create amplitude network
        if not preprocessing:
            net1 = ANN(layers=[
                functions.Reshape(
                    input_shape, itype=itype, output_shape=(1, ) + input_shape)
            ])
            NF = 1
        else:
            # preprocessing
            plnn = ParallelNN(axis=0)
            for power in [[1, 1], [1, 0, 1]]:
                plnn.layers.append(
                    functions.ConvProd(input_shape,
                                       itype,
                                       powers=power,
                                       boundary='P',
                                       strides=(1, )))
            NF = 2
            net1 = ANN(layers=[plnn])

        for i, (nfi, nfo) in enumerate(
                zip([np.prod(input_shape)] + num_features1,
                    num_features1 + [1])):
            if use_conv[0] and i == 0:
                net1.add_layer(SPConv,
                               weight=eta *
                               typed_randn(self.itype, (nfo, NF, nsite)),
                               bias=eta * typed_randn(self.itype, (nfo, )))
                net1.add_layer(functions.Transpose, axes=(1, 0))
            else:
                net1.add_layer(Linear,
                               weight=eta *
                               typed_randn(self.itype, (nfo, nfi)),
                               bias=eta * typed_randn(self.itype, (nfo, )))
            if version == 'basic':
                net1.add_layer(functions.Tanh)
            elif version == 'sigmoid':
                net1.add_layer(functions.Sigmoid)
            else:
                raise ValueError('version %s does not exist' % version)
        if use_conv[0]:
            net1.add_layer(functions.Mean, axis=0)
        net1.add_layer(functions.Reshape, output_shape=())

        # create sign network
        net2 = ANN(layers=[
            functions.Reshape(
                input_shape, itype=itype, output_shape=(1, ) + input_shape)
        ])
        for i, (nfi, nfo) in enumerate(
                zip([np.prod(input_shape)] + num_features2,
                    num_features2 + [1])):
            if use_conv[1] and i == 0:
                net2.add_layer(SPConv,
                               weight=eta *
                               typed_randn(self.itype, (nfo, 1, nsite)),
                               bias=eta * typed_randn(self.itype, (nfo, )))
                net2.add_layer(functions.Transpose, axes=(1, 0))
            else:
                net2.add_layer(Linear,
                               weight=eta *
                               typed_randn(self.itype, (nfo, nfi)),
                               bias=eta * typed_randn(self.itype, (nfo, )))
            net2.add_layer(functions.Mul, alpha=np.pi)
            net2.add_layer(functions.Cos)
        if use_conv[1]:
            net2.add_layer(functions.Mean, axis=0)
        net2.add_layer(functions.Reshape, output_shape=())

        # construct whole network
        self.layers.append(ParallelNN(layers=[net1, net2]))
        self.add_layer(functions.Prod, axis=0)
        print(check_numdiff(self))
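In the simplest configuration (preprocessing=False, use_conv=(False, False), version='basic') the wavefunction factorises into an amplitude MLP with tanh activations and a sign branch built from cos(pi * Linear); a numpy sketch of that product:

import numpy as np

def caizi_psi_sketch(s, amp_layers, sign_layers):
    a = s.astype(float)
    for W, b in amp_layers:                  # amplitude branch: Linear -> tanh
        a = np.tanh(W @ a + b)
    g = s.astype(float)
    for W, b in sign_layers:                 # sign branch: Linear -> cos(pi * .)
        g = np.cos(np.pi * (W @ g + b))
    return a.item() * g.item()               # functions.Prod over the two branches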
Example #12
    def __init__(self, input_shape, K=2, num_features=[4,4,4], eta0=0.2, eta1=0.2, NP=1, NC=1,
            itype='complex128', dtype0='complex128', dtype1='complex128', momentum=0.,
            stride=None, usesum=False, nonlinear='x^3', poly_order=10):
        self.num_features, self.itype = num_features, itype
        if stride is None:
            if any([n % 4 != 0 for n in input_shape]):
                stride = 2
            else:
                stride = 1
        self.stride = stride
        nsite = np.prod(input_shape)
        super(WangLei5, self).__init__(itype, do_shape_check=False)

        D = len(input_shape)
        ishape = (1, ) + input_shape
        self.layers.append(
            functions.Reshape(input_shape, itype=itype, output_shape=ishape))
        imgsize = self.layers[-1].output_shape[-D:]

        # product layers.
        dtype = dtype0
        eta = eta0
        self.add_layer(functions.Log)
        for nfi, nfo in zip([1] + num_features[:NP - 1], num_features[:NP]):
            self.add_layer(SPConv,
                           weight=eta * typed_uniform(dtype,
                                                      (nfo, nfi) + (K, ) * D),
                           bias=eta * typed_uniform(dtype, (nfo, )),
                           boundary='P',
                           strides=(stride, ) * D)
            imgsize = self.layers[-1].output_shape[-D:]
        self.add_layer(functions.Exp)

        # convolution layers.
        eta = eta1
        dtype = dtype1
        stride = 1
        for nfi, nfo in zip(num_features[NP - 1:NP + NC - 1],
                            num_features[NP:NP + NC]):
            self.add_layer(SPConv,
                           weight=eta * typed_uniform(dtype,
                                                      (nfo, nfi) + imgsize),
                           bias=eta * typed_uniform(dtype, (nfo, )),
                           boundary='P',
                           strides=(stride, ) * D)
            imgsize = self.layers[-1].output_shape[-D:]
        self.add_layer(functions.Reshape,
                       output_shape=(nfo, np.prod(imgsize) // stride**D))

        # non-linear function
        if nonlinear == 'x^3':
            self.add_layer(functions.Power, order=3)
        elif nonlinear == 'x^5':
            self.add_layer(functions.Power, order=5)
        elif nonlinear == 'relu':
            self.add_layer(functions.ReLU)
        elif nonlinear == 'sinh':
            self.add_layer(functions.Sinh)
        elif nonlinear in layers.Poly.kernel_dict:
            self.add_layer(layers.Poly,
                           params=eta0 * typed_uniform('complex128',
                                                       (poly_order, )),
                           kernel=nonlinear)
        else:
            raise ValueError('nonlinear %s not supported' % nonlinear)
        self.add_layer(functions.Filter, axes=(-1, ), momentum=momentum)

        # linear layers.
        if usesum:
            self.add_layer(functions.Mean, axis=-1)
        else:
            for i, (nfi, nfo) in enumerate(
                    zip(num_features[NP + NC - 1:],
                        num_features[NP + NC:] + [1])):
                if i != 0:
                    self.add_layer(functions.ReLU)
                self.add_layer(Linear,
                               weight=eta * typed_uniform(dtype, (nfo, nfi)),
                               bias=eta * typed_uniform(dtype, (nfo, )),
                               var_mask=(1, 1))
            self.add_layer(functions.Reshape, output_shape=())
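The way num_features is split between the product (NP), convolution (NC) and linear stages is easy to misread; a small runnable check of how the three zips chain the feature counts for the defaults num_features=[4, 4, 4], NP=1, NC=1:

num_features, NP, NC = [4, 4, 4], 1, 1
prod_pairs = list(zip([1] + num_features[:NP - 1], num_features[:NP]))
conv_pairs = list(zip(num_features[NP - 1:NP + NC - 1], num_features[NP:NP + NC]))
lin_pairs = list(zip(num_features[NP + NC - 1:], num_features[NP + NC:] + [1]))
assert prod_pairs == [(1, 4)] and conv_pairs == [(4, 4)] and lin_pairs == [(4, 4), (4, 1)]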