Example 1
    def __init__(self,
                 input_shape,
                 num_feature_hidden,
                 itype='complex128',
                 sign_func=None):
        self.num_feature_hidden, self.itype = num_feature_hidden, itype
        nsite = np.prod(input_shape)
        eta = 0.1
        super(RBM, self).__init__(itype, do_shape_check=False)

        self.layers.append(
            functions.Reshape(input_shape,
                              itype=itype,
                              output_shape=(1, ) + input_shape))
        self.add_layer(
            SPConv,
            weight=eta * typed_randn(self.itype,
                                     (self.num_feature_hidden, 1, nsite)),
            bias=eta * typed_randn(self.itype, (num_feature_hidden, )),
            boundary='P')
        self.add_layer(functions.Log2cosh)
        self.add_layer(functions.Reshape,
                       output_shape=(self.num_feature_hidden * nsite, ))
        self.add_layer(functions.Sum, axis=0)
        self.add_layer(functions.Exp)

        self._get_sign = sign_func
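
For reference, the Reshape -> SPConv(boundary='P') -> Log2cosh -> Sum -> Exp stack above evaluates a translation-invariant RBM amplitude: psi(s) = exp(sum over f, j of log 2cosh(theta[f, j])), where theta[f, j] is the periodic convolution of the spin configuration with the f-th kernel plus its bias. Below is a minimal NumPy sketch of that expression; the function name and the exact orientation of the periodic convolution are assumptions, not part of the library.

import numpy as np

def rbm_amplitude(s, W, b):
    """RBM amplitude of a 1D spin configuration s (shape (nsite,)).

    W: (num_feature_hidden, nsite) kernels, b: (num_feature_hidden,) biases.
    Mirrors Reshape -> SPConv (periodic) -> Log2cosh -> Sum -> Exp.
    """
    nsite = s.shape[0]
    # periodic cross-correlation: theta[f, j] = sum_i W[f, i] * s[(i + j) % nsite] + b[f]
    theta = np.array([[np.dot(W[f], np.roll(s, -j)) for j in range(nsite)]
                      for f in range(W.shape[0])]) + b[:, None]
    # log(2*cosh(x)) elementwise, summed over all hidden units, then exponentiated
    return np.exp(np.sum(np.log(2 * np.cosh(theta))))
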
Example 2
    def __init__(self, input_shape, num_feature_hidden, with_linear=False, use_msr=False, theta_period=2):
        self.num_feature_hidden = num_feature_hidden
        self.use_msr = use_msr
        dtype = 'float64'
        self.with_linear = with_linear
        nsite=np.prod(input_shape)
        eta=0.1
        super(Roger, self).__init__(dtype, do_shape_check=False)

        self.layers.append(functions.Reshape(input_shape, itype=dtype, output_shape=(1,)+input_shape))
        self.add_layer(SPConv, weight=eta*typed_randn(dtype, (self.num_feature_hidden, 1, nsite)),
                bias=eta*typed_randn(dtype, (num_feature_hidden,)), boundary='P')
        self.add_layer(functions.Log2cosh)
        self.add_layer(functions.Reshape, output_shape=(num_feature_hidden, nsite) if with_linear else (num_feature_hidden*nsite,))
        self.add_layer(functions.Sum, axis=-1)
        self.add_layer(functions.Exp)
        if with_linear:
            self.add_layer(Linear, weight=eta*typed_randn(dtype, (128, self.num_feature_hidden)),
                    bias=0*typed_randn(dtype, (128,)))
            self.add_layer(functions.ReLU)
            self.add_layer(Linear, weight=eta*typed_randn(dtype, (64, 128)),
                    bias=0*typed_randn(dtype, (64,)))
            self.add_layer(functions.ReLU)
            self.add_layer(Linear, weight=eta*typed_randn(dtype, (32, 64)),
                    bias=0*typed_randn(dtype, (32,)))
            self.add_layer(functions.ReLU)
            self.add_layer(Linear, weight=eta*typed_randn(dtype, (1, 32)),
                    bias=0*typed_randn(dtype, (1,)))
            self.add_layer(functions.Reshape, output_shape=())

        if use_msr and theta_period != 2:
            raise ValueError('use_msr requires theta_period == 2')
        self.thnn = PSNN(input_shape, period=theta_period, batch_wise=False, output_mode='theta', use_msr=use_msr)
Example 3
    def __init__(self,
                 input_shape,
                 num_feature_hidden,
                 with_linear=False,
                 use_msr=False,
                 theta_period=2,
                 itype='float64'):
        self.num_feature_hidden = num_feature_hidden
        self.use_msr = use_msr
        self.with_linear = with_linear
        nsite = np.prod(input_shape)
        eta = 0.1
        super(WangLei2, self).__init__(itype, do_shape_check=False)

        self.layers.append(
            functions.Reshape(input_shape,
                              itype=itype,
                              output_shape=(1, ) + input_shape))
        self.add_layer(SPConv,
                       weight=eta *
                       typed_randn(itype, (self.num_feature_hidden, 1, nsite)),
                       bias=eta * typed_randn(itype, (num_feature_hidden, )),
                       boundary='P')
        self.add_layer(functions.Log2cosh)
        self.add_layer(functions.Reshape,
                       output_shape=(num_feature_hidden,
                                     nsite) if with_linear else
                       (num_feature_hidden * nsite, ))
        self.add_layer(functions.Sum, axis=-1)
        self.add_layer(functions.Exp)
        if with_linear:
            #self.add_layer(Linear, weight=eta*typed_randn(itype, (1, self.num_feature_hidden)),
            #        bias=0*typed_randn(itype, (1,)))
            self.add_layer(Linear,
                           weight=np.array([[1, 1, -1, -1]], dtype=itype),
                           bias=0 * typed_randn(itype, (1, )),
                           var_mask=(0, 0))
            self.add_layer(functions.Reshape, output_shape=())

        if use_msr and theta_period != 2:
            raise ValueError('use_msr requires theta_period == 2')
        if use_msr:
            self.thnn = psnn_msr(nsite=input_shape[0])
        else:
            self.thnn = PSNN(input_shape,
                             period=theta_period,
                             batch_wise=False,
                             output_mode='theta')
Example 4
    def __init__(self, input_shape, NF=4, K=2, num_features=[12], eta0=0.2, eta1=0.2,
            itype='complex128',version='linear', dtype0='complex128', dtype1='complex128', stride=None):
        self.num_features = num_features
        if stride is None:
            if any([n%4!=0 for n in input_shape]):
                stride=2
            else:
                stride=1
        self.stride = stride
        nsite=np.prod(input_shape)
        eta=eta0
        super(WangLei4, self).__init__()

        D = len(input_shape)
        ishape = (1,)+input_shape
        self.layers.append(functions.Reshape(input_shape, itype=itype, output_shape=ishape))

        eta=eta1
        dtype = dtype0
        self.add_layer(functions.Log)
        #self.add_layer(SPConv, weight=eta*typed_randn(self.itype, (NF,1)+(K,)*D),
        #        bias=eta*typed_randn(self.itype, (NF,)), boundary='P', strides=(stride,)*D)
        self.add_layer(SPConv, weight=eta*typed_randn(dtype, (NF,1)+(K,)*D),
                bias=eta*typed_randn(dtype, (NF,)), boundary='P', strides=(stride,)*D)
        self.add_layer(functions.Exp)

        dtype = dtype1
        if version=='linear':
            self.add_layer(functions.Reshape, output_shape=(np.prod(self.layers[-1].output_shape),))
            self.add_layer(Linear, weight=eta*typed_randn(dtype, (num_features[0], self.layers[-1].output_shape[-1])),
                    bias=eta*typed_randn(dtype, (num_features[0],)),var_mask=(1,1))
        elif version=='conv':
            stride= 1
            imgsize = self.layers[-1].output_shape[-D:]
            self.add_layer(SPConv, weight=eta*typed_randn(dtype, (self.num_features[0], NF)+imgsize),
                    bias=eta*typed_randn(dtype, (num_features[0],)), boundary='P', strides=(stride,)*D)
            self.add_layer(functions.Reshape, output_shape=(num_features[0], np.prod(imgsize)//stride**D))

            self.add_layer(functions.Power,order=3)
            #self.add_layer(functions.Log2cosh)
            self.add_layer(functions.Mean, axis=-1)
        if version=='const-linear': 
            self.add_layer(Linear, weight=np.array([[-1,-1,1,1]],dtype=itype, order='F'),
                    bias=np.zeros((1,),dtype=itype),var_mask=(0,0))
        elif version=='linear' or version=='conv':
            for i,(nfi, nfo) in enumerate(zip(num_features, num_features[1:]+[1])):
                if i!=0:
                    self.add_layer(functions.ReLU)
                self.add_layer(Linear, weight=eta*typed_randn(dtype, (nfo, nfi)),
                        bias=eta*typed_randn(dtype, (nfo,)),var_mask=(1,1))
        elif version=='rbm':
            pass
        else:
            raise ValueError('version %s does not exist' % version)
        self.add_layer(functions.Reshape, output_shape=())
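
The Log -> SPConv -> Exp opening of this network acts as product pooling: exp(sum_i w_i * log(s_i) + b) = exp(b) * prod_i s_i**w_i, i.e. a kernel-weighted product of the spins in each window (hence the complex dtype, since log(-1) is imaginary). A one-window, one-feature sketch of that identity follows; the function name is illustrative, not library API.

import numpy as np

def log_conv_exp(window, w, b):
    """exp(w . log(window) + b) == exp(b) * prod(window ** w) for +/-1 spins."""
    window = np.asarray(window, dtype='complex128')  # log(-1) = i*pi needs complex
    return np.exp(np.sum(w * np.log(window)) + b)
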
Example 5
    def __init__(self,
                 input_shape,
                 num_feature_hidden,
                 mlp_shape,
                 use_msr=False,
                 theta_period=2):
        self.num_feature_hidden = num_feature_hidden
        self.use_msr = use_msr
        dtype = 'float64'
        nsite = np.prod(input_shape)
        eta = 0.1
        super(RTheta_MLP_EXP, self).__init__(dtype, do_shape_check=False)

        self.layers.append(
            functions.Reshape(input_shape,
                              itype=dtype,
                              output_shape=(1, ) + input_shape))
        self.add_layer(SPConv,
                       weight=eta *
                       typed_randn(dtype, (self.num_feature_hidden, 1, nsite)),
                       bias=eta * typed_randn(dtype, (num_feature_hidden, )),
                       boundary='P')
        self.add_layer(functions.Log2cosh)
        self.add_layer(functions.Reshape,
                       output_shape=(num_feature_hidden, nsite))
        self.add_layer(functions.Sum, axis=-1)
        self.add_layer(functions.Exp)
        self.add_layer(Linear,
                       weight=eta *
                       typed_randn(dtype,
                                   (mlp_shape[0], self.num_feature_hidden)),
                       bias=0 * typed_randn(dtype, (mlp_shape[0], )))
        self.add_layer(functions.ReLU)
        for i in range(len(mlp_shape) - 1):
            self.add_layer(Linear,
                           weight=eta *
                           typed_randn(dtype,
                                       (mlp_shape[i + 1], mlp_shape[i])),
                           bias=0.1 * typed_randn(dtype, (mlp_shape[i + 1], )))
            self.add_layer(functions.ReLU)
        self.add_layer(Linear,
                       weight=eta * typed_randn(dtype, (1, mlp_shape[-1])),
                       bias=0 * typed_randn(dtype, (1, )))
        # self.add_layer(functions.ReLU)
        self.add_layer(functions.Exp)
        self.add_layer(functions.Reshape, output_shape=())
        if use_msr and theta_period != 2:
            raise ValueError('use_msr requires theta_period == 2')
        self.thnn = PSNN(input_shape,
                         period=theta_period,
                         batch_wise=False,
                         output_mode='theta',
                         use_msr=use_msr)
Example 6
def build_ann():
    '''
    Build a single-layer network for the MNIST classification problem.
    '''
    F1 = 10
    I1, I2 = 28, 28
    eta = 0.1
    dtype = 'float32'

    W_fc1 = typed_randn(dtype, (F1, I1 * I2)) * eta
    b_fc1 = typed_randn(dtype, (F1, )) * eta

    # create an empty vertical network.
    ann = ANN()
    linear1 = Linear((-1, I1 * I2), dtype, W_fc1, b_fc1)
    ann.layers.append(linear1)
    ann.add_layer(functions.SoftMaxCrossEntropy, axis=1)
    ann.add_layer(functions.Mean, axis=0)
    return ann
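
The Linear -> SoftMaxCrossEntropy(axis=1) -> Mean(axis=0) stack evaluates the batch-averaged softmax cross-entropy of a single fully connected layer. A plain NumPy sketch of that quantity follows; how the labels are fed to the poornn layer is not shown, and all names are illustrative.

import numpy as np

def mnist_loss(x, y_onehot, W, b):
    """Mean softmax cross-entropy of a one-layer classifier.

    x: (batch, 784) flattened images, y_onehot: (batch, 10) one-hot labels,
    W: (10, 784), b: (10,).
    """
    logits = x @ W.T + b                                  # (batch, 10)
    logits = logits - logits.max(axis=1, keepdims=True)   # numerical stability
    logp = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
    return np.mean(-np.sum(y_onehot * logp, axis=1))
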
Example 7
    def __init__(self, input_shape, num_features=[12], itype='float64',use_conv=True,version='linear'):
        self.num_features, self.itype = num_features, itype
        nsite=np.prod(input_shape)
        eta=0.2
        super(WangLei, self).__init__(itype, do_shape_check=True)

        stride = nsite
        if not use_conv:
            num_feature_hidden=num_features[0]*nsite
            self.layers.append(Linear(input_shape, itype, weight=eta*typed_randn(self.itype, (num_feature_hidden, nsite)),
                    bias=eta*typed_randn(self.itype, (num_feature_hidden,))))
        else:
            self.layers.append(functions.Reshape(input_shape, itype=itype, output_shape=(1,)+input_shape))
            self.add_layer(SPConv, weight=eta*typed_randn(self.itype, (self.num_features[0], 1, nsite)),
                    bias=eta*typed_randn(self.itype, (num_features[0],)), boundary='P', strides=(stride,))
            self.add_layer(functions.Reshape, output_shape=(num_features[0], nsite//stride))
        self.add_layer(functions.Log2cosh)
        self.add_layer(functions.Sum, axis=-1)
        self.add_layer(functions.Exp)
        if version=='const-linear': 
            self.add_layer(Linear, weight=np.array([[-1,-1,1,1]], dtype=itype, order='F'),
                    bias=np.zeros((1,), dtype=itype), var_mask=(0,0))
        elif version=='linear':
            for nfi, nfo in zip(num_features, num_features[1:]+[1]):
                self.add_layer(Linear, weight=eta*typed_randn(self.itype, (nfo, nfi)),
                        bias=eta*typed_randn(self.itype, (nfo,)),var_mask=(1,1))
        elif version=='rbm':
            pass
        else:
            raise ValueError('version %s does not exist' % version)
        self.add_layer(functions.Reshape, output_shape=())
Example 8
    def __init__(self, input_shape, use_msr=False, theta_period=2):
        self.use_msr = use_msr
        itype = 'float64'
        nsite = np.prod(input_shape)
        eta = 0.1
        self.vec = eta * typed_randn(itype, input_shape)

        if use_msr and theta_period != 2:
            raise ValueError('use_msr requires theta_period == 2')
        self.thnn = PSNN(input_shape,
                         period=theta_period,
                         batch_wise=False,
                         output_mode='theta',
                         use_msr=use_msr)
Example 9
    def __init__(self, input_shape, nfs, itype='complex128'):
        assert (len(nfs) == 4)
        eta = 1
        nsite = np.prod(input_shape)
        self.nfs, self.itype = nfs, itype
        DIM = len(input_shape)

        POOLING_MODE = 'max-abs'

        super(ConvWF, self).__init__(itype=itype, do_shape_check=False)
        self.layers.append(
            functions.Reshape(input_shape,
                              itype=itype,
                              output_shape=(1, ) + input_shape))

        self.add_layer(SPConv,
                       weight=typed_randn(itype,
                                          (self.nfs[0], 1, nsite)) * eta,
                       bias=typed_randn(itype, (nfs[0], )) * eta,
                       boundary='P')
        self.add_layer(functions.Pooling,
                       kernel_shape=(2, ) * DIM,
                       mode=POOLING_MODE)
        self.add_layer(functions.ReLU)

        self.add_layer(
            SPConv,
            weight=typed_randn(itype, (self.nfs[1], self.nfs[0], nsite)) * eta,
            bias=typed_randn(itype, (nfs[1], )) * eta,
            boundary='P')
        self.add_layer(functions.Pooling,
                       kernel_shape=(2, ) * DIM,
                       mode=POOLING_MODE)
        self.add_layer(functions.ReLU)

        nf1_ = np.prod(self.layers[-1].output_shape)
        self.add_layer(functions.Reshape, output_shape=(nf1_, ))

        self.add_layer(Apdot,
                       weight=typed_randn(itype, (nfs[3], nfs[1])) * eta,
                       bias=typed_randn(itype, (nfs[3], )) * eta)
        self.add_layer(Linear,
                       weight=typed_randn(itype, (1, nfs[3])) * eta,
                       bias=typed_randn(itype, (1, )) * eta)

        self.add_layer(functions.Reshape, output_shape=())
        self._shapes = None
Example 10
    def __init__(self,
                 input_shape,
                 period,
                 kernel='cos',
                 nf=4,
                 batch_wise=False,
                 output_mode='theta',
                 use_msr=False):
        self.period = period
        self.batch_wise = batch_wise
        if batch_wise:
            num_batch = input_shape[0]
            site_shape = input_shape[1:]
        else:
            num_batch = 1
            site_shape = input_shape
        nsite = np.prod(site_shape)
        eta = 0.1
        super(PSNN, self).__init__('float64' if output_mode ==
                                   'theta' else 'complex128')

        dtype = 'float64'
        self.layers.append(
            functions.Reshape(input_shape,
                              itype='float64',
                              output_shape=(num_batch, 1) + site_shape))
        if use_msr:
            # Marshall sign rule: fixed (non-trainable) kernel and bias
            weight = np.array([[[np.pi / 2, 0]]])
            bias = np.array([np.pi / 2])
            var_mask = (0, 0)
        else:
            weight = eta * typed_randn('float64', (nf, 1, nsite))
            bias = eta * typed_randn('float64', (nf, ))
            var_mask = (1, 1)
        self.add_layer(SPConv,
                       weight=weight,
                       bias=bias,
                       strides=(period, ),
                       boundary='P',
                       var_mask=var_mask)
        self.add_layer(functions.Reshape,
                       output_shape=(num_batch, nf, nsite // period))
        #self.add_layer(functions.Cos)
        self.add_layer(functions.Sum, axis=-1)
        self.add_layer(functions.ReLU)
        self.add_layer(Linear,
                       weight=eta * typed_randn(dtype, (1, nf)),
                       bias=0 * typed_randn(dtype, (1, )))

        if output_mode != 'theta':
            if kernel == 'exp':
                self.add_layer(functions.TypeCast, otype='complex128')
                self.add_layer(functions.Mul, alpha=1j)
                self.add_layer(functions.Exp)
                self.add_layer(functions.TypeCast, otype='float64')
            elif kernel == 'cos':
                self.add_layer(functions.Cos)
        if output_mode == 'loss':
            self.add_layer(functions.SquareLoss)
            self.add_layer(functions.Mean, axis=0)

        if not batch_wise:
            self.add_layer(functions.Reshape, output_shape=())
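
When use_msr is set, the fixed stride-2 kernel [[pi/2, 0]] with bias pi/2 maps each sublattice-A spin s to (pi/2) * (s + 1), so the summed feature is pi times the number of up spins on one sublattice, which is the ingredient of the Marshall sign rule sign(s) = (-1)**N_A_up; the trailing Linear layer then maps this feature to theta. A short sketch of both quantities follows; the function names are illustrative.

import numpy as np

def msr_feature(s):
    """Summed fixed-kernel feature for a +/-1 spin chain s: pi * N_A_up."""
    return np.sum(np.pi / 2 * (s[::2] + 1))

def marshall_sign(s):
    """Marshall sign rule on a bipartite chain: (-1) raised to the number of
    up spins on the even sublattice."""
    return (-1) ** int(np.sum(s[::2] == 1))
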
Example 11
    def __init__(self,
                 input_shape,
                 num_features1=[12],
                 num_features2=[],
                 itype='float64',
                 version='basic',
                 eta=0.2,
                 use_conv=(False, False),
                 preprocessing=False):
        self.num_features1, self.num_features2 = num_features1, num_features2
        self.itype = itype
        nsite = np.prod(input_shape)
        super(CaiZi, self).__init__()

        # create amplitude network
        if not preprocessing:
            net1 = ANN(layers=[
                functions.Reshape(
                    input_shape, itype=itype, output_shape=(1, ) + input_shape)
            ])
            NF = 1
        else:
            # preprocessing
            plnn = ParallelNN(axis=0)
            for power in [[1, 1], [1, 0, 1]]:
                plnn.layers.append(
                    functions.ConvProd(input_shape,
                                       itype,
                                       powers=power,
                                       boundary='P',
                                       strides=(1, )))
            NF = 2
            net1 = ANN(layers=[plnn])

        for i, (nfi, nfo) in enumerate(
                zip([np.prod(input_shape)] + num_features1,
                    num_features1 + [1])):
            if use_conv[0] and i == 0:
                net1.add_layer(SPConv,
                               weight=eta *
                               typed_randn(self.itype, (nfo, NF, nsite)),
                               bias=eta * typed_randn(self.itype, (nfo, )))
                net1.add_layer(functions.Transpose, axes=(1, 0))
            else:
                net1.add_layer(Linear,
                               weight=eta *
                               typed_randn(self.itype, (nfo, nfi)),
                               bias=eta * typed_randn(self.itype, (nfo, )))
            if version == 'basic':
                net1.add_layer(functions.Tanh)
            elif version == 'sigmoid':
                net1.add_layer(functions.Sigmoid)
            else:
                raise ValueError('version %s does not exist' % version)
        if use_conv[0]:
            net1.add_layer(functions.Mean, axis=0)
        net1.add_layer(functions.Reshape, output_shape=())

        # create sign network
        net2 = ANN(layers=[
            functions.Reshape(
                input_shape, itype=itype, output_shape=(1, ) + input_shape)
        ])
        for i, (nfi, nfo) in enumerate(
                zip([np.prod(input_shape)] + num_features2,
                    num_features2 + [1])):
            if use_conv[1] and i == 0:
                net2.add_layer(SPConv,
                               weight=eta *
                               typed_randn(self.itype, (nfo, 1, nsite)),
                               bias=eta * typed_randn(self.itype, (nfo, )))
                net2.add_layer(functions.Transpose, axes=(1, 0))
            else:
                net2.add_layer(Linear,
                               weight=eta *
                               typed_randn(self.itype, (nfo, nfi)),
                               bias=eta * typed_randn(self.itype, (nfo, )))
            net2.add_layer(functions.Mul, alpha=np.pi)
            net2.add_layer(functions.Cos)
        if use_conv[1]:
            net2.add_layer(functions.Mean, axis=0)
        net2.add_layer(functions.Reshape, output_shape=())

        # construct whole network
        self.layers.append(ParallelNN(layers=[net1, net2]))
        self.add_layer(functions.Prod, axis=0)
        print(check_numdiff(self))
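
The closing ParallelNN([net1, net2]) followed by functions.Prod(axis=0) makes the wavefunction a product of the amplitude network and the sign network, psi(s) = A(s) * S(s). A NumPy sketch of that product structure, shown for one hidden layer in the amplitude network and the default single-linear sign network; all parameter names are illustrative, not the library's API.

import numpy as np

def caizi_psi(s, W1, b1, w_out, b_out, W2, b2):
    """psi(s) = A(s) * S(s) with A a tanh MLP and S = cos(pi * (W2 s + b2)).

    Shapes: W1 (nf, nsite), b1 (nf,), w_out (1, nf), b_out (1,),
    W2 (1, nsite), b2 (1,).
    """
    hidden = np.tanh(W1 @ s + b1)                    # net1 hidden layer
    amplitude = np.tanh(w_out @ hidden + b_out)[0]   # net1 scalar output
    sign = np.cos(np.pi * (W2 @ s + b2))[0]          # net2 scalar output
    return amplitude * sign
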