Example #1
def demo():

    import sys
    sys.path.append('../core')
    from numpy import set_printoptions, tanh
    from sklearn import linear_model
    from tools import make_XOR_dataset
    from BR import BR
    # ELM and ELM_OI are assumed to be defined in the enclosing module
    set_printoptions(precision=3, suppress=True)

    X, Y = make_XOR_dataset()
    N, L = Y.shape

    print("CLASSIFICATION")
    h = linear_model.SGDClassifier(max_iter=100)  # 'n_iter' was renamed 'max_iter' in scikit-learn >= 0.19
    nn = ELM(8, f=tanh, h=BR(-1, h))
    nn.fit(X, Y)
    # test it
    print(nn.predict(X))
    print("vs")
    print(Y)

    print("REGRESSION")
    r = ELM(100, h=linear_model.LinearRegression())
    r.fit(X, Y)
    print(Y)
    print(r.predict(X))

    print("REGRESSION OI")
    r = ELM_OI(100, h=BR(-1, h=linear_model.SGDRegressor()))
    r.fit(X, Y)
    print(Y)
    print(r.predict(X))
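
The BR wrapper imported throughout these examples is a binary-relevance meta-classifier: BR(L, h) trains one independent copy of the base estimator h per label column, and BR(-1, h) infers the label count at fit time. Below is a minimal sketch of that contract, assuming scikit-learn-style base estimators; the project's actual BR class may differ.

import copy
import numpy as np

class BR():
    ''' minimal binary-relevance sketch (not the project's actual BR class) '''

    def __init__(self, L, h):
        self.L = L  # number of labels; L <= 0 means infer from Y at fit time
        self.h = h  # prototype base estimator, deep-copied once per label

    def fit(self, X, Y):
        if self.L <= 0:
            self.L = Y.shape[1]
        # one independently trained copy of h per label column
        self.hs = [copy.deepcopy(self.h).fit(X, Y[:, j]) for j in range(self.L)]
        return self

    train = fit  # some examples below call train() rather than fit()

    def predict(self, X):
        return np.column_stack([h.predict(X) for h in self.hs])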
Example #2
class ML(LP):
    '''
        Meta-Label Classifier
        --------------------------
        Essentially 'RAkELd'; a pruning option (inheriting from PS instead of
        LP) is still needed to make this a generic meta-label classifier.
    '''

    k = 3

    def fit(self, X, Y):
        Yy, self.reverse = transform_BR2ML(Y, self.k)
        N, self.L = Y.shape
        N_, L_ = Yy.shape
        import copy
        from BR import BR
        self.h = BR(L_, copy.deepcopy(self.h))
        self.h.fit(X, Yy)
        return self

    def predict(self, X):
        '''
            return predictions for X
        '''
        Yy = self.h.predict(X)
        Y = transform_ML2BR(Yy, self.reverse, self.L, self.k)
        return Y
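
transform_BR2ML and transform_ML2BR are not shown, but the docstring's RAkELd reference pins down the idea: partition the L binary labels into disjoint subsets of size k and recode each subset as a single multi-class meta-label (a label powerset per subset). A hypothetical illustration of the forward transform follows; it is not the library's implementation.

import numpy as np

def br2ml_sketch(Y, k):
    # split the L label columns into disjoint blocks of size k and encode
    # each block's k-bit pattern as one integer meta-class (label powerset)
    N, L = Y.shape
    blocks = [Y[:, i:i + k] for i in range(0, L, k)]
    return np.column_stack([b.dot(1 << np.arange(b.shape[1])) for b in blocks])

Y = np.array([[1, 0, 1, 1],
              [0, 1, 1, 0]])
print(br2ml_sketch(Y, 2))  # [[1 3]
                           #  [2 1]]

The 'reverse' structure returned by the real transform_BR2ML would record the partition, so that transform_ML2BR can decode meta-label predictions back into binary label vectors.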
Example #3
    def train(self, X, Y, W_e=None, E=500, lr=0.1):
        '''
            X: input
            Y: output
            E: number of RBM training epochs
            lr: RBM learning rate
        '''
        # 0. Extract Dimensions
        N, D = X.shape
        self.L = Y.shape[1]

        # 1. Train the RBM as an unsupervised feature extractor
        from RBME import RBM
        self.rbm = RBM(num_visible=D, num_hidden=self.H, learning_rate=lr)
        self.rbm.train(X, max_epochs=E)
        Z = self.rbm.run_visible(X)  # hidden-unit activations for X

        # (alternative feature extractors, e.g. sklearn.neural_network.BernoulliRBM,
        # could be substituted here)

        # 2. Train the final layer, h : Z -> Y
        self.h = BR(self.L, self.h)
        self.h.train(Z, Y)
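
As the comment above notes, sklearn's BernoulliRBM can stand in for the custom RBM. Here is a self-contained sketch of the same two-stage pipeline (unsupervised RBM features, then one classifier per label) built from standard scikit-learn parts, with illustrative data and parameters only:

import numpy as np
from sklearn.neural_network import BernoulliRBM
from sklearn.linear_model import LogisticRegression
from sklearn.multioutput import MultiOutputClassifier

rng = np.random.RandomState(0)
X = (rng.rand(100, 20) > 0.5).astype(float)  # binary inputs for the RBM
Y = (rng.rand(100, 5) > 0.5).astype(int)     # N x L binary label matrix

# 1. unsupervised feature extraction
rbm = BernoulliRBM(n_components=10, learning_rate=0.1, n_iter=100,
                   random_state=0)
Z = rbm.fit_transform(X)                     # hidden-unit activations

# 2. final layer: one logistic regression per label (binary relevance)
clf = MultiOutputClassifier(LogisticRegression()).fit(Z, Y)
Y_pred = clf.predict(rbm.transform(X))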
Example #4
    def fit(self, X, Y):
        Yy, self.reverse = transform_BR2ML(Y, self.k)
        N, self.L = Y.shape
        N_, L_ = Yy.shape
        import copy
        from BR import BR
        self.h = BR(L_, copy.deepcopy(self.h))
        self.h.fit(X, Yy)
        return self
Example #5
    def __init__(self, num_classes):
        super(FCN_GCN, self).__init__()
        self.num_classes = num_classes  # 21 in the paper

        resnet = models.resnet50(pretrained=True)

        self.conv1 = resnet.conv1  # 7x7,64, stride=2 (output_size=112x112)
        self.bn0 = resnet.bn1  #BatchNorm2d(64)
        self.relu = resnet.relu
        self.maxpool = resnet.maxpool  # maxpool /2 (kernel_size=3, stride=2, padding=1)

        self.layer1 = resnet.layer1  #res-2 o/p = 56x56,256
        self.layer2 = resnet.layer2  #res-3 o/p = 28x28,512
        self.layer3 = resnet.layer3  #res-4 o/p = 14x14,1024
        self.layer4 = resnet.layer4  #res-5 o/p = 7x7,2048

        self.gcn1 = GCN(256, self.num_classes, 55)   # gcn_i after layer-1
        self.gcn2 = GCN(512, self.num_classes, 27)   # gcn_i after layer-2
        self.gcn3 = GCN(1024, self.num_classes, 13)  # gcn_i after layer-3
        self.gcn4 = GCN(2048, self.num_classes, 7)   # gcn_i after layer-4

        self.br1 = BR(num_classes)
        self.br2 = BR(num_classes)
        self.br3 = BR(num_classes)
        self.br4 = BR(num_classes)
        self.br5 = BR(num_classes)
        self.br6 = BR(num_classes)
        self.br7 = BR(num_classes)
        self.br8 = BR(num_classes)
        self.br9 = BR(num_classes)
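
In this example BR is not the multi-label wrapper from the earlier examples but the Boundary Refinement block of the GCN segmentation architecture ('Large Kernel Matters', Peng et al. 2017) that this constructor follows. A minimal sketch consistent with how BR(num_classes) is used here, assuming the paper's residual form:

import torch.nn as nn

class BR(nn.Module):
    ''' Boundary Refinement: a small residual unit on class score maps,
        out = x + conv3x3(relu(conv3x3(x))) '''

    def __init__(self, num_classes):
        super(BR, self).__init__()
        self.conv1 = nn.Conv2d(num_classes, num_classes, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=3, padding=1)

    def forward(self, x):
        return x + self.conv2(self.relu(self.conv1(x)))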
Example #6
    def train(self, X, Y, W_e=None, E=500, lr=0.1):
        '''
            X: input
            Y: output
            E: number of RBM training epochs
            lr: RBM learning rate
        '''
        # 0. Extract Dimensions
        N, D = X.shape
        self.L = Y.shape[1]

        # 1. Train the RBM as an unsupervised feature extractor
        from RBME import RBM
        self.rbm = RBM(num_visible=D, num_hidden=self.H, learning_rate=lr)
        self.rbm.train(X, max_epochs=E)
        Z = self.rbm.run_visible(X)  # hidden-unit activations for X

        # (alternative feature extractors, e.g. sklearn.neural_network.BernoulliRBM,
        # could be substituted here)

        # 2. Train the final layer, h : Z -> Y
        self.h = BR(self.L, self.h)
        self.h.train(Z, Y)
Example #7
    def __init__(self, in_channel):
        super(AdaMatting, self).__init__()

        # Encoder
        self.encoder_conv = nn.Sequential(
            nn.Conv2d(in_channel,
                      64,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=True), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
        encoder_inplanes = 64
        self.encoder_maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.encoder_resblock1, encoder_inplanes = make_resblock(
            encoder_inplanes, 64, blocks=3, stride=2, block=Bottleneck)
        self.encoder_resblock2, encoder_inplanes = make_resblock(
            encoder_inplanes, 128, blocks=3, stride=2, block=Bottleneck)
        self.encoder_resblock3, encoder_inplanes = make_resblock(
            encoder_inplanes, 256, blocks=3, stride=2, block=Bottleneck)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:  # guard: layers created with bias=False have m.bias None
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

        # Boundary Refinement
        self.br1 = BR(64)
        self.br2 = BR(64 * Bottleneck.expansion)
        self.br3 = BR(128 * Bottleneck.expansion)

        # ResNet boundary shortcuts: compose each shortcut block with its
        # boundary-refinement module (a module cannot be *called* on another
        # module at construction time, so nn.Sequential is used instead)
        shortcut_inplanes = 64
        self.shortcut_shallow_initial, shortcut_inplanes = make_resblock(
            shortcut_inplanes, 256, blocks=1, stride=2, block=Bottleneck)
        self.shortcut_shallow = nn.Sequential(self.shortcut_shallow_initial,
                                              self.br1)
        self.shortcut_middle_initial, shortcut_inplanes = make_resblock(
            shortcut_inplanes, 256, blocks=1, stride=2, block=Bottleneck)
        self.shortcut_middle = nn.Sequential(self.shortcut_middle_initial,
                                             self.br2)
        self.shortcut_deep_initial, shortcut_inplanes = make_resblock(
            shortcut_inplanes, 256, blocks=1, stride=2, block=Bottleneck)
        self.shortcut_deep = nn.Sequential(self.shortcut_deep_initial,
                                           self.br3)

        # Boundary GCN Shortcuts
        # self.shortcut_shallow_intial = GCN(64, 64)
        # self.shortcut_shallow = self.br1(self.shortcut_shallow_intial)
        # self.shortcut_middle_initial = GCN(64 * Bottleneck.expansion, 64 * Bottleneck.expansion)
        # self.shortcut_middle = self.br2(self.shortcut_middle_initial)
        # self.shortcut_deep_initial = GCN(128 * Bottleneck.expansion, 128 * Bottleneck.expansion)
        # self.shortcut_deep = self.br3(self.shortcut_deep_initial)

        # Original shortcuts
        # self.shortcut_shallow = GCN(64, 64)
        # self.shortcut_middle = GCN(64 * Bottleneck.expansion, 64 * Bottleneck.expansion)
        # self.shortcut_deep = GCN(128 * Bottleneck.expansion, 128 * Bottleneck.expansion)
        # Separate two middle shortcuts
        # self.shortcut_shallow = self.shortcut_block(64, 64)
        # self.shortcut_middle_a = self.shortcut_block(64 * Bottleneck.expansion, 64 * Bottleneck.expansion)
        # self.shortcut_middle_t = self.shortcut_block(64 * Bottleneck.expansion, 64 * Bottleneck.expansion)
        # self.shortcut_deep = self.shortcut_block(128 * Bottleneck.expansion, 128 * Bottleneck.expansion)

        # T-decoder
        self.t_decoder_upscale1 = nn.Sequential(
            self.decoder_unit(256 * Bottleneck.expansion, 512 * 4),
            self.decoder_unit(512 * 4, 512 * 4), nn.PixelShuffle(2))
        self.t_decoder_upscale2 = nn.Sequential(
            self.decoder_unit(512, 256 * 4),
            self.decoder_unit(256 * 4, 256 * 4), nn.PixelShuffle(2))
        self.t_decoder_upscale3 = nn.Sequential(
            self.decoder_unit(256, 64 * 4), self.decoder_unit(64 * 4, 64 * 4),
            nn.PixelShuffle(2))
        self.t_decoder_upscale4 = nn.Sequential(
            self.decoder_unit(64, 3 * (2**2)),
            self.decoder_unit(3 * (2**2), 3 * (2**2)), nn.PixelShuffle(2))
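        # Note: nn.PixelShuffle(2) rearranges (N, C*4, H, W) -> (N, C, 2H, 2W),
        # trading channels for a 2x spatial upscale, which is why each
        # decoder_unit above outputs 4x the channel count it targets.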

        # A-decoder
        self.a_decoder_upscale1 = nn.Sequential(
            self.decoder_unit(256 * Bottleneck.expansion, 512 * 4),
            self.decoder_unit(512 * 4, 512 * 4), nn.PixelShuffle(2))
        self.a_decoder_upscale2 = nn.Sequential(
            self.decoder_unit(512, 256 * 4),
            self.decoder_unit(256 * 4, 256 * 4), nn.PixelShuffle(2))
        self.a_decoder_upscale3 = nn.Sequential(
            self.decoder_unit(256, 64 * 4), self.decoder_unit(64 * 4, 64 * 4),
            nn.PixelShuffle(2))
        self.a_decoder_upscale4 = nn.Sequential(
            self.decoder_unit(64, 1 * (2**2)),
            self.decoder_unit(1 * (2**2), 1 * (2**2)), nn.PixelShuffle(2))

        # Propagation unit
        # self.propunit = PropUnit(
        #     input_dim=4 + 1 + 1,
        #     hidden_dim=[1],
        #     kernel_size=(3, 3),
        #     num_layers=3,
        #     seq_len=3,
        #     bias=True)
        self.prop_unit = nn.Sequential(
            nn.Conv2d(3 + 1 + 1,
                      64,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=True),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1, bias=True),
        )

        # Task uncertainty loss parameters
        self.log_sigma_t_sqr = nn.Parameter(torch.log(torch.Tensor([16.0])))
        self.log_sigma_a_sqr = nn.Parameter(torch.log(torch.Tensor([16.0])))
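
The two log_sigma_*_sqr parameters look like homoscedastic task-uncertainty weights in the style of Kendall et al. (2018), balancing the trimap (t) and alpha (a) losses. A hypothetical sketch of how such parameters are typically combined (the model's actual loss function is not shown here and may differ):

import torch

def uncertainty_weighted_loss(loss_t, loss_a, log_sigma_t_sqr, log_sigma_a_sqr):
    # L = L_t/(2*sigma_t^2) + L_a/(2*sigma_a^2) + (log sigma_t^2 + log sigma_a^2)/2
    return (torch.exp(-log_sigma_t_sqr) * loss_t / 2
            + torch.exp(-log_sigma_a_sqr) * loss_a / 2
            + (log_sigma_t_sqr + log_sigma_a_sqr) / 2)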
Example #8
class DRBM():
    """
        DRBM
        -------------------
        Meta-RBM: Train an RBM, and then stick BR on top.
    """

    W = None

    H = 10
    L = -1

    h = None

    rbm = None

    def __init__(self, num_hidden=10, h=linear_model.LogisticRegression()):
        ''' for H hidden units, and base classifier 'h' '''
        self.H = num_hidden
        self.h = h

    def train(self, X, Y, W_e=None, E=500, lr=0.1):
        '''
            X: input
            Y: output
            E: number of RBM training epochs
            lr: RBM learning rate
        '''
        # 0. Extract Dimensions
        N, D = X.shape
        self.L = Y.shape[1]

        # 1. Train the RBM as an unsupervised feature extractor
        from RBME import RBM
        self.rbm = RBM(num_visible=D, num_hidden=self.H, learning_rate=lr)
        self.rbm.train(X, max_epochs=E)
        Z = self.rbm.run_visible(X)  # hidden-unit activations for X

        # (alternative feature extractors, e.g. sklearn.neural_network.BernoulliRBM,
        # could be substituted here)

        # 2. Train the final layer, h : Z -> Y
        self.h = BR(self.L, self.h)
        self.h.train(Z, Y)

    def predict(self, X):
        '''
            return predictions for X
        '''
        A = self.rbm.run_visible(X)  # hidden-unit activations for X

        # binarise the activations and propagate to the final layer
        Y = self.h.predict(A > 0.5)
        return Y
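
A hypothetical usage sketch for DRBM, with illustrative data only (the RBM expects binary {0,1} inputs, and Y is an N x L binary label matrix):

import numpy as np
from sklearn import linear_model

rng = np.random.RandomState(0)
X = (rng.rand(50, 12) > 0.5).astype(float)  # binary inputs for the RBM
Y = (rng.rand(50, 4) > 0.5).astype(int)     # four binary labels

model = DRBM(num_hidden=10, h=linear_model.LogisticRegression())
model.train(X, Y, E=500, lr=0.1)  # E: RBM epochs, lr: RBM learning rate
Y_pred = model.predict(X)         # N x 4 binary predictions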