class AutoEncoder:
    """Plain autoencoder built from an input width and a hidden width.

    Wraps a Chainer ``FunctionSet`` holding one encoding and one decoding
    linear layer; both directions apply ReLU followed by dropout.
    """

    def __init__(self, xn, hn):
        # xn: number of input units, hn: number of hidden units.
        self.model = FunctionSet(
            encode=F.Linear(xn, hn),
            decode=F.Linear(hn, xn),
        )

    def encode(self, x, train=True):
        """Map input to the hidden representation (ReLU + dropout)."""
        return F.dropout(F.relu(self.model.encode(x)), train=train)

    def decode(self, h, train=True):
        """Map a hidden representation back to input space (ReLU + dropout)."""
        return F.dropout(F.relu(self.model.decode(h)), train=train)

    def train_once(self, x_data):
        """Return the reconstruction MSE loss for one batch."""
        x = Variable(x_data)
        reconstruction = self.decode(self.encode(x))
        return F.mean_squared_error(x, reconstruction)

    def reconstract(self, x_data):
        """Reconstruct a batch with dropout disabled (inference mode)."""
        x = Variable(x_data)
        hidden = self.encode(x, train=False)
        return self.decode(hidden, train=False).data
# Example #2
class ConvolutionalDenoisingAutoencoder():
    """Denoising autoencoder with a 2D-convolutional encoder and a linear decoder.

    The decoder maps the flattened convolution output back to the flattened
    input image (``n_in_channels * imgsize**2`` values), so the loss compares
    against the input reshaped to (N, C*H*W).
    """

    def __init__(self, imgsize, n_in_channels, n_out_channels, ksize, stride=1, pad=0, use_cuda=False):
        # Spatial size of the conv output: floor((imgsize + 2*pad - ksize) / stride) + 1
        out_size = math.floor((imgsize + 2 * pad - ksize) / stride) + 1
        self.model = FunctionSet(
            encode=F.Convolution2D(n_in_channels, n_out_channels, ksize, stride, pad),
            decode=F.Linear(n_out_channels * out_size ** 2,
                            n_in_channels * imgsize ** 2)
        )
        self.use_cuda = use_cuda

        if self.use_cuda:
            self.model.to_gpu()

        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model.collect_parameters())

    def encode(self, x_var):
        """Sigmoid-activated convolutional encoding."""
        return F.sigmoid(self.model.encode(x_var))

    def decode(self, x_var):
        """Linear decoding (no activation)."""
        return self.model.decode(x_var)

    def predict(self, x_data):
        """Return the hidden representation for a batch as a host array."""
        if self.use_cuda:
            x_data = cuda.to_gpu(x_data)
        x = Variable(x_data)
        p = self.encode(x)
        if self.use_cuda:
            return cuda.to_cpu(p.data)
        else:
            return p.data

    def cost(self, x_data):
        """Reconstruction MSE against the flattened (N, C*H*W) input."""
        x = Variable(x_data)
        t = Variable(x_data.reshape(x_data.shape[0], x_data.shape[1]*x_data.shape[2]*x_data.shape[3]))
        # Dropout corrupts the input — the "denoising" part of the model.
        h = F.dropout(x)
        h = self.encode(h)
        y = self.decode(h)
        return F.mean_squared_error(y, t)

    def train(self, x_data):
        """Run one optimizer step on a batch and return the loss value."""
        if self.use_cuda:
            x_data = cuda.to_gpu(x_data)
        self.optimizer.zero_grads()
        loss = self.cost(x_data)
        loss.backward()
        self.optimizer.update()
        if self.use_cuda:
            return float(cuda.to_cpu(loss.data))
        else:
            return loss.data

    def test(self, x_data):
        """Evaluate the loss on a batch without updating parameters."""
        if self.use_cuda:
            x_data = cuda.to_gpu(x_data)
        loss = self.cost(x_data)
        # Fix: only round-trip through cuda when CUDA is in use — previously
        # cuda.to_cpu was called unconditionally, unlike train()/predict().
        if self.use_cuda:
            return float(cuda.to_cpu(loss.data))
        else:
            return float(loss.data)
# Example #3
class DenoisingAutoencoder(SuperClass):
    """One-layer denoising autoencoder: sigmoid encoder, linear decoder.

    Relies on the base class for batching/epoch bookkeeping and for the
    ``procInput``/``procOutput``/``registModel`` hooks (defined elsewhere).
    """

    def __init__(self, n_in, n_hidden, n_epoch=20, batchsize=100, use_cuda=False):
        super().__init__(n_epoch, batchsize, use_cuda)

        self.model = FunctionSet(
            encode=F.Linear(n_in, n_hidden),
            decode=F.Linear(n_hidden, n_in)
        )
        self.registModel()

    def encode(self, x_var):
        """Hidden representation with sigmoid activation."""
        return F.sigmoid(self.model.encode(x_var))

    def decode(self, x_var):
        """Linear reconstruction from the hidden representation."""
        return self.model.decode(x_var)

    def predict(self, x_data):
        """Encode a batch and return the processed hidden activations."""
        x_data = self.procInput(x_data)
        x = Variable(x_data)
        p = self.encode(x)
        return self.procOutput(p.data)

    def cost(self, x_data):
        """Training loss: reconstruct from a dropout-corrupted input."""
        x_data = self.procInput(x_data)
        x = Variable(x_data)
        t = Variable(x_data)
        h = self.encode(F.dropout(t))
        y = self.decode(h)
        return self.procOutput(F.mean_squared_error(y, x))

    def test(self, x_data):
        """Evaluation loss: reconstruct from the uncorrupted input."""
        x_data = self.procInput(x_data)
        x = Variable(x_data)
        t = Variable(x_data)
        h = self.encode(t)
        y = self.decode(h)
        return self.procOutput(F.mean_squared_error(y, x))

    def save(self, filedir, n_hidden, n_epoch, batchsize):
        """Pickle the encoder weights (W, b) into *filedir*.

        Note: ``batchsize`` is accepted for interface compatibility but is
        not encoded in the file name.
        """
        name = "SdA_"+ "layer"+str(n_hidden) + "_epoch"+str(n_epoch)
        param = {}
        param['W'] = self.model.encode.parameters[0]
        param['b'] = self.model.encode.parameters[1]
        # Fix: close the file deterministically (previously the handle
        # from open() was never closed).
        with open(filedir+'/'+name+'.pkl', 'wb') as f:
            pickle.dump(param, f, pickle.HIGHEST_PROTOCOL)
        return

    def load(self, filename):
        """Load pickled encoder weights; appends '.pkl' if missing.

        Security note: pickle.load must only be used on trusted files.
        """
        if filename.find('.pkl')==-1:
            filename = filename + '.pkl'
        # Fix: close the file deterministically (previously leaked).
        with open(filename, 'rb') as f:
            param = pickle.load(f)
        self.model.encode.parameters = (param['W'], param['b'])
        return
# Example #4
class DenoisingAutoEncoder():
    """Fully-connected denoising autoencoder with sigmoid encoder and decoder."""

    def __init__(self, n_in, n_hidden, use_cuda=False):
        self.model = FunctionSet(encode=F.Linear(n_in, n_hidden),
                                 decode=F.Linear(n_hidden, n_in))
        self.use_cuda = use_cuda

        if self.use_cuda:
            self.model.to_gpu()

        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model.collect_parameters())

    def encode(self, x_var):
        """Sigmoid-activated encoding."""
        return F.sigmoid(self.model.encode(x_var))

    def decode(self, x_var):
        """Sigmoid-activated decoding."""
        return F.sigmoid(self.model.decode(x_var))

    def predict(self, x_data):
        """Return the hidden representation for a batch as a host array."""
        if self.use_cuda:
            x_data = cuda.to_gpu(x_data)
        x = Variable(x_data)
        p = self.encode(x)
        # Fix: only go through cuda when CUDA is in use — previously
        # cuda.to_cpu was called even on plain CPU arrays.
        if self.use_cuda:
            return cuda.to_cpu(p.data)
        else:
            return p.data

    def cost(self, x_data):
        """Reconstruction MSE from a dropout-corrupted input (denoising)."""
        x = Variable(x_data)
        t = Variable(x_data)
        x_n = F.dropout(x)
        h = self.encode(x_n)
        y = self.decode(h)
        return F.mean_squared_error(y, t)

    def train(self, x_data):
        """Run one optimizer step on a batch and return the loss value."""
        if self.use_cuda:
            x_data = cuda.to_gpu(x_data)
        self.optimizer.zero_grads()
        loss = self.cost(x_data)
        loss.backward()
        self.optimizer.update()
        # Fix: same CUDA guard as test(); avoids an unconditional cuda call.
        if self.use_cuda:
            return float(cuda.to_cpu(loss.data))
        else:
            return float(loss.data)

    def test(self, x_data):
        """Evaluate the loss on a batch without updating parameters."""
        if self.use_cuda:
            x_data = cuda.to_gpu(x_data)
        loss = self.cost(x_data)
        if self.use_cuda:
            return float(cuda.to_cpu(loss.data))
        else:
            return float(loss.data)
class MNISTNet():
    """Autoencoder sized for MNIST (28x28 inputs, 100 hidden units)."""

    def __init__(self):
        n_in = 28 * 28
        n_hidden = 100
        self.model = FunctionSet(
            encode=F.Linear(n_in, n_hidden),
            decode=F.Linear(n_hidden, n_in)
        )
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model.collect_parameters())

    def encode(self, x_var):
        """Sigmoid-activated encoding."""
        return F.sigmoid(self.model.encode(x_var))

    def decode(self, x_var):
        """Sigmoid-activated decoding."""
        return F.sigmoid(self.model.decode(x_var))

    def predict(self, x_data):
        """Return the hidden representation for a batch."""
        x = Variable(x_data)
        p = self.encode(x)
        return p.data

    def cost(self, x_data, dropout=True):
        """Reconstruction MSE; optionally corrupts the input with dropout.

        Fix: the original `else` branch was tab-indented inside a
        space-indented block, which is a TabError under Python 3.
        """
        x = Variable(x_data)
        t = Variable(x_data)
        if dropout:
            x_n = F.dropout(x, ratio=0.4)
        else:
            x_n = x
        h = self.encode(x_n)
        y = self.decode(h)
        return F.mean_squared_error(y, t)

    def train(self, x_data):
        """Run one optimizer step on a batch and return the loss value."""
        self.optimizer.zero_grads()
        loss = self.cost(x_data)
        loss.backward()
        self.optimizer.update()
        return float(loss.data)
class MNISTNet():
    """Autoencoder for 28x28 MNIST images with a 100-unit hidden layer."""

    def __init__(self):
        input_units = 28 * 28
        hidden_units = 100
        self.model = FunctionSet(encode=F.Linear(input_units, hidden_units),
                                 decode=F.Linear(hidden_units, input_units))
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model.collect_parameters())

    def encode(self, x_var):
        """Encode through the linear layer with a sigmoid nonlinearity."""
        return F.sigmoid(self.model.encode(x_var))

    def decode(self, x_var):
        """Decode through the linear layer with a sigmoid nonlinearity."""
        return F.sigmoid(self.model.decode(x_var))

    def predict(self, x_data):
        """Return the hidden activations for a batch of inputs."""
        return self.encode(Variable(x_data)).data

    def cost(self, x_data, dropout=True):
        """Reconstruction MSE; corrupts the input with 40% dropout by default."""
        target = Variable(x_data)
        source = Variable(x_data)
        corrupted = F.dropout(source, ratio=0.4) if dropout else source
        reconstruction = self.decode(self.encode(corrupted))
        return F.mean_squared_error(reconstruction, target)

    def train(self, x_data):
        """Perform one Adam update on a batch and return the loss."""
        self.optimizer.zero_grads()
        loss = self.cost(x_data)
        loss.backward()
        self.optimizer.update()
        return float(loss.data)
# Example #7
class ConvolutionalDenoisingAutoencoder():
    """Denoising autoencoder: 2D-convolutional encoder, linear decoder.

    The decoder maps the flattened convolution output back to the flattened
    input image (``n_in_channels * imgsize**2`` values).
    """

    def __init__(self,
                 imgsize,
                 n_in_channels,
                 n_out_channels,
                 ksize,
                 stride=1,
                 pad=0,
                 use_cuda=False):
        # Spatial size of the conv output: floor((imgsize + 2*pad - ksize) / stride) + 1
        out_size = math.floor((imgsize + 2 * pad - ksize) / stride) + 1
        self.model = FunctionSet(
            encode=F.Convolution2D(n_in_channels, n_out_channels, ksize,
                                   stride, pad),
            decode=F.Linear(n_out_channels * out_size**2,
                            n_in_channels * imgsize**2))
        self.use_cuda = use_cuda

        if self.use_cuda:
            self.model.to_gpu()

        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model.collect_parameters())

    def encode(self, x_var):
        """Sigmoid-activated convolutional encoding."""
        return F.sigmoid(self.model.encode(x_var))

    def decode(self, x_var):
        """Linear decoding (no activation)."""
        return self.model.decode(x_var)

    def predict(self, x_data):
        """Return the hidden representation for a batch as a host array."""
        if self.use_cuda:
            x_data = cuda.to_gpu(x_data)
        x = Variable(x_data)
        p = self.encode(x)
        if self.use_cuda:
            return cuda.to_cpu(p.data)
        else:
            return p.data

    def cost(self, x_data):
        """Reconstruction MSE against the flattened (N, C*H*W) input."""
        x = Variable(x_data)
        t = Variable(
            x_data.reshape(x_data.shape[0], x_data.shape[1] * x_data.shape[2] *
                           x_data.shape[3]))
        # Dropout corrupts the input — the "denoising" part of the model.
        h = F.dropout(x)
        h = self.encode(h)
        y = self.decode(h)
        return F.mean_squared_error(y, t)

    def train(self, x_data):
        """Run one optimizer step on a batch and return the loss value."""
        if self.use_cuda:
            x_data = cuda.to_gpu(x_data)
        self.optimizer.zero_grads()
        loss = self.cost(x_data)
        loss.backward()
        self.optimizer.update()
        if self.use_cuda:
            return float(cuda.to_cpu(loss.data))
        else:
            return loss.data

    def test(self, x_data):
        """Evaluate the loss on a batch without updating parameters."""
        if self.use_cuda:
            x_data = cuda.to_gpu(x_data)
        loss = self.cost(x_data)
        # Fix: only round-trip through cuda when CUDA is in use — previously
        # cuda.to_cpu was called unconditionally, unlike train()/predict().
        if self.use_cuda:
            return float(cuda.to_cpu(loss.data))
        else:
            return float(loss.data)