Example #1
    def __init__(
            self,
            input_shape,  # (batch_size, input_size)
            latent_dim=2,
            epoch=5,
            units=1000,
            pre=None,
            dec=None,
            lr_ch=(5, 1.1),
            modeldir='model',
            outdir='result',
            cmap=plt.get_cmap('viridis'),
    ):
        self.input_shape = input_shape
        self.latent_dim = latent_dim
        self.epoch = epoch
        self.lr_ch = lr_ch
        self.shot = epoch // lr_ch[0]
        self.modeldir = modeldir
        self.outdir = outdir
        if not pre:
            pre = rm.Sequential([rm.Dense(units), rm.Relu()])
        # build the encoder outside the branch so `enc` is defined
        # even when a custom `pre` network is passed in
        enc = Enc(pre, latent_dim)
        if not dec:
            dec = rm.Sequential([
                rm.Dense(units),
                rm.Relu(),
                rm.Dense(input_shape[-1]),
                rm.Sigmoid()
            ])
        self.ae = VAE(enc, dec, latent_dim)
        self.cmap = cmap
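
A minimal usage sketch for the constructor above. The enclosing class name is not shown in the snippet, so VAETrainer below is a hypothetical placeholder; input_shape follows the (batch_size, input_size) convention noted in the signature.

trainer = VAETrainer(
    input_shape=(64, 28 * 28),  # hypothetical: a batch of flattened MNIST images
    latent_dim=2,
    epoch=10,
)
# trainer.ae is the assembled VAE; trainer.shot == epoch // lr_ch[0]
# controls how often the learning-rate change in lr_ch is applied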
Example #2
    def __init__(
            self,
            enc_base, dec,
            batch_size,
            latent_dim=2,
            mode='simple',
            label_dim=0,
            prior='normal',
            prior_dist=None,
            hidden=1000,
            full_rate=0.1,  # weight the overall shape vs. the per-label shape
            fm_rate=1.,  # same purpose as full_rate
    ):
        self.latent_dim = latent_dim
        self.mode = mode
        self.label_dim = label_dim
        self.batch_size = batch_size
        self.prior = prior
        self.prior_dist = prior_dist
        self.full_rate = full_rate
        self.fm_rate = fm_rate

        if self.mode in ('clustering', 'reduction'):
            self.enc = Enc(enc_base, (latent_dim, label_dim),
                           output_act=(None, rm.softmax))
        else:
            self.enc = Enc(enc_base, latent_dim)
        self.dec = dec
        self.dis = rm.Sequential([
            rm.Dense(hidden), rm.LeakyRelu(),
            rm.Dense(hidden), rm.LeakyRelu(),
            rm.Dense(1), rm.Sigmoid()
        ])
        if self.mode in ('clustering', 'reduction'):
            self.cds = rm.Sequential([
                # do not put BatchNormalize at the head of a discriminator
                rm.Dense(hidden), rm.LeakyRelu(),
                rm.BatchNormalize(),
                rm.Dense(hidden), rm.LeakyRelu(),
                # rm.BatchNormalize(),
                rm.Dense(1), rm.Sigmoid()
            ])
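
In 'clustering' or 'reduction' mode the constructor above gives the encoder a second, softmax-activated head of width label_dim and builds the extra discriminator cds. A minimal construction sketch, assuming enc_base and dec are rm.Sequential networks like those defined later on this page:

aae = AAE(enc_base, dec, batch_size=64,
          latent_dim=2, mode='clustering', label_dim=10)
# aae.enc now has two output heads (latent code, label probabilities);
# aae.cds exists only in 'clustering'/'reduction' mode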
Example #3
    def __init__(self, latent_dim):
        self.latent_dim = latent_dim
        # encoder: a single dense layer squashed to [-1, 1]
        self.enc = rm.Sequential([
            # rm.BatchNormalize(),
            # rm.Dense(100),
            # rm.Dropout(),
            # rm.Relu(),
            rm.Dense(latent_dim),
            rm.Tanh()
        ])
        # decoder: reconstruct a flattened 28x28 image in [0, 1]
        self.dec = rm.Sequential([
            # rm.BatchNormalize(),
            # rm.Dense(100),
            # rm.Dropout(),
            # rm.LeakyRelu(),
            rm.Dense(28 * 28),
            rm.Sigmoid()
        ])
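
A minimal usage sketch for this autoencoder. The class name is not shown in the snippet, so AE below is a hypothetical placeholder, and x stands for a batch of flattened 28x28 images:

ae = AE(latent_dim=2)
z = ae.enc(x)       # (batch, 784) -> (batch, 2), squashed to [-1, 1] by Tanh
x_rec = ae.dec(z)   # (batch, 2) -> (batch, 784), in [0, 1] via Sigmoid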
Example #4
# imports assumed by this snippet
import time

import numpy as np
import renom as rm
import renom.cuda as cuda
from renom import Sgd


def exp_dense():
    np.random.seed(10)
    cuda.set_cuda_active(False)  # benchmark on CPU
    a = np.random.rand(32, 320).astype(np.float32)
    b = np.random.rand(32, 80).astype(np.float32)
    layer1 = rm.Dense(input_size=320, output_size=100)
    layer2 = rm.Dense(input_size=100, output_size=80)
    # inputs are constants: auto_update=False keeps them out of the update
    ga = rm.Variable(a, auto_update=False)
    gb = rm.Variable(b, auto_update=False)
    opt = Sgd(0.01, momentum=0.3)
    start_t = time.time()

    for _ in range(500):
        # squared-error loss averaged over the batch of 32
        loss = rm.Sum((layer2(rm.Sigmoid(layer1(ga))) - gb)**2) / 32
        loss.ensure_cpu()
        print(loss)
        grad = loss.grad()
        grad.update(opt)
    print(time.time() - start_t)
Example #5
def exp_convolution2():
    # relies on the same imports as exp_dense above
    np.random.seed(10)
    cuda.set_cuda_active(True)  # benchmark on GPU
    a = np.random.randn(8, 3, 12, 12).astype(np.float32)
    b = np.random.randn(8, 16, 10, 10).astype(np.float32)
    layer1 = rm.Conv2d(channel=16, input_size=a.shape[1:])

    ga = rm.Variable(a, auto_update=False)
    gb = rm.Variable(b, auto_update=False)

    opt = Sgd(0.001, momentum=0.3)
    start_t = time.time()
    for _ in range(100000):
        loss = rm.Sum((rm.Sigmoid(layer1(ga)) - gb)**2) / 8
        loss.ensure_cpu()
        print(loss)
        grad = loss.grad()
        grad.update(opt)
        del loss  # drop the graph each step so GPU memory is released
    print(time.time() - start_t)
Example #6
enc_base = rm.Sequential([  # opening reconstructed; layers before this point are not shown in the source
    rm.Dense(hidden),
    rm.Relu(),  # rm.Dropout(),
    # do not put BatchNormalize at the end of the generator
    rm.Dense(latent_dim, initializer=Uniform())
])
dec = rm.Sequential([
    # rm.BatchNormalize(),
    rm.Dense(hidden),
    rm.LeakyRelu(),
    rm.BatchNormalize(),
    rm.Dense(hidden),
    rm.LeakyRelu(),
    # rm.BatchNormalize(),
    rm.Dense(28 * 28),
    rm.Sigmoid()
])

ae = AAE(enc_base,
         dec,
         batch_size,
         latent_dim=latent_dim,
         hidden=200,
         prior=model_dist,
         mode=model_type,
         label_dim=10)

dis_opt = rm.Adam(lr=0.0005, b=0.5)
enc_opt = rm.Adam(lr=0.0005, b=0.5)

N = len(x_train)
Example #7
    def __init__(
            self,
            input_params=32768,
            first_shape=(1, 32, 32, 32),
            output_shape=(1, 1, 64, 64),
            check_network=False,
            batchnormal=True,
            dropout=False,
            down_factor=1.6,
            act=rm.Relu(),
            last_act=rm.Sigmoid(),
    ):
        self.input_params = input_params
        self.latent_dim = input_params
        self.first_shape = first_shape
        self.output_shape = output_shape
        self.act = act
        self.last_act = last_act
        self.down_factor = down_factor

        def decide_factor(src, dst):
            # number of stride-2 doublings needed to grow dst up to src
            factor = np.log(src / dst) / np.log(2)
            if factor % 1 == 0:
                return factor
            return np.ceil(factor)

        ch = first_shape[1]
        v_factor = decide_factor(output_shape[2], first_shape[2])
        h_factor = decide_factor(output_shape[3], first_shape[3])
        v_dim, h_dim = first_shape[2], first_shape[3]
        parameters = []
        check_params = np.array(first_shape[1:]).prod()
        self.trans = False
        if input_params != check_params:
            if check_network:
                print('--- Decoder Network ---')
                print('inserting Dense({})'.format(check_params))
            self.trans = rm.Dense(check_params)
        while v_factor != 0 or h_factor != 0:
            if batchnormal:
                parameters.append(rm.BatchNormalize())
                if check_network:
                    print('BN ', end='')
            stride = (2 if v_factor > 0 else 1, 2 if h_factor > 0 else 1)
            if check_network:
                print('transpose2d ch={}, filter=2, stride={}'.format(
                    ch, stride))
            parameters.append(rm.Deconv2d(channel=ch, filter=2, stride=stride))
            if self.act:
                parameters.append(self.act)
            if ch > output_shape[1]:
                ch = int(np.ceil(ch / self.down_factor))
            v_dim = v_dim * 2 if v_factor > 0 else v_dim + 1
            h_dim = h_dim * 2 if h_factor > 0 else h_dim + 1
            v_factor = v_factor - 1 if v_factor > 0 else 0
            h_factor = h_factor - 1 if h_factor > 0 else 0
        if v_dim > output_shape[2] or h_dim > output_shape[3]:
            last_filter = (v_dim - output_shape[2] + 1,
                           h_dim - output_shape[3] + 1)
            if check_network:
                print('conv2d filter={}, stride=1'.format(last_filter))
            parameters.append(
                rm.Conv2d(channel=output_shape[1],
                          filter=last_filter,
                          stride=1))
        self.parameters = rm.Sequential(parameters)
        if check_network:
            self.forward(np.zeros((first_shape[0], input_params)),
                         print_parameter=True)
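
A minimal usage sketch for the builder above. The class name is not shown in the snippet, so Dec below is a hypothetical placeholder; with check_network=True the constructor prints each inserted layer and runs a dry forward pass:

dec = Dec(input_params=32768,
          first_shape=(1, 32, 32, 32),
          output_shape=(1, 1, 64, 64),
          check_network=True)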
Example #8
        check_network=debug,
    )
else:  # fully connected network
    batch_shape = (batch_size, 28*28)
    x_train = x_train.reshape(-1, 28*28)
    x_test = x_test.reshape(-1, 28*28)
    enc_pre = rm.Sequential([
        rm.Dense(hidden), rm.Relu(),
        rm.BatchNormalize(),
        rm.Dense(hidden, initializer=Uniform()), rm.Relu()
    ])
    enc = Enc(enc_pre, latent_dim)
    dec = rm.Sequential([
        rm.Dense(hidden), rm.Relu(),
        rm.BatchNormalize(),
        rm.Dense(28*28), rm.Sigmoid(),
    ])
vae = VAE(enc, dec, latent_dim)

N = len(x_train)
for e in range(epoch):
    # only act at the end of each shot period
    if e % shot_period != shot_period - 1:
        continue
    outdir = 'model/{}'.format(model_id)
    suffix = int(e // shot_period) + 1
    enc.load('{}/enc{}.h5'.format(outdir, suffix))
    dec.load('{}/dec{}.h5'.format(outdir, suffix))
    # vae = VAE(enc, dec, latent_dim)
    vae.set_models(inference=True)
    decd = vae(x_test[:batch_size]).as_ndarray()  # reconstructions
    lvec = vae.enc.zm.as_ndarray()  # latent means
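
The loop above leaves lvec holding the latent means for one test batch. A minimal plotting sketch, assuming latent_dim == 2 and that matching labels y_test exist for coloring (an assumption, not shown in the snippet):

plt.scatter(lvec[:, 0], lvec[:, 1], c=y_test[:batch_size],
            cmap=plt.get_cmap('viridis'), s=5)
plt.savefig('latent{}.png'.format(suffix))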
Example #9
# N, noise, and epoch are defined earlier in the source snippet
x_axis = np.linspace(-np.pi, np.pi, N)
base = np.sin(x_axis)
y_axis = base + noise
x_axis = x_axis.reshape(N, 1)
y_axis = y_axis.reshape(N, 1)
# shuffle, then split even/odd indices into train and test sets
idx = np.random.permutation(N)
train_idx = idx[::2]
test_idx = idx[1::2]
train_x = x_axis[train_idx]
train_y = y_axis[train_idx]
test_x = x_axis[test_idx]
test_y = y_axis[test_idx]

seq_model = rm.Sequential([
    rm.Dense(1),
    rm.Dense(10),
    rm.Sigmoid(),
    rm.Dense(1),
])

optimizer = rm.Sgd(0.1, momentum=0.5)
plt.clf()
epoch_splits = 10
epoch_period = epoch // epoch_splits
fig, ax = plt.subplots(epoch_splits, 2, figsize=(4, epoch_splits))

curve = [[], []]
for e in range(epoch):
    # forward pass inside train() so the graph is kept for backprop
    with seq_model.train():
        loss = rm.mean_squared_error(seq_model(train_x), train_y)
    grad = loss.grad()
    grad.update(optimizer)
    curve[0].append(loss.as_ndarray())
    # test loss, computed outside train() for inference only
    loss = rm.mean_squared_error(seq_model(test_x), test_y)