Code Example #1
    def test_save_loss_graphs_no_class_weight(self):
        x = np.random.uniform(-1, 1, self.x_shape)
        x = Variable(x.astype(np.float32))
        t = np.random.randint(
            0, 12, (self.x_shape[0], self.x_shape[2], self.x_shape[3]))
        t = Variable(t.astype(np.int32))

        for depth in six.moves.range(1, self.n_encdec + 1):
            model = segnet.SegNet(n_encdec=self.n_encdec,
                                  n_classes=12,
                                  in_channel=self.x_shape[1])
            model = segnet.SegNetLoss(model,
                                      class_weight=None,
                                      train_depth=depth)
            y = model(x, t)
            cg = build_computational_graph([y],
                                           variable_style=_var_style,
                                           function_style=_func_style).dump()
            for e in range(1, self.n_encdec + 1):
                self.assertTrue(
                    'encdec{}'.format(e) in model.predictor._children)

            fn = 'tests/SegNet_xt_depth-{}_{}.dot'.format(self.n_encdec, depth)
            if os.path.exists(fn):
                continue
            with open(fn, 'w') as f:
                f.write(cg)
            subprocess.call('dot -Tpng {} -o {}'.format(
                fn, fn.replace('.dot', '.png')),
                            shell=True)
Code Example #2
 def get_xt(self):
     x = np.random.uniform(-1, 1, self.x_shape)
     x = Variable(x.astype(np.float32))
     t = np.random.randint(
         0, self.n_classes,
         (self.x_shape[0], self.x_shape[2], self.x_shape[3]))
     t = Variable(t.astype(np.int32))
     return x, t
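
Every snippet on this page follows the same underlying pattern: cast a raw NumPy (or CuPy) array to the dtype Chainer expects, then wrap it in a Variable. A minimal standalone sketch of that pattern (the shapes and the 10-class label range are arbitrary assumptions for illustration, not taken from any project above):

import numpy as np
from chainer import Variable

# np.random.uniform returns float64 and np.random.randint returns int64 by default,
# so both arrays are cast before wrapping: Chainer links expect float32 inputs and
# int32 class labels.
x = np.random.uniform(-1, 1, (8, 3, 32, 32))
t = np.random.randint(0, 10, (8,))

x = Variable(x.astype(np.float32))
t = Variable(t.astype(np.int32))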
Code Example #3
def test():
    args = parse_args()
    cfg = Config.from_file(args.config)

    data_path = cfg.test.source_data
    out = cfg.test.out
    model_path = cfg.test.gen_path

    print('GPU: {}'.format(args.gpu))

    # number of gesture classes and users
    n_gesture = cfg.train.n_gesture
    n_user = len(cfg.train.dataset_dirs)
    n_style = n_gesture if cfg.style == 'gesture' else n_user

    ## Import generator model
    gen = getattr(models, cfg.train.generator.model)(cfg.train.generator,
                                                     n_style=n_style)
    serializers.load_npz(model_path, gen)
    print('')
    print(f'loading generator weight from {model_path}')

    ## Set GPU
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()

    test_data = data_load(data_path, ges_class=cfg.test.ges_class)
    test_data = np.expand_dims(test_data, axis=1)

    style_label = np.zeros((test_data.shape[0], n_style * 2, 1, 1))
    source = cfg.test.ges_class if cfg.style == 'gesture' else cfg.source_user
    target = cfg.test.target_style
    style_label[:, source] += 1
    style_label[:, target + n_style] += 1

    # move the inputs to the GPU only when one is selected
    if args.gpu >= 0:
        test_data = Variable(cuda.to_gpu(test_data.astype(np.float32)))
        style_label = Variable(cuda.to_gpu(style_label.astype(np.float32)))
    else:
        test_data = Variable(test_data.astype(np.float32))
        style_label = Variable(style_label.astype(np.float32))

    with chainer.using_config('train', False), chainer.using_config(
            'enable_backprop', False):
        gen_data = gen(test_data, style_label)
    gen_data.to_cpu()

    if cfg.style == 'gesture':
        save_path = f'./{out}/user{cfg.source_user}/ges{target}_from_ges{source}'
    else:
        save_path = f'./{out}/user{target}/ges{cfg.test.ges_class}_from_user{source}'
    if not os.path.exists(os.path.dirname(save_path)):
        os.makedirs(os.path.dirname(save_path))
    print('saving generated data to ' + save_path + '.npy')
    np.save(save_path, gen_data.data)
Code Example #4
    def real_batch(self, iterator):
        batch = iterator.next()
        x, y = self.converter(batch, self.device)

        c = x[:, -3:, :, :]
        x = x[:, :-3, :, :]

        # cast float16 -> float32 (tensor cores cannot be used with float32)
        x = Variable(x.astype('float32'))
        y = Variable(y.astype('float32'))
        c = Variable(c.astype('float32'))

        return x, y, c
Code Example #5
File: models.py Project: AdrianGScorp/deeprlbootcamp
    def update(self, trajs):
        obs = np.concatenate([traj['observations'] for traj in trajs], axis=0)
        if self.concat_time:
            ts = np.concatenate([np.arange(len(traj['observations'])) / self.env_spec.timestep_limit for traj in trajs],
                                axis=0)
            obs = np.concatenate([obs, ts[:, None]], axis=-1)
        returns = np.concatenate([traj['returns'] for traj in trajs], axis=0)
        baselines = np.concatenate([traj['baselines']
                                    for traj in trajs], axis=0)

        # regress to a mixture of current and past predictions
        targets = returns * (1. - self.mixture_fraction) + \
            baselines * self.mixture_fraction

        # use lbfgs to perform the update
        cur_params = get_flat_params(self)

        obs = Variable(obs)
        targets = Variable(targets.astype(np.float32))

        def f_loss_grad(x):
            set_flat_params(self, x)
            self.cleargrads()
            values = self.compute_baselines(obs)
            loss = F.mean(F.square(values - targets))
            loss.backward()
            flat_grad = get_flat_grad(self)
            return loss.data.astype(np.float64), flat_grad.astype(np.float64)

        new_params = scipy.optimize.fmin_l_bfgs_b(
            f_loss_grad, cur_params, maxiter=10)[0]

        set_flat_params(self, new_params)
Code Example #6
    def real_batch(self, iter_key='main'):
        batch = self.get_iterator(iter_key).next()
        batch = self.converter(batch, self.device)

        if isinstance(batch, (tuple, list)):
            x, t = batch

            # cast float16 -> float32 (tensor cores are not used)
            x = Variable(x.astype('float32'))
            t = Variable(t.astype('float32'))

            return x, t

        x = Variable(batch.astype('float32'))

        return x, None
Code Example #7
    def compute_loss(self, state, action, reward, next_state, episode_ends):
        batchsize = state.shape[0]
        xp = self.dqn.model.xp

        with chainer.using_config("train", True):
            q = self.dqn.compute_q_value(state)
        with chainer.no_backprop_mode():
            max_target_q_data = self.dqn.compute_target_q_value(
                next_state).data
            max_target_q_data = xp.amax(max_target_q_data, axis=1)

        t = reward + (1 -
                      episode_ends) * self.discount_factor * max_target_q_data
        t = Variable(xp.reshape(t.astype(xp.float32), (-1, 1)))

        y = functions.reshape(functions.select_item(q, action), (-1, 1))

        if self.clip_loss:
            loss = functions.huber_loss(t, y, delta=1.0)
        else:
            loss = functions.mean_squared_error(t, y) / 2
        loss = functions.sum(loss)

        # check for NaN (NaN is the only value not equal to itself)
        loss_value = float(loss.data)
        if loss_value != loss_value:
            import pdb
            pdb.set_trace()
        return loss
Code Example #8
File: dram2.py Project: oyucube/traffic
    def make_img(self, x, l, num_lm, random=0):
        if random == 0:
            lm = Variable(xp.clip(l.data, 0, 1))
        else:
            eps = xp.random.normal(0, 1, size=l.data.shape).astype(xp.float32)
            lm = xp.clip(l.data + eps * xp.sqrt(self.vars), 0, 1)

            lm = Variable(lm.astype(xp.float32))
        if self.use_gpu:
            xm = make_sampled_image.generate_xm_const_size_gpu(
                lm.data,
                self.gsize,
                x.data,
                num_lm,
                g_size=self.gsize,
                img_size=self.img_size)
        else:
            xm = make_sampled_image.generate_xm_const_size(
                lm.data,
                self.gsize,
                x.data,
                num_lm,
                g_size=self.gsize,
                img_size=self.img_size)

        return xm, lm
Code Example #9
 def make_img(self, x, l, s, num_lm, random=0):
     if random == 0:
         lm = Variable(xp.clip(l.data, 0, 1))
         sm = Variable(xp.clip(s.data, 0, 1))
     else:
         eps = xp.random.normal(0, 1, size=l.data.shape).astype(xp.float32)
         epss = xp.random.normal(0, 1, size=s.data.shape).astype(xp.float32)
         sm = xp.clip((s.data + xp.sqrt(self.var) * epss), 0,
                      1).astype(xp.float32)
         lm = xp.clip(l.data + eps * xp.sqrt(self.vars), 0, 1)
         sm = Variable(sm)
         lm = Variable(lm.astype(xp.float32))
     if self.use_gpu:
         xm = make_sampled_image.generate_xm_rgb_gpu(lm.data,
                                                     sm.data,
                                                     x,
                                                     num_lm,
                                                     g_size=self.gsize)
     else:
         xm = make_sampled_image.generate_xm_rgb(lm.data,
                                                 sm.data,
                                                 x,
                                                 num_lm,
                                                 g_size=self.gsize)
     return xm, lm, sm
Code Example #10
 def pool1(self, x):
     x = Variable(
         x.astype(cp.float32).reshape(-1, 1, self.atomsize, self.lensize))
     h = F.leaky_relu(self.bn1(self.conv1(x)))  # 1st conv
     h = F.average_pooling_2d(h, (self.k2, 1),
                              stride=self.s2,
                              pad=(self.k2 // 2, 0))  # 1st pooling
     return h.data
Code Example #11
 def fingerprint(self, x):
     x = Variable(
         x.astype(cp.float32).reshape(-1, 1, self.atomsize, self.lensize))
     h = F.leaky_relu(self.bn1(self.conv1(x)))  # 1st conv
     h = F.average_pooling_2d(h, (self.k2, 1),
                              stride=self.s2,
                              pad=(self.k2 // 2, 0))  # 1st pooling
     h = F.leaky_relu(self.bn2(self.conv2(h)))  # 2nd conv
     h = F.average_pooling_2d(h, (self.k3, 1),
                              stride=self.s3,
                              pad=(self.k3 // 2, 0))  # 2nd pooling
     h = F.max_pooling_2d(h,
                          (self.l4, 1))  # global max pooling, fingerprint
     return h.data
Code Example #12
def test(gen_infogan, gen_cgan, n_tests, n_z_infogan, n_z_cgan, n_continuous):
    # cinfogan
    z_infogan = np.random.uniform(-2, 2, (n_tests, n_z_infogan + n_continuous))
    z_infogan = Variable(np.array(z_infogan, dtype=np.float32))

    # cgan
    z_cgan = np.random.uniform(-2, 2, (n_tests, n_z_cgan))
    z_cgan = Variable(np.array(z_cgan, dtype=np.float32))

    x = np.random.uniform(0.0, 1.0, (n_tests, 12))
    x = Variable(x.astype(np.float32))
    infogan_xi = gen_infogan(z_infogan, x)
    cgan_xi = gen_cgan(z_cgan, x)
    return collisions_measure(x.data, cgan_xi.data, infogan_xi.data)
Code Example #13
    def make_img(self, x, l, num_lm, random=0):
        s = xp.log10(xp.ones((1, 1)) * self.gsize / self.img_size) + 1
        sm = xp.repeat(s, num_lm, axis=0)

        if random == 0:
            lm = Variable(xp.clip(l.data, 0, 1))
        else:
            eps = xp.random.normal(0, 1, size=l.data.shape).astype(xp.float32)
            lm = xp.clip(l.data + eps * xp.sqrt(self.vars), 0, 1)
            lm = Variable(lm.astype(xp.float32))
        if self.use_gpu:
            xm = make_sampled_image.generate_xm_rgb_gpu(lm.data, sm, x, num_lm, g_size=self.gsize)
        else:
            xm = make_sampled_image.generate_xm_rgb(lm.data, sm, x, num_lm, g_size=self.gsize)
        return xm, lm
Code Example #14
File: fluid.py Project: mattya/chainer-fluid
def sim():
    zeta0 = (np.random.uniform(-10.0, 10.0, (1, 1, H, W)).astype(np.float32))
    zeta0 = Variable(chainer.cuda.to_gpu(zeta0.astype(np.float32)),
                     volatile=True)
    for it in range(100):
        zeta0 += 0.1 * lap.forward(zeta0)
    zeta = 0.0 + zeta0
    psi = poisson_jacobi(zeta, num_iter=1000)
    rho = Variable(rho0, volatile=True)

    for i in range(10000):
        psi = poisson_jacobi(zeta, x0=psi)

        dpdx = FTCS_X().forward(psi)  # -vy
        dpdy = FTCS_Y().forward(psi)  #  vx
        dzdx = upwind(Kawamura_X().forward(zeta), dpdy)
        dzdy = upwind(Kawamura_Y().forward(zeta), -dpdx)
        lapz = lap.forward(zeta)

        rho_ = rho - 0.5 * dt * (
            dpdy * upwind(Kawamura_X().forward(rho), dpdy) - dpdx * upwind(
                Kawamura_Y().forward(rho), -dpdx) - 0.000 * lap.forward(rho))
        rho_.data[0, 0, :, 0] = rho_.data[0, 0, :, 499]
        sum_rho = chainer.functions.sum(rho_)
        rho_ = rho_ / (xp.zeros_like(rho.data) + sum_rho)

        dzdt = dpdx * dzdy - dpdy * dzdx + nu * lapz
        zeta_ = zeta + 0.5 * dt * dzdt

        psi = poisson_jacobi(zeta_, x0=psi)

        dpdx = FTCS_X().forward(psi)  # -vy
        dpdy = FTCS_Y().forward(psi)  #  vx
        dzdx = upwind(Kawamura_X().forward(zeta_), dpdy)
        dzdy = upwind(Kawamura_Y().forward(zeta_), -dpdx)
        lapz = lap.forward(zeta_)

        rho = rho - dt * (dpdy * upwind(Kawamura_X().forward(rho_), dpdy) -
                          dpdx * upwind(Kawamura_Y().forward(rho_), -dpdx) -
                          0.000 * lap.forward(rho_))
        rho.data[0, 0, :, 0] = rho.data[0, 0, :, 499]
        sum_rho = chainer.functions.sum(rho)
        rho = rho / (xp.zeros_like(rho.data) + sum_rho)

        dzdt = dpdx * dzdy - dpdy * dzdx + nu * lapz
        zeta = zeta + dt * dzdt
        if i % 10 == 0:
            yield zeta, psi, rho, i
Code Example #15
File: fluid.py Project: chenaoki/chainer-fluid
def sim():
    zeta0 = (np.random.uniform(-10.0, 10.0, (1,1,H,W)).astype(np.float32))
    zeta0 = Variable(chainer.cuda.to_gpu(zeta0.astype(np.float32)), volatile=True)
    for it in range(100):
        zeta0 += 0.1*lap.forward(zeta0)
    zeta = 0.0 + zeta0
    psi = poisson_jacobi(zeta, num_iter=1000)
    rho = Variable(rho0, volatile=True)

    for i in range(10000):
        psi = poisson_jacobi(zeta, x0=psi)

        dpdx = FTCS_X().forward(psi)  # -vy
        dpdy = FTCS_Y().forward(psi)  #  vx
        dzdx = upwind(Kawamura_X().forward(zeta), dpdy)
        dzdy = upwind(Kawamura_Y().forward(zeta), -dpdx)
        lapz = lap.forward(zeta)

        rho_ = rho-0.5*dt*(dpdy*upwind(Kawamura_X().forward(rho), dpdy)-dpdx*upwind(Kawamura_Y().forward(rho), -dpdx) - 0.000*lap.forward(rho))
        rho_.data[0,0,:,0] = rho_.data[0,0,:,499]
        sum_rho = chainer.functions.sum(rho_)
        rho_ = rho_/(xp.zeros_like(rho.data)+sum_rho)

        dzdt = dpdx*dzdy - dpdy*dzdx + nu*lapz
        zeta_ = zeta+0.5*dt * dzdt

        psi = poisson_jacobi(zeta_, x0=psi)

        dpdx = FTCS_X().forward(psi)  # -vy
        dpdy = FTCS_Y().forward(psi)  #  vx
        dzdx = upwind(Kawamura_X().forward(zeta_), dpdy)
        dzdy = upwind(Kawamura_Y().forward(zeta_), -dpdx)
        lapz = lap.forward(zeta_)

        rho = rho - dt*(dpdy*upwind(Kawamura_X().forward(rho_), dpdy)-dpdx*upwind(Kawamura_Y().forward(rho_), -dpdx) - 0.000*lap.forward(rho_))
        rho.data[0,0,:,0] = rho.data[0,0,:,499]
        sum_rho = chainer.functions.sum(rho)
        rho = rho/(xp.zeros_like(rho.data)+sum_rho)

        dzdt = dpdx*dzdy - dpdy*dzdx + nu*lapz
        zeta = zeta + dt * dzdt
        if i%10==0:
            yield zeta, psi, rho, i
Code Example #16
    def update_core(self):
        batch = self._iterators['main'].next()
        in_arrays = self.converter(batch, self.device)
        true_x = in_arrays  # mnist (200, 1, 28, 28)

        # create input z as random
        batchsize = true_x.shape[0]
        if self.device >= 0:  # a non-negative device id means a GPU is in use
            z = cuda.cupy.random.normal(size=(batchsize, self.z_dim, 1, 1), dtype=np.float32)
            z = Variable(z)
        else:
            z = np.random.uniform(-1, 1, (batchsize, self.z_dim, 1, 1))
            z = z.astype(dtype=np.float32)
            z = Variable(z)

        # G        -> x1                    ->  y of gen
        #              + -> X -> D -> split
        # Truedata -> x2                    ->  y of true data
        gen_output = self.gen(z) # gen_output (200, 1, 28, 28)
        x = F.concat((gen_output, true_x), 0) # gen_output + true_data = (400, 1, 28, 28)
        dis_output = self.dis(x)
        y_gen, y_data = F.split_axis(dis_output, 2, 0) # 0~1 value (200, 1, 1, 1)

        # Train D to classify G's outputs as 1 (fake) and the true data as 0 (real)
        # sigmoid_cross_entropy(x, 0) == softplus(x)
        # sigmoid_cross_entropy(x, 1) == softplus(-x)
        loss_gen = F.sum(F.softplus(-y_gen))
        loss_data = F.sum(F.softplus(y_data))
        loss = (loss_gen + loss_data) / batchsize

        for optimizer in self._optimizers.values():
            optimizer.target.cleargrads()

        loss.backward()

        for optimizer in self._optimizers.values():
            optimizer.update()

        reporter.report({'loss': loss,
                         'gen/loss': loss_gen / batchsize,
                         'dis/loss': loss_data / batchsize})

        save_image(gen_output, self.epoch, self.device)
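
The generator and discriminator losses above use F.softplus directly, relying on the identity quoted in the comments: sigmoid cross entropy against a label of 0 equals softplus(x), and against a label of 1 equals softplus(-x). A quick standalone check of that identity (illustrative only; the array shape is an arbitrary assumption):

import numpy as np
import chainer.functions as F

x = np.random.randn(5, 1).astype(np.float32)
zeros = np.zeros((5, 1), dtype=np.int32)
ones = np.ones((5, 1), dtype=np.int32)

# elementwise sigmoid cross entropy (reduce='no') matches softplus(+/-x)
assert np.allclose(F.sigmoid_cross_entropy(x, zeros, reduce='no').data,
                   F.softplus(x).data, atol=1e-5)
assert np.allclose(F.sigmoid_cross_entropy(x, ones, reduce='no').data,
                   F.softplus(-x).data, atol=1e-5)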
Code Example #17
    def test_save_normal_graphs(self):
        x = np.random.uniform(-1, 1, self.x_shape)
        x = Variable(x.astype(np.float32))

        for depth in six.moves.range(1, self.n_encdec + 1):
            model = segnet.SegNet(n_encdec=self.n_encdec,
                                  in_channel=self.x_shape[1])
            y = model(x, depth)
            cg = build_computational_graph([y],
                                           variable_style=_var_style,
                                           function_style=_func_style).dump()
            for e in range(1, self.n_encdec + 1):
                self.assertTrue('encdec{}'.format(e) in model._children)

            fn = 'tests/SegNet_x_depth-{}_{}.dot'.format(self.n_encdec, depth)
            if os.path.exists(fn):
                continue
            with open(fn, 'w') as f:
                f.write(cg)
            subprocess.call('dot -Tpng {} -o {}'.format(
                fn, fn.replace('.dot', '.png')),
                            shell=True)
Code Example #18
File: example.py Project: rhythm92/gan-rl
    def __call__(self, X):
        # generate random values
        R = np.random.randn(X.data.shape[0], self.rand_sz)
        R = Variable(R.astype("float32"))

        # attach random to the inputs
        h = F.concat([R, X])
        #h = R

        h = self.ipt(h)
        #h = F.dropout(h)
        y = self.out(h)

        # prior knowledge: environment observation is a one-hot vector
        obs = F.softmax(y[:, :-2])
        # prior knowledge: reward is in [0,1]
        rew = F.sigmoid(y[:, [-2]])
        fin = F.sigmoid(y[:, [-1]])

        y = F.concat([obs, rew, fin])

        return y
Code Example #19
 def __call__(self, x, t):
     x = Variable(x.astype(np.float32).reshape(x.shape[0], 4))
     t = Variable(t.astype("i"))
     y = self.forward(x)
     return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Code Example #20
    def predict(self, x):
        x = Variable(x.astype(np.float32).reshape(x.shape[0], 1))
        y = self.forward(x)

        return y.data
Code Example #21
def main():
	# Set the number of epochs
	parser = argparse.ArgumentParser(description='IaGo:')
	parser.add_argument('--epoch', '-e', type=int, default=20, help='Number of sweeps over the dataset to train')
	parser.add_argument('--gpu', '-g', type=int, default=0, help='GPU ID to be used')
	args = parser.parse_args()

	# Model definition
	model = network.Value()
	optimizer = optimizers.Adam()
	optimizer.setup(model)
	optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(5e-4))
	cuda.get_device(args.gpu).use()

	test_x = np.load('./value_data/npy/states_test.npy')
	test_y = np.load('./value_data/npy/results_test.npy')
	test_x = np.stack([test_x==1, test_x==2], axis=0).astype(np.float32)
	test_x = Variable(cuda.to_gpu(test_x.transpose(1,0,2,3)))
	test_y = Variable(cuda.to_gpu(test_y.astype(np.float32)))

	# Load train dataset
	train_x = np.load('./value_data/npy/states.npy')
	train_y = np.load('./value_data/npy/results.npy')
	train_size = train_y.shape[0]
	minibatch_size = 4096 # 2**12

	# Learning loop
	for epoch in tqdm(range(args.epoch)):
		model.to_gpu(args.gpu)
		# Should be unnecessary...
		#chainer.config.train = True
		#chainer.config.enable_backprop = True
		# Shuffle train dataset
		rands = np.random.choice(train_size, train_size, replace=False)
		train_x = train_x[rands,:,:]
		train_y = train_y[rands]

		# Minibatch learning
		for idx in tqdm(range(0, train_size, minibatch_size)):
			x = train_x[idx:min(idx+minibatch_size, train_size), :, :]
			x = np.stack([x==1, x==2], axis=0).astype(np.float32)
			x = Variable(cuda.to_gpu(x.transpose(1,0,2,3)))
			y = train_y[idx:min(idx+minibatch_size, train_size)]
			y = Variable(cuda.to_gpu(y.astype(np.float32)))
			train_pred = model(x)
			train_loss = mean_squared_error(train_pred, y)
			model.cleargrads()
			train_loss.backward()
			optimizer.update()
		# Calculate loss
		with chainer.using_config('train', False):
			with chainer.using_config('enable_backprop', False):
				test_pred = model(test_x)
		test_loss = mean_squared_error(test_pred, test_y)
		print('\nepoch :', epoch, '  loss :', test_loss)
		# Log
		with open("./log_value.txt", "a") as f:
			f.write(str(test_loss)[9:15]+", \n")
		# Save models
		model.to_cpu()
		serializers.save_npz('./models/value_model.npz', model)
		serializers.save_npz('./models/value_optimizer.npz', optimizer)
Code Example #22
    def __call__(self, x_data, y_data, train=True, n_patches=32):
        if not isinstance(x_data, Variable):
            x = Variable(x_data, volatile=not train)
        else:
            x = x_data
            x_data = x.data

        #self.n_images = y_data.shape[0]
        self.n_images = 1
        self.n_patches = x_data.shape[0]
        #        print x_data.shape[0]
        #        print y_data.shape[0]
        self.n_patches_per_image = self.n_patches / self.n_images
        #self.n_patches_per_image = 32

        h = F.relu(self.conv1(x))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, 2)

        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(h, 2)

        h = F.relu(self.conv5(h))
        h = F.relu(self.conv6(h))
        h = F.max_pooling_2d(h, 2)

        h = F.relu(self.conv7(h))
        h = F.relu(self.conv8(h))
        h = F.max_pooling_2d(h, 2)

        h = F.relu(self.conv9(h))
        h = F.relu(self.conv10(h))
        h = F.max_pooling_2d(h, 2)

        h_ = h
        self.h = h_

        h = F.dropout(F.relu(self.fc1(h_)), train=train, ratio=0.5)
        h = self.fc2(h)
        if self.top == "weighted":
            a = F.dropout(F.relu(self.fc1_a(h_)), train=train, ratio=0.5)
            a = F.relu(self.fc2_a(a)) + 0.000001
            t = Variable(y_data, volatile=not train)
            self.weighted_loss(h, a, t)
        elif self.top == "patchwise":
            a = xp.ones_like(h.data)
            # use y_data.data so that the target is a plain array, not a Variable
            t = xp.repeat(y_data.data, 1)
            t = xp.array(t.astype(np.float32))
            #print h.data
            #print len(t)
            #print t
            self.patchwise_loss(h, a, t)

        if train:
            reporter.report({'loss': self.loss}, self)
            #print 'self.lose:', self.loss.data
            return self.loss
        else:
            return self.loss, self.y
Code Example #23
    # NOTE: the snippet starts mid-function; this `loss` wrapper is inferred from
    # the later call opt.update(loss, x.data, y.data)
    def loss(x, y):
        total_loss, acc = model(x, y)
        return total_loss


    # model.save_model("./model.npz",save_format="npz")
    model.load_model("./model.npz", load_format="npz")
    serializers.load_npz("./opt.npz", opt)

    step = 0
    while True:
        # x = Variable(data=np.zeros(shape=[10, 3, 60, 160], dtype=np.float32))
        # y = Variable(data=np.zeros(shape=[10, 5], dtype=np.int32))

        x, y = yzmData.nextBatch(testOrTrain="train", batch_size=64)

        x = Variable(x.astype(np.float32))
        y = Variable(y.astype(np.int32))
        x.to_gpu(0)
        y.to_gpu(0)
        opt.update(loss, x.data, y.data)

        print("step:%d\ttotal_loss:%f\terro_rate:%f" % (step, model.loss.data, model.acc.data))

        if step % 100 == 0:
            model.save_model("./model.npz", save_format="npz")
            serializers.save_npz("./opt.npz", opt)

            test_x, test_t = yzmData.nextBatch(testOrTrain="test", batch_size=40)
            test_x = Variable(test_x.astype(np.float32))
            test_t = Variable(test_t.astype(np.int32))
            test_x.to_gpu(0)
Code Example #24
File: enhance.py Project: wyn314/jhu-neural-wpe
                T, F = s_x.shape
                Y = np.zeros((8, T, F), dtype=np.complex64)
            Y[ch-1, :, :] = s_x
            s_x_abs = 20 * log_sp(np.abs(s_x))
            s_x_abs = stack_features(s_x_abs.astype(np.float32), 5)
            s_x_abs = Variable(s_x_abs)
            if args.gpu >= 0:
                s_x_abs.to_gpu(args.gpu)
            s_x_abs_list.append(s_x_abs)
    elif args.single == 1:
        audio_data = read_audio(cur_line)
        fx, tx, s_x = signal.stft(audio_data, fs=16000, nperseg=512,
                                  noverlap=512-128, nfft=512)
        s_x = np.transpose(s_x)
        s_x_abs = 20 * log_sp(np.abs(s_x))
        s_x_abs = stack_features(s_x_abs.astype(np.float32), 5)
        s_x_abs = Variable(s_x_abs)
        if args.gpu >= 0:
            s_x_abs.to_gpu(args.gpu)
        s_x_abs_list.append(s_x_abs)
    with chainer.no_backprop_mode():
        X = model.predict(s_x_abs_list)
        if args.gpu >= 0:
            X.to_cpu()

    if args.single == 0:
        xs = X.data
        xs = np.reshape(xs, (8, xs.shape[0] // 8, xs.shape[1]))
        taps = 10
        xs_power = np.square(exp_sp(xs/20))
        Y_hat = np.copy(Y)
Code Example #25
    def __call__(self, x, t):
        x = Variable(x.astype(np.float32).reshape(x.shape[0], 1))
        t = Variable(t.astype(np.float32).reshape(t.shape[0], 1))

        return F.mean_squared_error(self.forward(x), t)
Code Example #26
 def layer1(self, x):
     x = Variable(
         x.astype(cp.float32).reshape(-1, 1, self.atomsize, self.lensize))
     h = self.bn1(self.conv1(x))  # 1st conv
     return h.data