Example #1
def min(input, dim):
    # PyTorch-style wrapper: maps a `dim` argument onto MXNet's `axis`.
    # Note: this shadows Python's built-in min() wherever it is imported.
    return nd.min(input, axis=dim)
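A minimal usage sketch, assuming `from mxnet import nd`; the sample values are illustrative only:

from mxnet import nd

x = nd.array([[3.0, 1.0],
              [2.0, 4.0]])
print(min(x, dim=0))  # column-wise minima: [2. 1.]
print(min(x, dim=1))  # row-wise minima:    [1. 2.]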
Example #2
def img_norm(img):
    # Min-max normalization: rescale img linearly into [0, 1].
    # Note: a constant image (max_val == min_val) would divide by zero.
    max_val = nd.max(img)
    min_val = nd.min(img)
    return (img - min_val) / (max_val - min_val)
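A minimal usage sketch, again assuming `from mxnet import nd` and illustrative values:

from mxnet import nd

img = nd.array([[0.0, 5.0],
                [10.0, 2.5]])
normed = img_norm(img)
print(nd.min(normed).asscalar(), nd.max(normed).asscalar())  # 0.0 1.0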
Example #3
netDe.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) + '_De.params', ctx=ctx)
netD.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) + '_D.params', ctx=ctx)
# netD2.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) + '_D2.params', ctx=ctx)

# Decode a batch of random latent codes.
# fakecode = nd.random.uniform(low=-1, high=1, shape=(16, 4096, 1, 1), ctx=ctx)
fakecode = nd.random.uniform(low=-1, high=1, shape=(16, 64, 1, 1), ctx=ctx)
out = netDe(fakecode)

import load_image
test_data = load_image.load_test_images(testclasspaths, testclasslabels, opt.batch_size,
                                        opt.img_wd, opt.img_ht, ctx, opt.noisevar)
for batch in test_data:
    real_in = batch.data[0].as_in_context(ctx)
    code = netEn(real_in)
    recon = netDe(code)

print(nd.max(netEn(real_in)))
print(nd.min(netEn(real_in)))
print(real_in.shape)
print(recon.shape)

# Tile real, reconstructed, and randomly generated images into a 4x4 grid:
# concat along height (dim=1) per strip, then strips along width (dim=2).
fake_img1 = nd.concat(real_in[0], recon[0], recon[0], out[3], dim=1)
fake_img2 = nd.concat(out[7], out[6], out[5], out[4], dim=1)
fake_img3 = nd.concat(out[8], out[9], out[10], out[11], dim=1)
fake_img4 = nd.concat(out[15], out[14], out[13], out[12], dim=1)
fake_img = nd.concat(fake_img1, fake_img2, fake_img3, fake_img4, dim=2)
# print(np.shape(fake_img))
# visual.visualize(fake_img)

tep = code[0].asnumpy().flatten()  # flatten() returns a copy, so keep the result
plt.figure(figsize=(20, 10))
print(np.shape(code[0].asnumpy().flatten()))
dec = netDe(code)
dec2 = netDe(fakecode)
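The 4x4 grid above is built purely with nd.concat over the spatial axes of CHW tensors; a standalone sketch of the same tiling pattern with synthetic data (the shapes are assumptions, not taken from the original script):

from mxnet import nd

imgs = [nd.random.uniform(shape=(3, 8, 8)) for _ in range(4)]  # four CHW "images"
strip = nd.concat(*imgs, dim=1)        # stack along height -> (3, 32, 8)
grid = nd.concat(strip, strip, dim=2)  # stack strips along width -> (3, 32, 16)
print(grid.shape)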
Example #4
    def inference_g(self, observed_arr):
        '''
        Inference with generator.

        Args:
            observed_arr:       `mxnet.ndarray` of observed data points.
        
        Returns:
            Tuple data.
            - re-parametric data.
            - encoded data points.
            - re-encoded data points.
        '''
        generated_arr, encoded_arr, re_encoded_arr = super().inference_g(observed_arr)

        if autograd.is_recording():
            limit = self.long_term_seq_len

            # Temporarily extend the noise sampler's sequence length to draw
            # a long-term observed series.
            seq_len = self.noise_sampler.seq_len
            self.noise_sampler.seq_len = limit
            long_term_observed_arr = self.noise_sampler.draw()

            # Rescaled-range (R/S) analysis on the observed series: cumulative
            # deviations from the mean give the range R below.
            observed_mean_arr = nd.expand_dims(nd.mean(long_term_observed_arr, axis=1), axis=1)
            sum_arr = None
            for seq in range(2, long_term_observed_arr.shape[1]):
                add_arr = nd.sum(long_term_observed_arr[:, :seq] - observed_mean_arr, axis=1)
                if sum_arr is None:
                    sum_arr = nd.expand_dims(add_arr, axis=0)
                else:
                    sum_arr = nd.concat(
                        sum_arr,
                        nd.expand_dims(add_arr, axis=0),
                        dim=0
                    )
            max_arr = nd.max(sum_arr, axis=0)
            min_arr = nd.min(sum_arr, axis=0)

            diff_arr = long_term_observed_arr - observed_mean_arr
            std_arr = nd.power(nd.mean(nd.square(diff_arr), axis=1), 1/2)
            R_S_arr = (max_arr - min_arr) / std_arr
            # Hurst exponent: H = log(R/S) / log(n / 2).
            len_arr = nd.ones_like(R_S_arr, ctx=R_S_arr.context) * np.log(long_term_observed_arr.shape[1] / 2)
            observed_H_arr = nd.log(R_S_arr) / len_arr

            # Restore the sampler's original sequence length.
            self.noise_sampler.seq_len = seq_len

            # Roll the generator forward, feeding each step's output back in
            # as the next input, to synthesize a long-term generated series.
            _observed_arr = generated_arr

            long_term_generated_arr = None
            for i in range(limit):
                generated_arr, _, _ = super().inference_g(_observed_arr)

                # Rescale the new step into the min-max range of the series
                # it extends.
                g_min_arr = nd.expand_dims(generated_arr.min(axis=1), axis=1)
                g_max_arr = nd.expand_dims(generated_arr.max(axis=1), axis=1)
                o_min_arr = nd.expand_dims(_observed_arr.min(axis=1), axis=1)
                o_max_arr = nd.expand_dims(_observed_arr.max(axis=1), axis=1)
                generated_arr = (generated_arr - g_min_arr) / (g_max_arr - g_min_arr)
                generated_arr = o_min_arr + (o_max_arr - o_min_arr) * generated_arr

                if self.condition_sampler is not None:
                    self.condition_sampler.output_shape = generated_arr.shape
                    noise_arr = self.condition_sampler.generate()
                    generated_arr += noise_arr

                if long_term_generated_arr is None:
                    long_term_generated_arr = generated_arr
                else:
                    long_term_generated_arr = nd.concat(
                        long_term_generated_arr,
                        generated_arr,
                        dim=1
                    )

                _observed_arr = generated_arr

            # Same R/S statistic, now on the generated long-term series.
            generated_mean_arr = nd.expand_dims(nd.mean(long_term_generated_arr, axis=1), axis=1)
            sum_arr = None
            for seq in range(2, long_term_generated_arr.shape[1]):
                add_arr = nd.sum(long_term_generated_arr[:, :seq] - generated_mean_arr, axis=1)
                if sum_arr is None:
                    sum_arr = nd.expand_dims(add_arr, axis=0)
                else:
                    sum_arr = nd.concat(
                        sum_arr,
                        nd.expand_dims(add_arr, axis=0),
                        dim=0
                    )
            max_arr = nd.max(sum_arr, axis=0)
            min_arr = nd.min(sum_arr, axis=0)

            diff_arr = long_term_generated_arr - generated_mean_arr
            std_arr = nd.power(nd.mean(nd.square(diff_arr), axis=1), 1/2)
            R_S_arr = (max_arr - min_arr) / std_arr
            len_arr = nd.ones_like(R_S_arr, ctx=R_S_arr.context) * np.log(long_term_generated_arr.shape[1] / 2)
            generated_H_arr = nd.log(R_S_arr) / len_arr

            # Penalize the gap between the observed and generated Hurst
            # exponents, broadcast back to the generator output's rank.
            multi_fractal_loss = nd.abs(generated_H_arr - observed_H_arr)
            multi_fractal_loss = nd.mean(multi_fractal_loss, axis=0, exclude=True)
            multi_fractal_loss = nd.expand_dims(multi_fractal_loss, axis=-1)
            multi_fractal_loss = nd.expand_dims(multi_fractal_loss, axis=-1)

            generated_arr = generated_arr + multi_fractal_loss

        return generated_arr, encoded_arr, re_encoded_arr
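The rescaled-range statistic that inference_g computes twice (for the observed and the generated series) can be factored into a standalone helper; a minimal sketch, where the name `hurst` and the (batch, seq_len, dim) shape are assumptions for illustration:

import numpy as np
from mxnet import nd

def hurst(series_arr):
    # series_arr: (batch, seq_len, dim). R/S estimate of the Hurst exponent,
    # mirroring the steps in inference_g above.
    mean_arr = nd.expand_dims(nd.mean(series_arr, axis=1), axis=1)
    sums = [nd.sum(series_arr[:, :seq] - mean_arr, axis=1)
            for seq in range(2, series_arr.shape[1])]
    sum_arr = nd.stack(*sums, axis=0)
    r_arr = nd.max(sum_arr, axis=0) - nd.min(sum_arr, axis=0)           # range R
    s_arr = nd.sqrt(nd.mean(nd.square(series_arr - mean_arr), axis=1))  # scale S
    return nd.log(r_arr / s_arr) / np.log(series_arr.shape[1] / 2)

series = nd.random.uniform(shape=(4, 50, 1))
print(hurst(series).shape)  # (4, 1)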
Example #5
        # (nd.clip(valid_ds_aug, -50, 40) - min_val) / (max_val - min_val),
        valid_ds_aug,
        nd.array(label_valid_aug).astype('float32'))

    # test_ds = ((nd.clip(test, -50, 40) - min_val) / (max_val - min_val), ids)
    # Normalize each test image independently to [0, 1] with img_norm,
    # then re-stack along the batch axis below.
    test_norm = []

    for k in range(test.shape[0]):
        imag = test[k].reshape(shape=(1, test[k].shape[0], test[k].shape[1],
                                      test[k].shape[2]))
        test_norm.append(img_norm(imag))

    test_ds = (nd.concat(*test_norm, dim=0).astype('float32'), ids)

    print("max/min train: %f\t%f" %
          (nd.max(train_ds[0]).asscalar(), nd.min(train_ds[0]).asscalar()))
    print("max/min valid: %f\t%f" %
          (nd.max(valid_ds[0]).asscalar(), nd.min(valid_ds[0]).asscalar()))
    print("max/min test: %f\t%f" %
          (nd.max(test_ds[0]).asscalar(), nd.min(test_ds[0]).asscalar()))

    print("finish gen train/valid dataset")

    batch_size = 128
    train_data = utils.DataLoader(train_ds, batch_size, shuffle=True)
    valid_data = utils.DataLoader(valid_ds, batch_size, shuffle=False)
    test_data = utils.TestDataLoader(test_ds, batch_size)

    ctx = utils.try_gpu()
    num_epochs = 100
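The per-image normalization loop can be reproduced in isolation; a minimal sketch with synthetic data, reusing img_norm from Example #2 (the shape is an assumption):

from mxnet import nd

test = nd.random.uniform(low=-50, high=40, shape=(5, 3, 32, 32))
test_norm = [img_norm(nd.expand_dims(test[k], axis=0)) for k in range(test.shape[0])]
test_ds = nd.concat(*test_norm, dim=0).astype('float32')
print(nd.min(test_ds).asscalar(), nd.max(test_ds).asscalar())  # ~0.0 ~1.0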
Example #6
    def hybrid_forward(self, F, x):
        # Global min-pooling: minimum over the spatial axes (H, W) of an
        # NCHW tensor, with keepdims=True preserving the rank.
        x = F.min(data=x, axis=(2, 3), keepdims=True, name='GlobalMinPool2D')
        return x
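A sketch of how this method might be wrapped and exercised, assuming it belongs to a gluon HybridBlock subclass (the class name is inferred from the op's name argument):

from mxnet import nd
from mxnet.gluon import nn

class GlobalMinPool2D(nn.HybridBlock):
    def hybrid_forward(self, F, x):
        x = F.min(data=x, axis=(2, 3), keepdims=True, name='GlobalMinPool2D')
        return x

pool = GlobalMinPool2D()
x = nd.random.uniform(shape=(2, 4, 8, 8))  # NCHW
print(pool(x).shape)                       # (2, 4, 1, 1)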