Example no. 1
    def __c_1_lambda_om(_lambda, dim, ecdf):
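        # n_sample, n_sample_per_cycle, x_point and helpers such as qr,
        # randn, chi, zeros, append and sort are assumed to come from the
        # enclosing module.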

        _lambda_half = int(ceil(_lambda / 2.0))

        proj_max = zeros(n_sample_per_cycle)

        for i in range(n_sample):

            # mirrored orthogonal sampling
            q = qr(randn(dim, dim))[0]
            l = chi.rvs(dim, size=dim)
            s = l * q
            samples = s[:, 0:_lambda_half]
            samples = append(samples, -samples, axis=1)

            # projection onto e1
            proj = samples[0, :]

            # the largest order statistic
            proj_sorted = sort(proj)
            proj_max[i % n_sample_per_cycle] = proj_sorted[-1]

            if (i + 1) % n_sample_per_cycle == 0:
                for k, x in enumerate(x_point):
                    ecdf[k] += np.sum(proj_max <= x)
Example no. 2
    def fit(self, X, y=None):
        """Generate random weights according to n_features.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the transformer.
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X, accept_sparse=True)
        n_samples, n_features = X.shape
        n_features_padded = next_pow_of_two(n_features)
        n_stacks = int(np.ceil(self.n_components / n_features_padded))

        if self.random_fourier and not self.use_offset:
            n_stacks = int(np.ceil(n_stacks / 2))
            n_components = 2 * n_stacks * n_features_padded
        else:
            n_components = n_stacks * n_features_padded

        if n_components != self.n_components:
            warnings.warn("n_components is changed from {0} to {1}. "
                          "You should set n_components to a multiple of "
                          "the next power of two of n_features.".format(
                              self.n_components, n_components))
            self.n_components = n_components

        # n_stacks * n_features_padded = self.n_components
        size = (n_stacks, n_features_padded)
        if isinstance(self.distribution, str):
            distribution = _get_random_matrix(self.distribution)
        else:
            distribution = self.distribution
        self.random_weights_ = distribution(random_state, size)
        self.random_sign_ = rademacher(random_state, (n_stacks, n_features))

        self.random_perm_ = np.zeros(size, dtype=np.int32)
        self._fy_vector_ = np.zeros(size, dtype=np.int32)
        for t in range(n_stacks):
            perm, fyvec = fisher_yates_shuffle_with_indices(
                n_features_padded, random_state)
            self.random_perm_[t] = perm
            self._fy_vector_[t] = fyvec

        Frobs = np.sqrt(np.sum(self.random_weights_**2, axis=1, keepdims=True))
        self.random_scaling_ = chi.rvs(
            n_features_padded, size=size, random_state=random_state) / Frobs
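        # The chi draw divided by Frobs rescales each row to a
        # chi(n_features_padded)-distributed norm, matching the row norms of
        # an i.i.d. Gaussian matrix.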

        if self.random_fourier and self.use_offset:
            self.random_offset_ = random_state.uniform(0, 2 * np.pi,
                                                       self.n_components)
        else:
            self.random_offset_ = None
        return self
Example no. 3
    def fit(self, X, y=None):
        """Fit the model with X.
        Samples a couple of random based vectors to approximate a Gaussian
        random projection matrix to generate n_components features.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features),
            Training data, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object,
            Returns the transformer.
        """
        X = check_array(X)

        d_orig = X.shape[1]

        self.d, self.n, self.times_to_stack_v = \
            Fastfood.enforce_dimensionality_constraints(d_orig,
                                                        self.n_components)
        self.number_of_features_to_pad_with_zeros = self.d - d_orig

        self.G = self.rng.normal(size=(self.times_to_stack_v, self.d))
        self.B = choice([-1, 1],
                        size=(self.times_to_stack_v, self.d),
                        replace=True)
        self.P = np.hstack([(i * self.d) + self.rng.permutation(self.d)
                            for i in range(self.times_to_stack_v)])
        self.S = np.multiply(
            1 / self.l2norm_along_axis1(self.G).reshape((-1, 1)),
            chi.rvs(self.d, size=(self.times_to_stack_v, self.d)))
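        # S holds chi(d)-distributed lengths divided by the row norms of G:
        # the Fastfood scaling that restores Gaussian-like row norms.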
        self.U = self.uniform_vector()
        return self
Example no. 4
	def sampler(self, size):
		"""
			Defines the sampler object

		Args:
		 	size:

		Return:
		"""
		if self.kernel == "squared_exponential":
			distribution = lambda size: np.random.normal(size=size) * (1. / self.gamma)
			inv_cum_dist = lambda x: norm.ppf(x) * (1. / self.gamma)

		elif self.kernel == "laplace":
			distribution = None
			inv_cum_dist = lambda x: (np.tan(np.pi * x - np.pi) / self.gamma)

		elif self.kernel == "modified_matern":
			if self.nu == 2:
				distribution = None
				inv_cum_dist = None
				pdf = lambda x: np.prod(2*(self.gamma)/(np.power((1. + self.gamma**2*x**2),2) * np.pi),axis =1)
			elif self.nu == 3:
				distribution = None
				inv_cum_dist = None
				pdf = lambda x: np.prod((8.*self.gamma)/(np.power((1. + self.gamma**2*x**2),3) *3* np.pi),axis =1)
			elif self.nu == 4:
				distribution = None
				inv_cum_dist = None
				pdf = lambda x: np.prod((16.*self.gamma)/(np.power((1. + self.gamma**2*x**2),4) *5 * np.pi),axis =1)

		# Random Fourier Features
		if self.approx == "rff":
			if distribution is None:
				if inv_cum_dist is None:
					self.W = helper.rejection_sampling(pdf,size = size)
				else:
					self.W = helper.sample_custom(inv_cum_dist, size=size)
			else:
				self.W = distribution(size)

		# Quasi Fourier Features
		elif self.approx == "halton":
			if inv_cum_dist is not None:
				self.W = helper.sample_qmc_halton(inv_cum_dist, size=size)
			else:
				raise AssertionError("Inverse Cumulative Distribution could not be deduced")

		elif self.approx == "orf":
			distribution = lambda size: np.random.normal(size=size)
			self.W = distribution(size)

			# QR decomposition
			self.Q,_ = np.linalg.qr(self.W)
			# df and size
			self.S = np.diag(chi.rvs(size[1], size=size[0]))
			self.W = np.dot(self.S,self.Q)/self.gamma**2

		return self.W
Example no. 5
def hypercomplex_init(in_features,
                      out_features,
                      rng,
                      kernel_size=None,
                      criterion='glorot',
                      num_components=8):

    if kernel_size is not None:
        receptive_field = np.prod(kernel_size)
        fan_in = in_features * receptive_field
        fan_out = out_features * receptive_field
    else:
        fan_in = in_features
        fan_out = out_features

    if criterion == 'glorot':
        s = 1. / np.sqrt(2 * (fan_in + fan_out))
    elif criterion == 'he':
        s = 1. / np.sqrt(2 * fan_in)
    else:
        raise ValueError('Invalid criterion: ' + criterion)

    # rng = RandomState(np.random.randint(1, 1234))

    # Generating randoms and purely imaginary hyper(complex) :
    if kernel_size is None:
        kernel_shape = (in_features, out_features)
    else:
        if type(kernel_size) is int:
            kernel_shape = (out_features, in_features) + tuple((kernel_size, ))
        else:
            kernel_shape = (out_features, in_features) + (*kernel_size, )

    modulus = chi.rvs(num_components, loc=0, scale=s, size=kernel_shape)
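    # modulus: the norm of a hypercomplex number with num_components i.i.d.
    # N(0, s^2) parts is chi-distributed with num_components degrees of
    # freedom.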
    number_of_weights = np.prod(kernel_shape)
    v = [
        np.random.uniform(-1.0, 1.0, number_of_weights)
        for component in range(num_components - 1)
    ]

    # Purely imaginary hyper(complex) unitary
    for i in range(0, number_of_weights):
        norm = np.sqrt(
            sum(v[j][i]**2 for j in range(num_components - 1)) + 0.0001)
        for j in range(num_components - 1):
            v[j][i] /= norm
    v = [v_c.reshape(kernel_shape) for v_c in v]

    phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)

    weight = [
        torch.from_numpy(modulus * np.cos(phase)).type(torch.FloatTensor)
    ]
    weight.extend([
        torch.from_numpy(modulus * v_c * np.sin(phase)).type(torch.FloatTensor)
        for v_c in v
    ])

    return weight
Example no. 6
def quaternion_init(in_features,
                    out_features,
                    rng,
                    kernel_size=None,
                    criterion='glorot'):

    if kernel_size is not None:
        receptive_field = np.prod(kernel_size)
        fan_in = in_features * receptive_field
        fan_out = out_features * receptive_field
    else:
        fan_in = in_features
        fan_out = out_features

    if criterion == 'glorot':
        s = 1. / np.sqrt(2 * (fan_in + fan_out))
    elif criterion == 'he':
        s = 1. / np.sqrt(2 * fan_in)
    else:
        raise ValueError('Invalid criterion: ' + criterion)

    rng = RandomState(np.random.randint(1, 1234))
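    # NOTE: this discards the rng passed in as an argument and reseeds from
    # the global NumPy state; only the phase draw below uses this new rng.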

    # Generating randoms and purely imaginary quaternions :
    if kernel_size is None:
        kernel_shape = (in_features, out_features)
    else:
        if type(kernel_size) is int:
            kernel_shape = (out_features, in_features) + tuple((kernel_size, ))
        else:
            kernel_shape = (out_features, in_features) + (*kernel_size, )

    modulus = chi.rvs(4, loc=0, scale=s, size=kernel_shape)

    #    modulus= rng.uniform(size=kernel_shape)
    number_of_weights = np.prod(kernel_shape)

    v_i = np.random.normal(0, 1.0, number_of_weights)
    v_j = np.random.normal(0, 1.0, number_of_weights)
    v_k = np.random.normal(0, 1.0, number_of_weights)

    # Purely imaginary quaternions unitary
    for i in range(0, number_of_weights):
        norm = np.sqrt(v_i[i]**2 + v_j[i]**2 + v_k[i]**2 + 0.0001)
        v_i[i] /= norm
        v_j[i] /= norm
        v_k[i] /= norm
    v_i = v_i.reshape(kernel_shape)
    v_j = v_j.reshape(kernel_shape)
    v_k = v_k.reshape(kernel_shape)

    phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)

    weight_r = modulus * np.cos(phase)
    weight_i = modulus * v_i * np.sin(phase)
    weight_j = modulus * v_j * np.sin(phase)
    weight_k = modulus * v_k * np.sin(phase)

    return (weight_r, weight_i, weight_j, weight_k)
Example no. 7
    def fit(self, X, y=None):
        """Fit the model with X.
        Samples a couple of random based vectors to approximate a Gaussian
        random projection matrix to generate n_components features.
        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data, where n_samples in the number of samples
            and n_features is the number of features.
        Returns
        -------
        self : object
            Returns the transformer.
        """
        X = check_array(X)

        d_orig = X.shape[1]  # Initial number of features

        # n_components (self.n) is the final number of features
        # times_to_stack_v is the integer division of n by d
        # we use times_to_stack_v according to the paper FastFood:
        # "When n > d, we replicate (7) for n/d independent random matrices
        # Vi, and stack them via Vt = [V_1, V_2, ..., V_(n/d)]t until we have
        # enough dimensions."
        self.d, self.n, self.times_to_stack_v = \
            Fastfood.enforce_dimensionality_constraints(d_orig,
                                                        self.n_components)

        self.number_of_features_to_pad_with_zeros = self.d - d_orig

        if self.d != d_orig:
            warn(
                "Dimensionality of the input space has been changed "
                "(zero padding) from {} to {}.".format(d_orig, self.d))

        self.G = self.rng.normal(size=(self.times_to_stack_v, self.d))
        # G is a random matrix following normal distribution

        self.B = choice([-1, 1],
                        size=(self.times_to_stack_v, self.d),
                        replace=True,
                        random_state=self.random_state)
        # B is a random matrix of -1 and 1

        self.P = np.hstack([(i * self.d) + self.rng.permutation(self.d)
                            for i in range(self.times_to_stack_v)])
        # P is a matrix of size d*n/d = n -> the dimension of the embedding space
        # P is for the permutation and respects the V stacks (see FastFood paper)

        self.S = np.multiply(
            1 / self.l2norm_along_axis1(self.G).reshape((-1, 1)),
            chi.rvs(self.d, size=(self.times_to_stack_v, self.d)))

        self.H = scipy.linalg.hadamard(self.d)
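        # H is the d x d Walsh-Hadamard matrix used by the structured
        # transform.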

        self.U = self.uniform_vector()

        return self
Example no. 8
def quaternion_init(in_features,
                    out_features,
                    rng,
                    kernel_size=None,
                    criterion='glorot'):
    """Initialize quaternion layer weights
    """
    if kernel_size is not None:
        receptive_field = np.prod(kernel_size)
        fan_in = in_features * receptive_field
        fan_out = out_features * receptive_field
    else:
        fan_in = in_features
        fan_out = out_features

    if criterion == 'glorot':
        s = 1. / np.sqrt(2 * (fan_in + fan_out))
    elif criterion == 'he':
        s = 1. / np.sqrt(2 * fan_in)
    else:
        raise ValueError('Invalid criterion: ' + criterion)

    rng = np.random.RandomState(np.random.randint(1, 1234))

    # Generate randoms and purely imaginary quaternions
    if kernel_size is None:
        kernel_shape = (in_features, out_features)
    else:
        # TODO(Emanuele): Expand different kernel sizes...
        kernel_shape = ...

    # Produce random variable vector
    modulus = chi.rvs(4, loc=0, scale=s, size=kernel_shape)
    no_weights = np.prod(kernel_shape)
    v_i = np.random.uniform(-1.0, 1.0, no_weights)
    v_j = np.random.uniform(-1.0, 1.0, no_weights)
    v_k = np.random.uniform(-1.0, 1.0, no_weights)

    # Generate purely imaginary quaternions
    for i in range(0, no_weights):
        norm = np.sqrt(v_i[i]**2 + v_j[i]**2 + v_k[i]**2 + 0.0001)
        v_i[i] /= norm
        v_j[i] /= norm
        v_k[i] /= norm
    v_i = v_i.reshape(kernel_shape)
    v_j = v_j.reshape(kernel_shape)
    v_k = v_k.reshape(kernel_shape)

    phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)
    weight_r = modulus * np.cos(phase)
    weight_i = modulus * v_i * np.sin(phase)
    weight_j = modulus * v_j * np.sin(phase)
    weight_k = modulus * v_k * np.sin(phase)

    return weight_r, weight_i, weight_j, weight_k
Example no. 9
    def fit(self, X, y=None):
        """Fit the model with X.

        Samples a set of random vectors to approximate a Gaussian
        random projection matrix that generates n_components features.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the transformer.
        """
        X = check_array(X, order="C", dtype=np.float64)

        d_orig = X.shape[1]
        rng = check_random_state(self.random_state)

        (
            self._d,
            self._n,
            self._times_to_stack_v,
        ) = Fastfood._enforce_dimensionality_constraints(
            d_orig, self.n_components
        )
        self._number_of_features_to_pad_with_zeros = self._d - d_orig

        self._G = rng.normal(size=(self._times_to_stack_v, self._d))
        self._B = rng.choice(
            [-1, 1], size=(self._times_to_stack_v, self._d), replace=True
        )
        self._P = np.hstack(
            [
                (i * self._d) + rng.permutation(self._d)
                for i in range(self._times_to_stack_v)
            ]
        )
        self._S = np.multiply(
            1 / self._l2norm_along_axis1(self._G).reshape((-1, 1)),
            chi.rvs(
                self._d,
                size=(self._times_to_stack_v, self._d),
                random_state=rng,
            ),
        )

        self._U = self._uniform_vector(rng)
        self.is_fitted_ = True

        return self
Example no. 10
def vectormap_init(vectormap_dim,
                   in_features,
                   out_features,
                   rng,
                   kernel_size=None,
                   criterion='glorot'):

    if kernel_size is not None:
        receptive_field = np.prod(kernel_size)
        fan_in = in_features * receptive_field
        fan_out = out_features * receptive_field
    else:
        fan_in = in_features
        fan_out = out_features

    if criterion == 'glorot':
        s = np.sqrt(2. / (vectormap_dim * (fan_in + fan_out)))
    elif criterion == 'he':
        s = np.sqrt(2. / (vectormap_dim * fan_in))
    else:
        raise ValueError('Invalid criterion: ' + criterion)

    rng = RandomState(np.random.randint(1, 1234))

    if kernel_size is None:
        kernel_shape = (in_features, out_features)
    else:
        if type(kernel_size) is int:
            kernel_shape = (out_features, in_features) + tuple((kernel_size, ))
        else:
            kernel_shape = (out_features, in_features) + (*kernel_size, )

    modulus = chi.rvs(vectormap_dim, loc=0, scale=s, size=kernel_shape)
    number_of_weights = np.prod(kernel_shape)

    v_s = np.array([
        np.random.uniform(-1.0, 1.0, number_of_weights)
        for _ in range(vectormap_dim - 1)
    ])
    for i in range(0, number_of_weights):
        v_s[:, i] = v_s[:, i] / np.linalg.norm(v_s[:, i])

    v_s = [v.reshape(kernel_shape) for v in v_s]

    phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)

    weight = [modulus * np.cos(phase)]
    for v in v_s:
        weight.append(modulus * v * np.sin(phase))

    return tuple(weight)
Example no. 11
def parameters(n_number, p, d, FLAGS):
    G = np.random.randn(p * d)
    B = np.random.uniform(-1, 1, p * d)
    B[B > 0] = 1
    B[B < 0] = -1
    PI_value = np.hstack([(i * d) + np.random.permutation(d)
                          for i in range(p)])
    G_fro = G.reshape(p, d)
    s_i = chi.rvs(d, size=(p, d))
    S = np.multiply(s_i,
                    np.array(np.linalg.norm(G_fro, axis=1)).reshape(p, -1))
    S = S.reshape(1, -1)
    FLAGS.b = np.random.uniform(0, 2 * math.pi, d * p)
    FLAGS.t = np.random.uniform(-1, 1, d * p)
    return PI_value, G, B, S
Example no. 12
def travelDistance(speed):
    """
    Determine how far an agent moves in a given step

    Draw from a chi distribution, then multiply by Agent's speed
    Minimum travel distance is 0.1 * speed

    Parameters
    ----------
    speed : float, Agent's current speed aka step size (Agent.speed)

    Returns
    -------
    distanceToTravel : float
    """
    r = np.max([chi.rvs(dfConstant), 0.1])
    distanceToTravel = r * speed
    return distanceToTravel
Example no. 13
def initialize_conv(in_channels,
                    out_channels,
                    kernel_size=[2, 2],
                    init_mode="he"):
    """
    Initializes quaternion weight parameter for convolution.
    
    @type in_channels: int
    @type out_channels: int
    @type kernel_size: int/list/tuple
    @type init_mode: str
    """

    prod = np.prod(kernel_size)
    if init_mode == "he":
        scale = 1 / np.sqrt(in_channels * prod * 8)
    elif init_mode in ["xavier", "glorot"]:
        scale = 1 / np.sqrt((in_channels + out_channels) * prod * 8)

    if type(kernel_size) == int:
        window = [kernel_size, kernel_size]
    elif type(kernel_size) == tuple:
        window = list(kernel_size)
    elif type(kernel_size) == list:
        window = kernel_size
    else:
        raise TypeError("kernel_size must be an int, tuple or list")

    size_real = [in_channels, out_channels] + window
    size_img = [size_real[0]] + [size_real[1] * 3] + size_real[2:]

    img_mat = torch.Tensor(*size_img).uniform_(-1, 1)
    mat = Q(torch.cat([torch.zeros(size_real), img_mat], 1))
    mat /= mat.norm()

    phase = torch.Tensor(*size_real).uniform_(-np.pi, np.pi)
    magnitude = torch.from_numpy(chi.rvs(4, loc=0, scale=scale,
                                         size=size_real)).float()

    r = magnitude * torch.cos(phase)
    factor = magnitude * torch.sin(phase)

    mat *= factor
    mat += r

    return mat
Example no. 14
def fastfood_value(n_number, T, d, FLAGS, method=1):
    FLAGS.T = T
    G = np.random.randn(T * d)
    B = np.random.uniform(-1, 1, T * d)
    B[B > 0] = 1
    B[B < 0] = -1
    PI_value = np.hstack([(i * d) + np.random.permutation(d)
                          for i in range(T)])
    G_fro = G.reshape(T, d)
    s_i = chi.rvs(d, size=(T, d))
    S = np.multiply(s_i,
                    np.array(np.linalg.norm(G_fro, axis=1)).reshape(T, -1))
    S = S.reshape(1, -1)
    FLAGS.BATCHSIZE = n_number
    FLAGS.b = None
    FLAGS.t = None
    if method == 1:
        FLAGS.b = np.random.uniform(0, 2 * math.pi, d * T)
        FLAGS.t = np.random.uniform(-1, 1, d * T)
    return PI_value, G, B, S
Example no. 15
def phm_init(phm_dim: int,
             in_features: int,
             out_features: int,
             low: int = 0,
             high: int = 1,
             criterion: str = 'glorot',
             transpose: bool = True):

    fan_in = in_features
    fan_out = out_features

    if criterion == 'glorot':
        s = np.sqrt(2. / (phm_dim * (fan_in + fan_out)))
    elif criterion == 'he':
        s = np.sqrt(2. / (phm_dim * fan_in))
    else:
        raise ValueError('Invalid criterion: ' + criterion)

    kernel_shape = (in_features, out_features)
    magnitude = torch.from_numpy(
        chi.rvs(phm_dim, loc=0, scale=s, size=kernel_shape)).to(torch.float32)
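    # magnitude plays the role of |w| in the polar form
    # w = |w| * (cos(theta) + v * sin(theta)) assembled below.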
    # purely imaginary vectormap
    v = unitary_init(phm_dim=phm_dim,
                     in_features=in_features,
                     out_features=out_features,
                     low=low,
                     high=high)

    theta = torch.from_numpy(
        np.random.uniform(low=-np.pi, high=np.pi,
                          size=kernel_shape)).to(torch.float32)

    weight = [magnitude * torch.cos(theta)]
    for vs in v[1:]:
        weight.append(magnitude * vs * torch.sin(theta))

    weight = torch.stack(weight, dim=0)
    if transpose:
        weight = weight.permute(0, 2, 1)
    return weight
Example no. 16
    def __c_1_lambda_om(_lambda, dim, proj_max):

        _lambda_half = int(ceil(_lambda / 2.0))

        # for memory reasons, the sample count is taken from the output buffer
        n_sample = len(proj_max)

        for i in range(n_sample):

            # mirrored orthogonal sampling
            q = qr(randn(dim, dim))[0]
            l = chi.rvs(dim, size=dim)
            s = l * q
            samples = s[:, 0:_lambda_half]
            samples = append(samples, -samples, axis=1)

            # projection onto e1
            proj = samples[0, :]

            # the largest order statistic
            proj_sorted = sort(proj)
            proj_max[i] = proj_sorted[-1]
Example no. 17
    def test_chi(self):
        from scipy.stats import chi
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(1, 1)

        df = 78
        mean, var, skew, kurt = chi.stats(df, moments='mvsk')

        x = np.linspace(chi.ppf(0.01, df), chi.ppf(0.99, df), 100)
        ax.plot(x, chi.pdf(x, df), 'r-', lw=5, alpha=0.6, label='chi pdf')

        rv = chi(df)
        ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

        vals = chi.ppf([0.001, 0.5, 0.999], df)
        np.allclose([0.001, 0.5, 0.999], chi.cdf(vals, df))

        r = chi.rvs(df, size=1000)

        ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
        ax.legend(loc='best', frameon=False)
        self.assertEqual(str(ax), "AxesSubplot(0.125,0.11;0.775x0.77)")
Example no. 18
def quaternion_init(in_features: int, out_features: int, criterion: str = 'glorot',
                    low: int = 0, high: int = 1, transpose: bool = True) -> (Tensor, Tensor, Tensor, Tensor):

    fan_in = in_features
    fan_out = out_features
    if criterion == 'glorot':
        s = 1. / np.sqrt(2 * (fan_in + fan_out))
    elif criterion == 'he':
        s = 1. / np.sqrt(2 * fan_in)
    else:
        raise ValueError('Invalid criterion: ' + criterion)


    kernel_shape = (in_features, out_features)
    magnitude = torch.from_numpy(chi.rvs(df=4, loc=0, scale=s, size=kernel_shape)).to(torch.float64)
    # Purely imaginary quaternions unitary
    _, v_i, v_j, v_k = unitary_init(in_features, out_features, low, high)

    theta = torch.from_numpy(np.random.uniform(low=-np.pi, high=np.pi, size=kernel_shape)).to(torch.float64)
    phi_i = torch.cos(torch.from_numpy(np.random.uniform(low=-s, high=s, size=kernel_shape)).to(torch.float64))**2
    phi_j = torch.cos(torch.from_numpy(np.random.uniform(low=-s, high=s, size=kernel_shape)).to(torch.float64))**2
    phi_k = torch.cos(torch.from_numpy(np.random.uniform(low=-s, high=s, size=kernel_shape)).to(torch.float64))**2
    phi = phi_i / (phi_i + phi_j + phi_k)
    phj = phi_j / (phi_i + phi_j + phi_k)
    phk = phi_k / (phi_i + phi_j + phi_k)
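    # phi + phj + phk == 1, so the sin(theta) mass is split across the three
    # imaginary axes with random convex weights.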
    weight_r = magnitude * torch.cos(theta)
    weight_i = magnitude * v_i * torch.sin(theta) * phi
    weight_j = magnitude * v_j * torch.sin(theta) * phj
    weight_k = magnitude * v_k * torch.sin(theta) * phk

    if transpose:
        weight_r = weight_r.t()
        weight_i = weight_i.t()
        weight_j = weight_j.t()
        weight_k = weight_k.t()

    return weight_r.to(torch.float32), weight_i.to(torch.float32), weight_j.to(torch.float32), weight_k.to(torch.float32)
Example no. 19
def initialize_linear(in_channels, out_channels, init_mode="he"):
    """
    Initializes quaternion weight parameter for linear.
    The magnitude is drawn from a chi distribution with 4 degrees of freedom
    (the norm of a 4-component Gaussian), scaled by `scale`.
    The phase is uniformly initialized in [-pi, pi].
    The basis vectors are randomly initialized and normalized by their norm.
    The whole initialization follows the polar form of the quaternion.

    @type in_channels: int
    @type out_channels: int
    @type init_mode: str
    """

    if init_mode == "he":
        scale = 1 / np.sqrt(in_channels * 8)
    elif init_mode in ["xavier", "glorot"]:
        scale = 1 / np.sqrt((in_channels + out_channels) * 8)

    size_real = [in_channels, out_channels]
    size_img = [in_channels, out_channels * 3]

    img_mat = torch.Tensor(*size_img).uniform_(-1, 1)
    mat = Q(torch.cat([torch.zeros(size_real), img_mat], 1))
    mat /= mat.norm()

    phase = torch.Tensor(*size_real).uniform_(-np.pi, np.pi)
    magnitude = torch.from_numpy(chi.rvs(4, loc=0, scale=scale,
                                         size=size_real)).float()

    r = magnitude * torch.cos(phase)
    factor = magnitude * torch.sin(phase)

    mat *= factor
    mat += r
    return mat
Example no. 20
print(opt)

ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
nc = 3

netG = NetG(ngf, nz, nc, ngpu)
netG.load_state_dict(
    torch.load(opt.netG, map_location=lambda storage, loc: storage))
netG.eval()
print(netG)

for j in range(opt.niter):
    # step 1
    r = chi.rvs(df=100)

    # step 2
    u = numpy.random.normal(0, 1, nz)
    w = numpy.random.normal(0, 1, nz)
    u /= numpy.linalg.norm(u)
    w /= numpy.linalg.norm(w)

    v = w - numpy.dot(u, w) * u
    v /= numpy.linalg.norm(v)

    ndimgs = []
    for i in range(opt.n_steps):
        t = float(i) / float(opt.n_steps)
        # step 3
        z = numpy.cos(t * 2 * numpy.pi) * u + numpy.sin(t * 2 * numpy.pi) * v
Example no. 21
    def _weightsamples(self):
        s = chi.rvs(self.d2, size=self.G.shape)
        return self.d2 * s / norm(self.G, axis=1)[:, np.newaxis]
Example no. 22
def main(name):
    # read the dataset and initialize the parameters for the Hadamard transform
    x_train, y_train, x_test, y_test = get_data(name)
    x, y = x_train, y_train
    n_number, f_num = np.shape(x)
    FLAGS.BATCHSIZE = n_number
    d = 2**math.ceil(np.log2(f_num))
    T = FLAGS.T
    G = np.random.randn(T * d)
    B = np.random.uniform(-1, 1, T * d)
    B[B > 0] = 1
    B[B < 0] = -1
    PI_value = np.random.permutation(d)
    G_fro = G.reshape(T, d)
    s_i = chi.rvs(d, size=(T, d))
    S = np.multiply(
        s_i,
        np.array(np.linalg.norm(G_fro, axis=1)**(-0.5)).reshape(T, -1))
    S = S.reshape(1, -1)
    print(np.unique(y))

    class_number = len(np.unique(y))
    if FLAGS.d_openml is not None:
        '''
        label conversion from str to int for OpenML datasets
        '''
        y_0 = np.zeros((n_number, 1))
        for i in range(class_number):
            y_temp = np.where(y[:] != '%d' % i, -1, 1)
            y_0 = np.hstack((y_0, np.mat(y_temp).T))
        y = y_0[:, 1:]
        y = np.argmax(y, axis=1)
    else:
        y = np.array(np.where(y[:] != 1, -1, 1).reshape(n_number, 1))

    print('Training Linear SVM')
    d = 2**math.ceil(np.log2(f_num))
    x_value = np.asmatrix(hadamard(d, f_num, x, G, B, PI_value, S))
    clf = LinearSVC()
    clf.fit(x_value, y)
    print('linear train', clf.score(x_value, y))
    W_fcP = np.asmatrix(-np.ones((T * d, class_number)))
    print(W_fcP.shape)
    n_number, f_num = np.shape(x)
    FLAGS.BATCHSIZE = n_number
    d = 2**math.ceil(np.log2(f_num))
    x_value = np.asmatrix(hadamard(d, f_num, x, G, B, PI_value, S))
    print(x_value.shape)
    x, y = x_test, y_test
    n_number, f_num = np.shape(x)
    if FLAGS.d_openml is not None:
        '''
        label conversion from str to int for OpenML datasets
        '''
        y_0 = np.zeros((n_number, 1))
        for i in range(class_number):
            y_temp = np.where(y[:] != '%d' % i, -1, 1)
            y_0 = np.hstack((y_0, np.mat(y_temp).T))
        y = y_0[:, 1:]
        y = np.argmax(y, axis=1)
    else:
        y = np.array(np.where(y[:] != 1, -1, 1).reshape(n_number, 1))

    FLAGS.BATCHSIZE = n_number
    start = time.time()
    x_value = np.asmatrix(hadamard(d, f_num, x, G, B, PI_value, S))
    print('linear predict', clf.score(x_value, y))
    print(time.time() - start, 'linear svm')
Example no. 23
def radius(d, n):
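    # chi(df=d) draws are the norms of standard d-dimensional Gaussian
    # vectors.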
    rv = chi.rvs(d, loc=0, scale=1, size=n)
    return rv
Example no. 24

def toFixed(x, fb):
    v = (1 << fb)
    if (x >= 0):
        return int(x * v + 0.5)
    else:
        return int(x * v - 0.5)


# generate parameters

B = rng.randint(2, size=(k, d)) * 2 - 1
G = rng.normal(size=(k, d))
S = (1 / (sigma * np.sqrt(d))) * np.multiply(
    1 / l2norm_along_axis1(G).reshape((-1, 1)), chi.rvs(d, size=(k, d)))
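# S gives each normalized row of G a chi(d)-distributed norm; the factor
# 1/(sigma*sqrt(d)) folds in the kernel bandwidth (Fastfood scaling).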

alpha = rng.normal(size=(1, n))

x = np.linspace(0, npts, npts) / float(1 << (int(np.log2(npts)) - 1))
b = rng.uniform(0, 1, size=npts) * 2 * np.pi
A = np.sqrt(2. / n)
cos = A * np.cos(x * np.pi + 0)

# save parameters to csv and cosTab.txt
"""
np.savetxt("params/B.csv", B, delimiter=",")
np.savetxt("params/G.csv", G, delimiter=",")
np.savetxt("params/S.csv", S, delimiter=",")
np.savetxt("params/alpha.csv", alpha, delimiter=",")
"""
Example no. 25
def quaternion_init(in_features,
                    out_features,
                    kernel_size=None,
                    criterion="glorot"):
    """Returns a matrix of quaternion numbers initialised with the method
        described in "Quaternion Recurrent Neural Network " - Parcollt T.

    Arguments
    ---------
    in_features : int
        Number of real values of the input layer (quaternion // 4)
    out_features : int
        Number of real values of the output layer (quaternion // 4)
    kernel_size : int
        Kernel_size for convolutional layers (ex: (3,3))
    criterion: str, (glorot, he)
    """

    # We set the numpy seed equal to the torch seed for reproducibility,
    # since we use numpy and scipy here. We need % (2**31-1) because, if the
    # seed hasn't been set by the user in the YAML file, torch will generate
    # a seed too big for numpy.
    np.random.seed(seed=torch.initial_seed() % (2**31 - 1))

    if kernel_size is not None:
        receptive_field = np.prod(kernel_size)
        fan_in = in_features * receptive_field
        fan_out = out_features * receptive_field
    else:
        fan_in = in_features
        fan_out = out_features

    if criterion == "glorot":
        s = 1.0 / np.sqrt(2 * (fan_in + fan_out))
    else:
        s = 1.0 / np.sqrt(2 * fan_in)

    # Generating randoms and purely imaginary quaternions :
    if kernel_size is None:
        kernel_shape = (in_features, out_features)
    else:
        if type(kernel_size) is int:
            kernel_shape = (out_features, in_features) + tuple((kernel_size, ))
        else:
            kernel_shape = (out_features, in_features) + (*kernel_size, )

    modulus = torch.from_numpy(chi.rvs(4, loc=0, scale=s, size=kernel_shape))
    number_of_weights = np.prod(kernel_shape)
    v_i = torch.FloatTensor(number_of_weights).uniform_(-1, 1)
    v_j = torch.FloatTensor(number_of_weights).uniform_(-1, 1)
    v_k = torch.FloatTensor(number_of_weights).uniform_(-1, 1)

    # Purely imaginary quaternions unitary
    for i in range(0, number_of_weights):
        norm = torch.sqrt(v_i[i]**2 + v_j[i]**2 + v_k[i]**2) + 0.0001
        v_i[i] /= norm
        v_j[i] /= norm
        v_k[i] /= norm
    v_i = v_i.reshape(kernel_shape)
    v_j = v_j.reshape(kernel_shape)
    v_k = v_k.reshape(kernel_shape)

    phase = torch.rand(kernel_shape).uniform_(-math.pi, math.pi)

    weight_r = modulus * torch.cos(phase)
    weight_i = modulus * v_i * torch.sin(phase)
    weight_j = modulus * v_j * torch.sin(phase)
    weight_k = modulus * v_k * torch.sin(phase)

    return (weight_r, weight_i, weight_j, weight_k)
Example no. 26
    def fit(self, gType=1):
        self.T = gType
        if self.verbose:
            print("Gaussian Matrix: ")
            print("----------------")
            if gType == 0:
                s = " N(0,1)\n"
            elif gType == 1:
                s = " {-1,1}\n"
            elif gType == 2:
                s = " {-1,0,1}\n"
            else:
                print("Error: select G matrix")
                raise Exception
            print(s)

        # --- Software Diagonal Matrices ---
        self.B = self.rng.randint(2, size=(self.k, self.d)) * 2 - 1

        if self.T == 0:
            self.G = self.rng.normal(size=(self.k, self.d))
            coeff = 1
        elif self.T == 1:
            coeff, self.G = sparse_random_matrix(self.k,
                                                 self.d,
                                                 density=1.,
                                                 random_state=self.rng)
        elif self.T == 2:
            coeff, self.G = sparse_random_matrix(self.k,
                                                 self.d,
                                                 density=self.density,
                                                 random_state=self.rng)
        else:
            raise Exception

        self.P = np.hstack([(i * self.d) + self.rng.permutation(self.d)
                            for i in range(self.k)])

        np.random.seed(seed=23)
        self.S = np.multiply(
            1 / l2norm_along_axis1(coeff * self.G).reshape((-1, 1)),
            chi.rvs(self.d, size=(self.k, self.d)))
        self.S = self.S * coeff
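        # After normalizing by the row norms of coeff*G, the entries of S are
        # chi(d)-distributed; multiplying by coeff restores the sparse-matrix
        # scaling.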
        self.S_hw = (1 / (self.sigma * np.sqrt(self.d))) * self.S  # taken from scale_transform

        self.U = self.rng.uniform(0, 2 * np.pi, size=self.n)
        self.U_hw = 2 * np.pi * self.U
        self.A_hw = np.sqrt(2. / self.n)  # amplitude of cosine LUT

        # --- Hardware RAM-based Shift Registers ----
        '''
        self.Vp - after the permutation
        self.Vg - after the gaussian
        self.Vf - after the 2nd Hadamard
        self.H  - hadamard matrix 
        '''
        B = self.B
        if self.pad > 0:
            B[:, -self.pad:] *= 0
        H = HadamardMatrix(n=self.d)

        # for each k in B, multiply the array elementwise with each row of H and vstack rows
        V = np.vstack([np.multiply(B[i], H) for i in range(self.k)])
        # apply permutation to indices (n indices for n_dicts)
        p = np.arange(self.n).reshape(1, -1)
        np.take(p, self.P, axis=1, mode='wrap', out=p)
        p = np.ravel(p)
        # then apply permutation of the indices to V. need to transpose because np.take works on cols
        self.Vp = np.take(V.T, p, axis=1, mode='wrap').T

        self.Vg = np.multiply(np.ravel(self.G), self.Vp.T).T

        # reshape H(d, d, 1)
        H = H.reshape(H.shape[0], H.shape[1], 1)

        # multiply combination of inputs after G with hadamard transform
        Vh = np.vstack([
            np.multiply(self.Vg.reshape(self.k, self.d, self.d)[i], H)
            for i in range(self.k)
        ])
        # simplify the expression, so it's represented as a multiple of the input (i.e. 0 + x2 -4x3 + x4)
        Vf = np.zeros(shape=(self.d * self.k, self.d))
        for i in range(self.d * self.k):
            for j in range(self.d):
                Vf[i] += Vh[i][j]
        self.Vf = Vf
        self.H = HadamardMatrix(n=self.d)

        # remove zero columns
        if self.pad > 0:
            self.Vp = self.Vp[:, :-self.pad]
            self.Vg = self.Vg[:, :-self.pad]
            self.Vf = self.Vf[:, :-self.pad]
Example no. 27
    def mutation(self):
        #---------------------------- Mutation --------------------------------

        # Mirroring
        mode = self.sampling_method
        dim, _lambda, sigma, evalcount, scale, aux = self.dim, self._lambda, self.sigma, self.evalcount, \
            self.scale, self.aux

        if mode == 1 or mode == 11:
            if mod(evalcount + _lambda, 2) != 0:
                half = int(ceil(_lambda / 2.))
                z = randn(dim, half)
                aux = -z[:, -1].reshape(-1, 1)
                z = append(z, -z[:, :-1], axis=1)
            else:
                half = int(floor(_lambda / 2.))
                z = randn(dim, half)
                z = append(z, -z, axis=1)
                if len(aux) != 0:
                    z = append(aux, z, axis=1)
            self.half = half
        # Derandomized step size
        elif mode == 3:
            z = randn(dim, _lambda)
            z = scale * z / sqrt(sum(power(z, 2), 0))

        # Orthogonal mirrored sampling
        elif mode == 4 or mode == 7 or mode == 44:
            if mod(evalcount + _lambda, 2) != 0:
                half = int(ceil(_lambda / 2.))
                z = zeros((dim, half))
                n = int(min([dim, half]))
                if dim < half:
                    z[:, dim:] = randn(dim, half - dim)
                q = qr(randn(dim, n))[0]
                l = chi.rvs(dim, size=n)
                z[:, 0:n] = l * q
                aux = -z[:, -1].reshape(-1, 1)
                z = append(z, -z[:, :-1], axis=1)
            else:
                half = int(floor(_lambda / 2.))
                z = zeros((dim, half))
                n = int(min([dim, half]))
                if dim < half:
                    z[:, dim:] = randn(dim, half - dim)
                q = qr(randn(dim, n))[0]
                l = chi.rvs(dim, size=n)
                z[:, 0:n] = l * q
                z = append(z, -z, axis=1)
                if len(aux) != 0:
                    z = append(aux, z, axis=1)
            self.half = half
        # Orthogonal mirrored sampling, taking lengths from the Gaussian
        # sample itself instead of fresh chi draws
        elif mode == 8:
            if mod(evalcount + _lambda, 2) != 0:
                half = int(ceil(_lambda / 2.))
                z = zeros((dim, half))
                n = int(min([dim, half]))
                if dim < half:
                    z[:, dim:] = randn(dim, half - dim)
                tmp = randn(dim, n)
                l = sqrt(np.sum(power(tmp, 2), 0))
                q = qr(tmp)[0]
                z[:, 0:n] = l * q
                aux = -z[:, -1].reshape(-1, 1)
                z = append(z, -z[:, :-1], axis=1)
            else:
                half = int(floor(_lambda / 2.))
                z = zeros((dim, half))
                n = int(min([dim, half]))
                if dim < half:
                    z[:, dim:] = randn(dim, half - dim)
                tmp = randn(dim, n)
                l = sqrt(np.sum(power(tmp, 2), 0))
                q = qr(tmp)[0]
                z[:, 0:n] = l * q
                z = append(z, -z, axis=1)
                if len(aux) != 0:
                    z = append(aux, z, axis=1)
            self.half = half
        # Orthogonal sampling (random rotation)
        elif mode == 5:
            z = dot(rand_orth_mat(dim), eye(dim))
            n = dim
            if dim > _lambda:
                p = arange(0, dim)
                shuffle(p)
                z = z[:, p[0:_lambda]]
                n = _lambda
            l = chi.rvs(dim, size=n)
            sign = rand(n)
            sign[sign > .5] = 1
            sign[sign <= .5] = -1
            z = sign * l * z
            if dim < _lambda:
                ss = randn(dim, _lambda - dim)
                z = append(z, ss, axis=1)

        # Orthogonal sampling (Gram-Schmidt)
        elif mode == 6:
            z = zeros((dim, _lambda))
            n = min([dim, _lambda])
            if dim < _lambda:
                z[:, dim:] = randn(dim, _lambda - dim)
            q = qr(randn(dim, n))[0]
            l = chi.rvs(dim, size=n)
            z[:, 0:n] = l * q

        elif mode == 9:
            if mod(evalcount + _lambda, 2) != 0:
                half = int(ceil(_lambda / 2.))
                z = randn(dim, half)
                q = randn(dim, half)
                p = array([q[:, i] - inner(q[:, i], z[:, i])*z[:, i] / norm(z[:, i])**2 \
                    for i in range(half)]).T
                p = (p / sqrt(sum(power(p, 2), 0))) * sqrt(sum(power(q, 2), 0))
                aux = p[:, -1].reshape(-1, 1)
                z = append(z, p[:, :-1], axis=1)
            else:
                half = int(floor(_lambda / 2.))
                z = randn(dim, half)
                q = randn(dim, half)
                p = array([q[:, i] - inner(q[:, i], z[:, i])*z[:, i] / norm(z[:, i])**2 \
                    for i in range(half)]).T
                p = (p / sqrt(sum(power(p, 2), 0))) * sqrt(sum(power(q, 2), 0))
                z = append(z, p, axis=1)
                if len(aux) != 0:
                    z = append(aux, z, axis=1)
            self.half = half
        # Standard mutation
        else:
            z = randn(dim, _lambda)

        self.z = z
        self.offspring = add(self.wcm,
                             sigma * dot(self.e_vector, self.e_value * self.z))
Example no. 28
    def chi(self):
        p = chi.rvs(*self.dist_parms[:-2],
                    loc=self.dist_parms[-2],
                    scale=self.dist_parms[-1])
        return max(0, p)
Example no. 29
def estimate_sigma(observed, truncated_df, lower_bound, upper_bound, untruncated_df=0, factor=3, npts=50, nsample=2000):
    """

    Produce an estimate of $\sigma$ from a constrained
    error sum of squares. The relevant distribution is a
    scaled $\chi^2$ restricted to $[0,U]$ where $U$ is `upper_bound`.

    Parameters
    ----------

    observed : float
        The observed sum of squares.

    truncated_df : float
        Degrees of freedom of the truncated $\chi^2$ in the sum of squares.
        The observed sum is assumed to be the sum
        of an independent untruncated $\chi^2$ and the truncated one.

    lower_bound : float
        Lower limit of truncation interval.
    
    upper_bound : float
        Upper limit of truncation interval.
    
    untruncated_df : float
        Degrees of freedom of the untruncated $\chi^2$ in the sum of squares.

    factor : float
        Range of candidate values is 
        [observed/factor, observed*factor]

    npts : int
        How many candidate values for interpolator.

    nsample : int
        How many samples for each expected value
        of the truncated sum of squares.

    Returns
    -------

    sigma_hat : float
         Estimate of $\sigma$.
    
    """

    if untruncated_df < 50:
        linear_term = truncated_df**(0.5)
    else:
        linear_term = 0

    total_df = untruncated_df + truncated_df

    values = np.linspace(1./factor, factor, npts) * observed
    expected = 0 * values
    for i, value in enumerate(values):
        P_upper = chidist.cdf(upper_bound * np.sqrt(truncated_df) / value, truncated_df) 
        P_lower = chidist.cdf(lower_bound * np.sqrt(truncated_df) / value, truncated_df) 
        U = np.random.sample(nsample)
        if untruncated_df > 0:
            sample = (chidist.ppf((P_upper - P_lower) * U + P_lower, truncated_df)**2 + chidist.rvs(untruncated_df, size=nsample)**2) * value**2
        else:
            sample = (chidist.ppf((P_upper - P_lower) * U + P_lower, truncated_df) * value)**2
        expected[i] = np.mean(sample) 

        if expected[i] >= 1.5 * (observed**2 * total_df + observed**2 * linear_term):
            break

    interpolant = interp1d(values, expected + values**2 * linear_term)
    V = np.linspace(1./factor,factor,10*npts) * observed

    # this solves for the solution to 
    # expected(sigma) + sqrt(df) * sigma^2 = observed SS * (1 + sqrt(df))
    # the usual "MAP" estimator would have RHS just observed SS
    # but this factor seems to correct it.
    # it is such that if there were no selection it would be 
    # the usual unbiased estimate

    try:
        sigma_hat = np.min(V[interpolant(V) >= observed**2 * total_df + observed**2 * linear_term])
    except ValueError:
        # no solution, just return observed
        sigma_hat = observed
        
    return sigma_hat
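
A minimal usage sketch (hypothetical numbers; it assumes numpy, scipy.interpolate.interp1d and scipy.stats.chi as chidist are imported as in the snippet):

    # observed RSS 3.2, a chi^2 with 10 truncated df on [0, 5], no untruncated part
    sigma_hat = estimate_sigma(observed=3.2, truncated_df=10,
                               lower_bound=0.0, upper_bound=5.0)
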
Example no. 30
# Display the probability density function (``pdf``):

x = np.linspace(chi.ppf(0.01, df), chi.ppf(0.99, df), 100)
ax.plot(x, chi.pdf(x, df), 'r-', lw=5, alpha=0.6, label='chi pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = chi(df)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = chi.ppf([0.001, 0.5, 0.999], df)
np.allclose([0.001, 0.5, 0.999], chi.cdf(vals, df))
# True

# Generate random numbers:

r = chi.rvs(df, size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
Example no. 31
def main(name):
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax2 = ax1.twinx()
    '''
    read data and initialize parameters for the Hadamard transform
    '''
    x_train, y_train, x_test, y_test = get_data(name)
    acc_linear = np.zeros(2)
    acc_binary = np.zeros(2)
    acc_random = np.zeros(2)
    time_linear = np.zeros(2)
    time_binary = np.zeros(2)
    time_random = np.zeros(2)
    marker = ['.', 'v', '^', 's', '*']
    for iter in range(5):
        iter_loss = []
        iter_acc = []
        T = 2**iter
        print(T)
        x, y = x_train, y_train
        n_number, f_num = np.shape(x)
        d = 2**math.ceil(np.log2(f_num))
        G = np.random.randn(T * d)
        B = np.random.uniform(-1, 1, T * d)
        B[B > 0] = 1
        B[B < 0] = -1
        PI_value = np.hstack([(i * d) + np.random.permutation(d)
                              for i in range(T)])
        G_fro = G.reshape(T, d)
        s_i = chi.rvs(d, size=(T, d))
        S = np.multiply(
            s_i,
            np.array(np.linalg.norm(G_fro, axis=1)**(-0.1)).reshape(T, -1))
        S = S.reshape(1, -1)
        FLAGS.BATCHSIZE = n_number
        class_number = len(np.unique(y))
        if class_number == 2:
            class_number -= 1

        x_value = np.asmatrix(hadamard(d, f_num, x, G, B, PI_value, S, T))
        y_temp = label_processing(y, n_number)
        clf = LinearSVC()
        clf.fit(x_value, y)
        # print(clf.score(x_value,y))
        print('Training coordinate descent initialized from the result of linear SVM')
        W_fcP = clf.coef_
        W_fcP = np.asmatrix(np.sign(W_fcP.reshape(-1, class_number)))
        W = W_fcP
        n_number, project_d = x_value.shape
        test_number, f_num = np.shape(x_test)
        FLAGS.BATCHSIZE = test_number
        test_x = np.asmatrix(hadamard(d, f_num, x_test, G, B, PI_value, S, T))

        test_y = label_processing(y_test, test_number)
        # original optimization method
        for c in range(class_number):
            W_temp = W[:, c]
            y_temp_c = y_temp[:, c]
            init = np.dot(x_value, W_temp)
            hinge_loss = sklearn.metrics.hinge_loss(y_temp_c, init) * n_number
            loss_new = np.sum(hinge_loss)
            if class_number != 1:
                predict = np.argmax(np.array(np.dot(test_x, W_temp)), axis=1)
                y_label = np.argmax(test_y, axis=1)
                acc = accuracy_score(np.array(y_label), np.array(predict))
            else:
                predict = np.array(np.dot(test_x, W_temp))
                acc = accuracy_score(np.sign(test_y), np.sign(predict))
            iter_loss.append(loss_new)
            iter_acc.append(acc)
            loss_old = 2 * loss_new
            # while (loss_old - loss_new) / loss_old >= 1e-6:
            #     loss_old = loss_new
            #     for i in range(project_d):
            #         derta = init - np.multiply(W_temp[i], x_value[:, i]) * 2
            #         loss = sklearn.metrics.hinge_loss(y_temp_c, derta) * n_number
            #         if loss < loss_new:
            #             loss_new = loss
            #             init = derta
            #             W_temp[i] = -W_temp[i]
            #     if class_number != 1:
            #         predict = np.argmax(np.array(np.dot(test_x, W_temp)), axis=1)
            #         y_label = np.argmax(test_y, axis=1)
            #         acc = accuracy_score(np.array(y_label), np.array(predict))
            #     else:
            #         predict = np.array(np.dot(test_x, W_temp))
            #         acc = accuracy_score(np.sign(test_y), np.sign(predict))

            plt.scatter(test_x, test_y)
        plt.show()
        exit()