def random_mutation(population, m_probability, m_method):
    # random per-chromosome probabilities deciding whether to mutate
    SEQUENCE = cupy.random.uniform(0, 1, population.shape[0])
    mutated_population, mutated_chromosome = None, None
    for i in range(population.shape[0]):
        chromosome = population[i]
        if SEQUENCE[i] < m_probability:
            # mutate chromosome
            if m_method[1] == 'gauss':
                mutated_chromosome = gauss_replacement(chromosome)
            elif m_method[1] == 'reset':
                mutated_chromosome = reset_replacement(chromosome)
            else:
                print(
                    '\nError [8]: Invalid mutation method combination [at random mutation]\nExiting...'
                )
                exit()
            # append the mutated chromosome to the mutated population
            if i == 0:
                # first iteration: initialize the mutated population
                mutated_population = mutated_chromosome
            else:
                # afterwards, stack chromosomes onto the mutated population
                mutated_population = cupy.vstack(
                    (mutated_population, mutated_chromosome))
        else:
            # NO mutation
            # append the unchanged chromosome to the mutated population
            if i == 0:
                # first iteration: initialize the mutated population
                mutated_population = chromosome
            else:
                # afterwards, stack chromosomes onto the mutated population
                mutated_population = cupy.vstack(
                    (mutated_population, chromosome))
    return mutated_population
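
A hypothetical call for the function above, assuming the `gauss_replacement` helper it dispatches to is defined elsewhere in the project:

import cupy

pop = cupy.random.randint(0, 10, (6, 8))
# m_method is assumed to be a pair like ('random', 'gauss'),
# since the dispatch above inspects m_method[1]
mutated = random_mutation(pop, m_probability=0.1, m_method=('random', 'gauss'))
print(mutated.shape)  # (6, 8): one row per chromosome, mutated or not
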
    def remove_indices(self):
        """make feature vector `self.features`"""
        # 0<=v,d1,d2,d3<=28, 0<=e<=28*28=784, so v,d1,d2,d3 has 10**2 spaces and e has 10**3 spaces.
        if not self.use_d:
            features = cp.vstack((self.num_vertices, self.num_edges))
            features = features[0] + features[1] * (10**2)  # ve
        else:
            features = cp.vstack((self.num_vertices, self.num_edges,
                                  self.num_id1, self.num_id2, self.num_id3))
            features = features[0] + features[1] * (10**2) + features[2] * (
                10**
                (2 + 3)) + features[3] * (10**(2 + 3 + 2)) + features[4] * (
                    10**(2 + 3 + 2 + 2))  # veid1id2id3

        self.features = cp.unique(features, return_counts=True)

        self.prob_of_measuring_0ket = cp.sum(
            self.features[1]**2) / (2**(2 * self.adjacency_mat.shape[0]))
        # cp.sqrt, not np.sqrt: the sum lives on the GPU
        divide_value = cp.sqrt(cp.sum(self.features[1]**2))
        self.normalized_features = (self.features[0],
                                    self.features[1] / divide_value)

        del self.indices
        del self.num_vertices
        del self.num_edges
        if self.use_d:
            del self.num_id1
            del self.num_id2
            del self.num_id3

        self.features = (cp.asnumpy(self.features[0]),
                         cp.asnumpy(self.features[1]))
        self.normalized_features = (cp.asnumpy(self.normalized_features[0]),
                                    cp.asnumpy(self.normalized_features[1]))
Example #3
def inverse_bwt(string, ind):
    # Convert the string to a list of Unicode code points
    str_list = [ord(i) for i in string]

    # Move the data to GPU arrays
    # Array holding the sequence of the input string
    s_arr = cp.array(str_list)
    # The same sequence, sorted
    sorted_s = cp.array(sorted(str_list))
    # Simply stack the two previous arrays
    tab_s = cp.vstack((s_arr, sorted_s))
    for i in range(1, len(s_arr) - 1):
        # Sort, obtaining the indices into tab_s for the new row
        # (actually a column; .T swaps the array axes)
        j = cp.lexsort(
            cp.array([tab_s.T[:, k].tolist() for k in range(i, -1, -1)]))
        # Prepend s_arr to the table rows reordered by j
        tab_s = cp.vstack((s_arr, tab_s.T[j].T))
    # Sort on the last row
    j = cp.lexsort(
        cp.array(
            [tab_s.T[:, k].tolist() for k in range(len(s_arr) - 1, -1, -1)]))

    # Convert the numbers back to Unicode characters
    # (int() pulls each 0-d GPU scalar to the host for chr)
    str_list = [chr(int(i)) for i in tab_s.T[j][ind]]
    return ''.join(str_list)
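
A round-trip sketch for the function above. The forward transform here is a naive CPU helper written only for this check (an assumption, not part of the original project); `ind` is the row index of the original string among the sorted rotations.

def bwt(s):
    # naive forward BWT: last column of the sorted rotation table
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return ''.join(r[-1] for r in rotations), rotations.index(s)

encoded, row = bwt('banana')   # -> ('nnbaaa', 3)
print(inverse_bwt(encoded, row))  # expected: 'banana'
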
def uniform_crossover(parent_pairs, x_probability):
    # define random chromosome probability sequence (to decide randomly whether to perform crossover or not)
    X_SEQUENCE = cupy.random.uniform(0, 1, parent_pairs.shape[0])
    # define random element probability sequence (for uniform crossover);
    # one coin toss per gene, so size this with the chromosome length
    # parent_pairs.shape[2] (the original used shape[1], the pair axis,
    # which overflows in the gene loop below)
    U_SEQUENCE = cupy.random.uniform(
        0, 1, parent_pairs.shape[0] * parent_pairs.shape[2]).reshape(
            parent_pairs.shape[0], parent_pairs.shape[2])
    # define element probability crossover (for uniform crossover)
    CONST_PROB = 0.5
    # define new generation population variable
    population_hat = None
    # perform uniform crossover
    for i in range(parent_pairs.shape[0]):
        X, Y = parent_pairs[i]
        # check chromosomes' compatibility in case there is an error
        compatible_chromosomes = X.shape == Y.shape
        # define max index boundary
        chromosome_shape = X.shape[0]
        # initialize new chromosome
        a, b = cupy.zeros((2, chromosome_shape), dtype=cupy.int64)
        if not compatible_chromosomes:
            print(
                "Error [14]: Incompatible chromosomes (at: uniform crossover)\nExiting..."
            )
            exit()
        else:
            # crossover with respect to the crossover probability
            if X_SEQUENCE[i] < x_probability:
                # create children
                for c, (k, l) in enumerate(zip(X, Y)):
                    # repeatedly toss a coin and check with respect to CONST_PROB
                    if CONST_PROB > U_SEQUENCE[i][c]:
                        # gene exchange
                        a[c] = l
                        b[c] = k
                    else:
                        # NO gene exchange
                        a[c] = k
                        b[c] = l
                # append children to form the new population
                if i == 0:
                    # first iteration: initialize the new population
                    population_hat = cupy.stack((a, b))
                else:
                    # afterwards, stack chromosomes onto the new population
                    population_hat = cupy.vstack(
                        (population_hat, cupy.stack((a, b))))
            else:
                # append parents to the new population
                if i == 0:
                    # first iteration: initialize the new population
                    population_hat = cupy.stack((X, Y))
                else:
                    # afterwards, stack chromosomes onto the new population
                    population_hat = cupy.vstack(
                        (population_hat, cupy.stack((X, Y))))
    return population_hat
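
An illustrative call for the function above: four parent pairs with eight genes each, crossed over with probability 0.9. The (pairs, 2, genes) layout follows the `X, Y = parent_pairs[i]` unpacking used in the loop.

import cupy

pairs = cupy.random.randint(0, 10, (4, 2, 8), dtype=cupy.int64)
children = uniform_crossover(pairs, x_probability=0.9)
print(children.shape)  # (8, 8): two children per pair, stacked row-wise
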
Example #5
    def get_lidar_data(self, input_points: dict):
        print("get one frame lidar data.")
        self.current_frame["lidar_stamp"] = input_points['stamp']
        self.current_frame["lidar_seq"] = input_points['seq']
        self.current_frame["points"] = input_points['points'].T   
        self.lidar_deque.append(deepcopy(self.current_frame))
        # once five sweeps are buffered, align them all to the newest frame
        if len(self.lidar_deque) == 5:

            ref_from_car = self.imu2lidar
            car_from_global = transform_matrix(self.lidar_deque[-1]['translation'], self.lidar_deque[-1]['rotation'], inverse=True)

            ref_from_car_gpu = cp.asarray(ref_from_car)
            car_from_global_gpu = cp.asarray(car_from_global)

            for i in range(len(self.lidar_deque) - 1):
                last_pc = self.lidar_deque[i]['points']
                last_pc_gpu = cp.asarray(last_pc)

                global_from_car = transform_matrix(self.lidar_deque[i]['translation'], self.lidar_deque[i]['rotation'], inverse=False)
                car_from_current = self.lidar2imu
                global_from_car_gpu = cp.asarray(global_from_car)
                car_from_current_gpu = cp.asarray(car_from_current)

                transform = reduce(
                    cp.dot,
                    [ref_from_car_gpu, car_from_global_gpu, global_from_car_gpu, car_from_current_gpu],
                )
                # tmp_1 = cp.dot(global_from_car_gpu, car_from_current_gpu)
                # tmp_2 = cp.dot(car_from_global_gpu, tmp_1)
                # transform = cp.dot(ref_from_car_gpu, tmp_2)

                last_pc_gpu = cp.vstack((last_pc_gpu[:3, :], cp.ones(last_pc_gpu.shape[1])))
                last_pc_gpu = cp.dot(transform, last_pc_gpu)

                self.pc_list.append(last_pc_gpu[:3, :])

            current_pc = self.lidar_deque[-1]['points']
            current_pc_gpu = cp.asarray(current_pc)
            self.pc_list.append(current_pc_gpu[:3,:])

            all_pc = np.zeros((5, 0), dtype=float)
            for i in range(len(self.pc_list)):
                tmp_pc = cp.vstack((self.pc_list[i], cp.zeros((2, self.pc_list[i].shape[1]))))
                tmp_pc = cp.asnumpy(tmp_pc)
                ref_timestamp = self.lidar_deque[-1]['lidar_stamp'].to_sec()
                timestamp = self.lidar_deque[i]['lidar_stamp'].to_sec()
                tmp_pc[3, ...] = self.lidar_deque[i]['points'][3, ...]
                tmp_pc[4, ...] = ref_timestamp - timestamp
                all_pc = np.hstack((all_pc, tmp_pc))
            
            all_pc = all_pc.T
            print(f" concate pointcloud shape: {all_pc.shape}")

            self.points = all_pc
            sync_cloud = xyz_array_to_pointcloud2(all_pc[:, :3], stamp=self.lidar_deque[-1]["lidar_stamp"], frame_id="lidar_top")
            pub_sync_cloud.publish(sync_cloud)
            return True
Example #6
    def add_new_objs(self, new_obj_n):
        x = cp.random.rand(new_obj_n) * (self.x_max - self.x_min) + self.x_min
        y = cp.random.rand(new_obj_n) * (self.y_max - self.y_min) + self.y_min

        # random sign times a random magnitude bounded by self.v_abs
        vx = (-1)**cp.random.randint(
            1, 3, new_obj_n) * cp.random.rand(new_obj_n) * self.v_abs
        # pick vy so that the speed equals self.v_abs; the original used
        # new_obj_n**2 here, which mixes the object count into the velocity
        vy = (-1)**cp.random.randint(
            1, 3, new_obj_n) * cp.sqrt(self.v_abs**2 - vx**2)

        new_obj_state = cp.vstack((x, y, vx, vy)).T  # (new_obj_n, 4)

        self.state = cp.vstack((self.state, new_obj_state))
Example #7
    def movepixels_3d(self, Iin, Tx):
        '''
        Translate the pixels of an image according to the Tx
        translation image.

        Inputs:
          Iin : The input image.
          Tx  : The transformation image, describing the (backwards)
                translation of every pixel in the x direction.

        Outputs:
          Iout : The transformed image.
        '''

        nx, ny, nz = Iin.shape

        tmpx = cp.linspace(0, nx - 1, num=nx, dtype=np.float32)
        tmpy = cp.linspace(0, ny - 1, num=ny, dtype=np.float32)
        tmpz = cp.linspace(0, nz - 1, num=nz, dtype=np.float32)

        x, y, z = cp.meshgrid(tmpx, tmpy, tmpz, indexing='ij')
        Tlocalx = cp.expand_dims(x + Tx, axis=0)
        y = cp.expand_dims(y, axis=0)
        z = cp.expand_dims(z, axis=0)
        coordinates = cp.vstack((Tlocalx, y, z))

        return map_coordinates(Iin, coordinates, order=1, cval=0)
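
A hypothetical call, assuming `map_coordinates` comes from `cupyx.scipy.ndimage` and `reg` is an instance of the surrounding class: shift a small volume 2.5 voxels along x with linear interpolation.

import cupy as cp

vol = cp.arange(8 * 8 * 8, dtype=cp.float32).reshape(8, 8, 8)
Tx = cp.full(vol.shape, -2.5, dtype=cp.float32)  # backwards mapping
out = reg.movepixels_3d(vol, Tx)  # `reg` is a hypothetical instance
print(out.shape)  # (8, 8, 8)
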
Example #8
def read_dense_as_cupy_csr(dataset, axis_chunk=6000):
    sub_matrices = []
    for idx in idx_chunks_along_axis(dataset.shape, 0, axis_chunk):
        dense_chunk = dataset[idx]
        sub_matrix = cupy.sparse.csr_matrix(dense_chunk)
        sub_matrices.append(sub_matrix)
    # plain cupy.vstack has no `format` argument; stacking sparse matrices
    # needs the sparse vstack (cupy.sparse aliases cupyx.scipy.sparse)
    return cupy.sparse.vstack(sub_matrices, format="csr")
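
The chunking helper is not shown in this example; a minimal sketch of what `idx_chunks_along_axis` is assumed to do (yield index tuples covering at most `axis_chunk` rows at a time):

def idx_chunks_along_axis(shape, axis, chunk):
    # yield tuple indices slicing `shape` into chunks along `axis`
    n = shape[axis]
    for start in range(0, n, chunk):
        idx = [slice(None)] * len(shape)
        idx[axis] = slice(start, min(start + chunk, n))
        yield tuple(idx)
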
Example #9
    def calculate_snrs(self, interferometer, waveform_polarizations):
        name = interferometer.name
        signal_ifo = xp.sum(
            xp.vstack([
                waveform_polarizations[mode] * float(
                    interferometer.antenna_response(
                        self.parameters["ra"],
                        self.parameters["dec"],
                        self.parameters["geocent_time"],
                        self.parameters["psi"],
                        mode,
                    )) for mode in waveform_polarizations
            ]),
            axis=0,
        )[interferometer.frequency_mask]

        time_delay = (self.parameters["geocent_time"] -
                      interferometer.strain_data.start_time +
                      interferometer.time_delay_from_geocenter(
                          self.parameters["ra"],
                          self.parameters["dec"],
                          self.parameters["geocent_time"],
                      ))

        signal_ifo *= xp.exp(-2j * np.pi * time_delay * self.frequency_array)

        d_inner_h = xp.sum(
            xp.conj(signal_ifo) * self.strain[name] / self.psds[name])
        h_inner_h = xp.sum(xp.abs(signal_ifo)**2 / self.psds[name])
        return d_inner_h, h_inner_h
def multiple_point_crossover(parent_pairs, x_probability):
    # define random probability sequence
    SEQUENCE = cupy.random.uniform(0, 1, parent_pairs.shape[0])
    # define new generation population variable
    population_hat = None
    # perform multiple point (two-point) crossover
    for i in range(parent_pairs.shape[0]):
        X, Y = parent_pairs[i]
        # check chromosomes' compatibility in case there is an error
        compatible_chromosomes = X.shape == Y.shape
        # define max index boundary
        chromosome_shape = X.shape[0]
        # initialize new chromosome
        a, b = cupy.zeros((2, chromosome_shape), dtype=cupy.int64)
        if not compatible_chromosomes:
            print(
                "Error [13]: Incompatible chromosomes (at: multiple point selection)\nExiting..."
            )
            exit()
        else:
            # pick two random crossover points and sort them
            x_idx, y_idx = cupy.sort(
                cupy.random.randint(0, chromosome_shape, 2))
            # first child chromosome
            a = cupy.concatenate((X[:x_idx], Y[x_idx:y_idx], X[y_idx:]))
            # second child chromosome
            b = cupy.concatenate((Y[:x_idx], X[x_idx:y_idx], Y[y_idx:]))
            # crossover with respect to the crossover probability
            if SEQUENCE[i] < x_probability:
                # append children to form the new population
                if i == 0:
                    # first iteration: initialize the new population
                    population_hat = cupy.stack((a, b))
                else:
                    # afterwards, stack chromosomes onto the new population
                    population_hat = cupy.vstack(
                        (population_hat, cupy.stack((a, b))))
            else:
                # append parents to the new population
                if i == 0:
                    # first iteration: initialize the new population
                    population_hat = cupy.stack((X, Y))
                else:
                    # afterwards, stack chromosomes onto the new population
                    population_hat = cupy.vstack(
                        (population_hat, cupy.stack((X, Y))))
    return population_hat
def topk(array, k):
    assert array.ndim == 2
    top_i = array.astype('float32').argpartition(
        -k
    )[:, -k:]  # cupy.argpartition does a full sort for implementation reasons
    top_i = cp.flip(top_i, axis=1)
    top_v = cp.vstack([array[r, top_i[r]] for r in range(array.shape[0])])
    return top_v, top_i
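
An illustrative check of the function above on a small matrix:

import cupy as cp

x = cp.array([[3., 1., 4., 1.], [5., 9., 2., 6.]])
vals, idx = topk(x, 2)
print(vals)  # [[4. 3.] [9. 6.]] -- per-row top-2 values, largest first
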
Example #12
def group_max(data, groups):
    order = cp.lexsort(cp.vstack((data, groups)))
    groups = groups[order]  # this is only needed if groups is unsorted
    data = data[order]
    index = cp.empty(groups.shape[0], 'bool')
    index[-1] = True
    index[:-1] = groups[1:] != groups[:-1]
    return data[index], index
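
An illustrative call: per-group maxima, with the boolean boundary mask returned alongside. `groups` does not need to be sorted.

import cupy as cp

data = cp.array([1., 5., 2., 7., 3.])
groups = cp.array([0, 1, 0, 1, 2])
maxima, mask = group_max(data, groups)
print(maxima)  # [2. 7. 3.] -- max of group 0, 1 and 2
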
Example #13
def reverse1(d):
    d = rev1.take(d).astype(np.uint16)  # replace values in d by rev1[d]
    d = d[d.min(1) != 0, :] - 1  # remove all batch-items that contain a 0
    mask = d.max(1) > 0xFF
    easy = d[~mask]  # filter out trivial items that need no processing
    hard = d[mask]
    # del d  # this clears out a lot of memory!
    for i in range(32):
        free()
        mask = hard[:, i] > 0xFF
        ignore = hard[~mask]
        low = hard[mask]
        high = hard[mask]
        low[:, i] = low[:, i] & 0xFF
        high[:, i] = high[:, i] >> 8
        hard = np.vstack([ignore, low, high])
    return np.vstack([easy, hard]).astype(np.uint8)
Example #14
 def update(self, dt):
     phase_rep = cp.repeat(self.phase, self.size, axis=0)
     phase_diff = cp.sum(self.coupling*cp.sin(phase_rep.T - phase_rep), axis=0)
     dtheta = (self.internal_freq + phase_diff/self.size)
     self.phase = cp.mod(self.phase + dtheta * dt, 2*np.pi)
     self.hist = cp.vstack((self.hist, self.phase))
     r, phi = self.order()
     self.rs.append(r)
     self.phis.append(phi)
Example #15
 def _Preprocess(T, H):
     T = [t.data for t in T]
     T = [cupy.vstack([cupy.zeros((self.fbsize, t.shape[1]), cupy.float32), t]) for t in T]
     T = [cupy.hstack([t[i:len(t)-(self.fbsize-i)] for i in range(self.fbsize)]) for t in T]
     T = [cupy.fft.irfft(cupy.exp(inum*cupy.angle(cupy.fft.rfft(t))))*dftnorm for t in T]
     T = [Variable(t) for t in T]
     H = [F.reshape(F.concat(F.broadcast_to(h, (self.fs, h.shape[0], h.shape[1])), axis=1), (h.shape[0]*self.fs, -1)) for h in H]
     H = [F.concat([t, h]) for t, h in zip(T, H)]
     return H
Example #16
    def generate_q_u_matrix(x_coordinate: cp.array,
                            y_coordinate: cp.array) -> tuple:
        flatten_flag = x_coordinate.ndim > 1
        if flatten_flag:
            x_coordinate = x_coordinate.flatten()
            y_coordinate = y_coordinate.flatten()

        # split y into fractional (t) and integral (u) parts
        t, u = cp.modf(y_coordinate)
        u = u.astype(int)
        # the four neighboring rows used by cubic interpolation,
        # clamped to the image height h
        uy = cp.vstack([
            cp.minimum(cp.maximum(u - 1, 0), h - 1),
            cp.minimum(cp.maximum(u, 0), h - 1),
            cp.minimum(cp.maximum(u + 1, 0), h - 1),
            cp.minimum(cp.maximum(u + 2, 0), h - 1),
        ]).astype(int)
        Qy = cp.dot(
            coeff,
            cp.vstack([
                cp.ones_like(t, dtype=cp.float32), t,
                cp.power(t, 2),
                cp.power(t, 3)
            ]))
        t, u = cp.modf(x_coordinate)
        u = u.astype(int)
        ux = cp.vstack([
            cp.minimum(cp.maximum(u - 1, 0), w - 1),
            cp.minimum(cp.maximum(u, 0), w - 1),
            cp.minimum(cp.maximum(u + 1, 0), w - 1),
            cp.minimum(cp.maximum(u + 2, 0), w - 1),
        ])
        Qx = cp.dot(
            coeff,
            cp.vstack([
                cp.ones_like(t, dtype=cp.float32), t,
                cp.power(t, 2),
                cp.power(t, 3)
            ]))

        if flatten_flag:
            Qx = Qx.reshape(4, frame_n, int(w * mag)).transpose(1, 0, 2).copy()
            Qy = Qy.reshape(4, frame_n, int(h * mag)).transpose(1, 0, 2).copy()
            ux = ux.reshape(4, frame_n, int(w * mag)).transpose(1, 0, 2).copy()
            uy = uy.reshape(4, frame_n, int(h * mag)).transpose(1, 0, 2).copy()
        return Qx, Qy, ux, uy
Example #17
 def update_mini_batch(self, mini_batch, eta):
     """Update the network's weights and biases by applying
     gradient descent using backpropagation to a single mini batch.
     The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
     is the learning rate."""
     xs, ys = mini_batch.T
     xs = cp.array(
         cp.vstack(xs).astype(np.float64).reshape((-1, self.sizes[0], 1)))
     ys = cp.array(
         cp.vstack(ys).astype(np.float64).reshape((-1, self.sizes[-1], 1)))
     delta_nabla_b, delta_nabla_w = self.backprop_batch(xs, ys)
     self.weights = [
         w - eta * cp.mean(nw, axis=0)
         for w, nw in zip(self.weights, delta_nabla_w)
     ]
     self.biases = [
         b - eta * cp.mean(nb, axis=0)
         for b, nb in zip(self.biases, delta_nabla_b)
     ]
Example #18
File: _toolbox.py  Project: lixfz/Hypernets
    def fix_binary_predict_proba_result(proba):
        if proba.ndim == 1:
            if CumlToolBox.is_cupy_array(proba):
                proba = cupy.vstack([1 - proba, proba]).T
            else:
                proba = cudf.Series(proba)
                proba = cudf.concat([1 - proba, proba], axis=1)
        elif proba.shape[1] == 1:
            proba = cupy.hstack([1 - proba, proba])

        return proba
Example #19
    def __init__(self,basis_number,extended_basis_number,t_start = 0,t_end=1,mempool=None):
        self.basis_number = basis_number
        self.extended_basis_number = extended_basis_number
        self.basis_number_2D = (2*basis_number-1)*basis_number
        self.basis_number_2D_ravel = (2*basis_number*basis_number-2*basis_number+1)
        self.basis_number_2D_sym = (2*basis_number-1)*(2*basis_number-1)
        self.extended_basis_number_2D = (2*extended_basis_number-1)*extended_basis_number
        self.extended_basis_number_2D_sym = (2*extended_basis_number-1)*(2*extended_basis_number-1)
        self.t_end = t_end
        self.t_start = t_start
        self.verbose = True
        if mempool is None:
            mempool = cp.get_default_memory_pool()
        # pinned_mempool = cp.get_default_pinned_memory_pool()
        
        
        # self.ix = cp.zeros((2*self.basis_number-1,2*self.basis_number-1),dtype=cp.int32)
        # self.iy = cp.zeros((2*self.basis_number-1,2*self.basis_number-1),dtype=cp.int32)
        temp = cp.arange(-(self.basis_number-1),self.basis_number,dtype=cp.int32)
        # for i in range(2*self.basis_number-1):
        #     self.ix[i,:] = temp
        #     self.iy[:,i] = temp

        self.ix,self.iy = cp.meshgrid(temp,temp)
        if self.verbose:
            print("Used bytes so far, after creating ix and iy {}".format(mempool.used_bytes()))
        
        
        self.Ddiag = -(2*util.PI)**2*(self.ix.ravel(ORDER)**2+self.iy.ravel(ORDER)**2)
        self.OnePerDdiag = 1/self.Ddiag
        self.Dmatrix = cpx.scipy.sparse.diags(self.Ddiag,dtype=cp.float32)
        if self.verbose:
            print("Used bytes so far, after creating Dmatrix {}".format(mempool.used_bytes()))

        # self.Imatrix = cp.eye((2*self.basis_number-1)**2,dtype=cp.int8)
        self.Imatrix = cpx.scipy.sparse.identity((2*self.basis_number-1)**2,dtype=cp.float32)
        self.Imatrix_dense = cp.eye((2*self.basis_number-1)**2,dtype=cp.float32)
        if self.verbose:
            print("Used bytes so far, after creating Imatrix {}".format(mempool.used_bytes()))

        #K matrix --> 1D fourier index to 2D fourier index
        ones_temp = cp.ones_like(temp)
        self.Kmatrix = cp.vstack((cp.kron(temp,ones_temp),cp.kron(ones_temp,temp)))
        if self.verbose:
            print("Used bytes so far, after creating Kmatrix {}".format(mempool.used_bytes()))
        
        #implement fft plan here
        self._plan_fft2 = None
        # self._fft2_axes = (-2, -1)
        self._plan_ifft2 = None

        x = np.concatenate((np.arange(self.basis_number)+1,np.zeros(self.basis_number-1)))
        toep = sla.toeplitz(x)
        self._Umask = cp.asarray(np.kron(toep,toep),dtype=cp.int16)
Example #20
def main():
    X, Y = getBinaryData()

    X0 = X[Y == 0, :]
    X1 = X[Y == 1, :]
    # oversample the minority class 9x to balance the two classes
    X1 = cp.repeat(X1, 9, axis=0)
    X = cp.vstack([X0, X1])
    Y = cp.array([0] * len(X0) + [1] * len(X1))

    model = LogisticModel()
    model.fit(X, Y, show_fig=True)
    model.score(X, Y)
Example #21
def test_oob_coordinates():
    offset = 2
    idx = pyth_image.shape[0] + offset
    prof = profile_line(pyth_image, (-offset, 2), (idx, 2),
                        linewidth=1,
                        order=0,
                        reduce_func=None,
                        mode='constant')
    expected_prof = cp.vstack([
        cp.zeros((offset, 1)), pyth_image[:, 2, cp.newaxis],
        cp.zeros((offset + 1, 1))
    ])
    assert_array_almost_equal(prof, expected_prof)
Example #22
def block_hankel(data, f):
    """
    Create a block hankel matrix.
    Args:
        data (float): Array.
        f (int): number of rows

    Returns:
        Hankel matrix of f rows.
    """
    n = data.shape[1] - f
    return np.vstack(
        [np.hstack([data[:, i + j] for i in range(f)]) for j in range(n)]).T
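
An illustrative call for a one-channel signal; with 10 samples and f = 3 the result has 3 rows and 10 - 3 = 7 columns:

import numpy as np

sig = np.arange(10, dtype=float).reshape(1, -1)  # shape (1, 10)
H = block_hankel(sig, 3)
print(H.shape)  # (3, 7)
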
Example #23
 def predict(self,test_dl):
     yps = []
     self.model.load_state_dict(torch.load(self.params['model_path']))
     for batch in test_dl:
         xb, _ = batch
         xb = xb.cuda()
         pred = self.model(xb)
         pred = to_dlpack(pred.detach())
         yps.append(cp.fromDlpack(pred))
     yps = cp.vstack(yps)
     if yps.shape[1] == 1:
         yps = yps.ravel()
     return yps
Example #24
def to_pixels(points, x_min, x_max, x_dim, y_min, y_max, y_dim):
    pixels = np.vstack((points.real, points.imag)).T
    pixels[:, 0] -= y_min
    pixels[:, 1] -= x_min
    pixels[:, 0] /= y_max - y_min
    pixels[:, 1] /= x_max - x_min
    pixels[:, 0] *= y_dim
    pixels[:, 1] *= x_dim
    pixels = pixels.astype(np.int32)
    pixels = pixels[(pixels[:, 0] >= 0)
                    & (pixels[:, 0] < y_dim)
                    & (pixels[:, 1] >= 0)
                    & (pixels[:, 1] < x_dim)]
    return pixels
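
An illustrative call: map complex-plane samples onto a 100x100 pixel grid, dropping anything that falls outside.

import numpy as np

points = np.array([0.1 + 0.2j, 0.95 + 0.5j, 2.0 + 2.0j])  # last one is out of range
pixels = to_pixels(points, x_min=0, x_max=1, x_dim=100, y_min=0, y_max=1, y_dim=100)
print(pixels)  # in-range pixel coordinates only; the (2, 2) point is filtered out
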
Example #25
    def fit(self, include_be_used, is_test=False):
        print('training started')
        final_loss = 0.0
        if is_test:
            embeddings = self.model.embeddings_test = cp.vstack(
                (self.model.embeddings_train, self.model.embeddings_test))
            relations_matrix = self.model.relations_matrix_test
        else:
            embeddings = self.model.embeddings_train
            relations_matrix = self.model.relations_matrix_train

        self.losses = [0.0 for _ in range(self.model.epochs)]
        start = time.time()
        if include_be_used:
            relations_matrix_transpose = relations_matrix.transpose().tocsr()
        else:
            relations_matrix_transpose = self.model.relations_matrix_test_not_be_used_from_other_class.transpose(
            ).tocsr()
        self.model.loop_counter_for_negative_sampling = 0

        for epoch in tqdm(range(self.model.epochs)):

            learn_rate = self.calc_learn_rate(epoch)
            target_embeddings = relations_matrix.dot(embeddings)

            if epoch + 1 == self.model.epochs:
                if is_test:
                    last_loss = self.calc_loss(
                        embeddings[self.model.num_of_train_nodes:],
                        target_embeddings[self.model.num_of_train_nodes:])
                else:
                    last_loss = self.calc_loss(embeddings, target_embeddings)

                if math.isnan(last_loss):
                    return 999999
                self.losses[epoch] = last_loss
                final_loss = last_loss

            # gradient descent: compute the target vector for each element and
            # step along the gradient derived from it
            self.calc_grad_and_update_embeddings(embeddings, target_embeddings,
                                                 is_test, learn_rate,
                                                 relations_matrix_transpose)
            # negative sampling
            # if do_negative_sampling:
            self.negative_sampling(embeddings, is_test)
            # norm penalty computation
            self.norm_penalty(embeddings, is_test, learn_rate)

        print('elapsed_time for training : {}'.format(time.time() - start))
        return final_loss
def Generate_S1(J, T):
    S = cp.random.normal(0, .01, J * 5)
    S = S.reshape(5, 1, J)
    EPS = cp.random.normal(0, .01, 3 * J * (T + 1))
    EPS = EPS.reshape(3, T + 1, J)
    Lx = cp.vstack(
        (cp.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
         cp.array([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
         cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])))
    Li = cp.vstack(  # cp, not np: the stacked rows are cupy arrays
        (cp.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
         cp.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
         cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0])))
    Lpi = cp.vstack(
        (cp.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
         cp.array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
         cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0])))
    R = cp.reshape(
        cp.vstack(
            (cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]),
             cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]),
             cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
                       0]))), [3, 18])
    U = cp.reshape(
        cp.vstack(
            (cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0]),
             cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]),
             cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                       1]))), [3, 18])
    I = cp.reshape(
        cp.vstack(
            (cp.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
             cp.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
             cp.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                       0]))), [3, 18])
    I = cp.repeat(I[:, :, cp.newaxis], J, axis=2)
    return EPS, S, Lx, Lpi, Li, I, R, U
Example #27
 def _project_slab(self, axis):
     proj = []
     for data in self._load_slab():
         func = getattr(data, self._mode)
         proj.append(func(axis=axis))
         # release the reference in this loop
         func = None
         data = None
     if axis == 0:
         # z is an aggregated axis, sum together
         proj = sum(proj)
     else:
         # z is a visible axis, stack together
         proj = cp.vstack(proj)
     return cp.asnumpy(proj)
Example #28
def _shift_selem(selem, shift_x, shift_y):
    """Shift the binary image `selem` in the left and/or up.

    This only affects 2D structuring elements with even number of rows
    or columns.

    Parameters
    ----------
    selem : 2D array, shape (M, N)
        The input structuring element.
    shift_x, shift_y : bool
        Whether to move `selem` along each axis.

    Returns
    -------
    out : 2D array, shape (M + int(shift_x), N + int(shift_y))
        The shifted structuring element.
    """
    if selem.ndim != 2:
        # do nothing for 1D or 3D or higher structuring elements
        return selem
    m, n = selem.shape
    if m % 2 == 0:
        extra_row = cp.zeros((1, n), selem.dtype)
        if shift_x:
            selem = cp.vstack((selem, extra_row))
        else:
            selem = cp.vstack((extra_row, selem))
        m += 1
    if n % 2 == 0:
        extra_col = cp.zeros((m, 1), selem.dtype)
        if shift_y:
            selem = cp.hstack((selem, extra_col))
        else:
            selem = cp.hstack((extra_col, selem))
    return selem
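
An illustrative call: an even-sized 2x2 structuring element is padded to 3x3 so that it acquires a well-defined center pixel.

import cupy as cp

selem = cp.ones((2, 2), dtype=cp.uint8)
out = _shift_selem(selem, shift_x=True, shift_y=False)
print(out.shape)  # (3, 3)
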
Example #29
    def one_step_for_sqrtBetas(self, Layers):
        sqrt_beta_noises = self.stdev_sqrtBetas * cp.random.randn(
            self.n_layers)
        propSqrtBetas = cp.zeros(self.n_layers, dtype=cp.float32)

        for i in range(self.n_layers):

            temp = cp.sqrt(1 - self.pcn_step_sqrtBetas**2) * Layers[
                i].sqrt_beta + self.pcn_step_sqrtBetas * sqrt_beta_noises[i]
            propSqrtBetas[i] = max(temp, 1e-4)
            if i == 0:
                stdev_sym_temp = (propSqrtBetas[i] /
                                  Layers[i].sqrt_beta) * Layers[i].stdev_sym
                Layers[i].new_sample_sym = stdev_sym_temp * Layers[
                    i].current_noise_sample
            else:
                Layers[i].LMat.construct_from_with_sqrt_beta(
                    Layers[i - 1].new_sample, propSqrtBetas[i])
                if i < self.n_layers - 1:
                    Layers[i].new_sample_sym = cp.linalg.solve(
                        Layers[i].LMat.latest_computed_L,
                        Layers[i].current_noise_sample)
                else:
                    wNew = Layers[-1].current_noise_sample
                    eNew = cp.random.randn(self.measurement.num_sample)
                    wBar = cp.concatenate((eNew, wNew))
                    LBar = cp.vstack(
                        (self.H, Layers[-1].LMat.latest_computed_L))
                    v, res, rnk, s = cp.linalg.lstsq(LBar, self.yBar - wBar)
                    Layers[-1].new_sample_sym = v
                    Layers[i].new_sample = Layers[i].new_sample_sym[
                        self.fourier.basis_number_2D_ravel - 1:]

        logRatio = 0.5 * (util.norm2(self.y / self.measurement.stdev -
                                     self.H @ Layers[-1].current_sample_sym))
        logRatio -= 0.5 * (util.norm2(self.y / self.measurement.stdev -
                                      self.H @ Layers[-1].new_sample_sym))

        if logRatio > cp.log(cp.random.rand()):
            # print('Proposal sqrt_beta accepted!')
            self.Layers_sqrtBetas = propSqrtBetas
            for i in range(self.n_layers):
                Layers[i].sqrt_beta = propSqrtBetas[i]
                Layers[i].LMat.set_current_L_to_latest()
                if Layers[i].is_stationary:
                    Layers[i].stdev_sym = stdev_sym_temp
                    Layers[i].stdev = Layers[i].stdev_sym[
                        self.fourier.basis_number_2D_ravel - 1:]
Example #30
 def calculate_W_out(self, Y_target, N_x, beta, train_start_timestep,
                     train_end_timestep):
     # see Lukosevicius Practical ESN eqtn 11
     # Using ridge regression
     N_u = sp.shape(Y_target)[0]
     X = cp.array(
         cp.vstack(
             (cp.ones((1, train_end_timestep - train_start_timestep)),
              cp.array(Y_target[:,
                                train_start_timestep:train_end_timestep]),
              self.x[train_start_timestep:train_end_timestep].transpose())))
      # Ridge Regression: W_out = Y X^T (X X^T + beta * I)^(-1)
      Y = cp.array(Y_target[:, train_start_timestep + 1:train_end_timestep + 1])
      gram_inv = cp.linalg.inv(
          cp.matmul(X, X.transpose()) + beta * cp.identity(1 + N_x + N_u))
      self.W_out = cp.asnumpy(cp.matmul(Y, cp.matmul(X.transpose(), gram_inv)))
Example #31
 def test_vstack_wrong_ndim(self):
     a = cupy.empty((3,))
     b = cupy.empty((3, 1))
     with self.assertRaises(ValueError):
         cupy.vstack((a, b))
Example #32
      t = chainer.Variable(cp.asarray(train_y[perm[i:i + BATCH_SIZE]]), volatile='off')

      optimizer.update(model, x, t)

      train_loss += float(model.loss.data) * len(t.data)
      train_accuracy += float(model.accuracy.data) * len(t.data)

    epoch_result = None
    test_accuracy, test_loss = 0, 0
    for i in range(0, NUM_TEST, BATCH_SIZE):
      x = chainer.Variable(cp.asarray(test_x[i:i + BATCH_SIZE]), volatile='on')
      t = chainer.Variable(cp.asarray(test_y[i:i + BATCH_SIZE]), volatile='on')
      batch_result = model(x, t, False)
      if epoch == NUM_EPOCH:
        if i == 0:
          epoch_result = batch_result
        else:
          epoch_result.data = cp.vstack((epoch_result.data, batch_result.data))
      test_loss += float(model.loss.data) * len(t.data)
      test_accuracy += float(model.accuracy.data) * len(t.data)
    print('epoch-{}-{}: [TRAIN] loss: {} accuracy: {}, [TEST] loss: {} accuracy: {}'.format(
        j + 1, epoch, train_loss / NUM_TRAIN, train_accuracy / NUM_TRAIN,
        test_loss / NUM_TEST, test_accuracy / NUM_TEST))
    if epoch == NUM_EPOCH:
      if j == 0:
        all_results = epoch_result
      else:
        all_results += epoch_result
        y = chainer.Variable(cp.asarray(test_y), volatile='on')
        en_acc = F.accuracy(all_results, y)
        print('ENSEMBLE RESULT:\n\t accuracy: {}'.format(en_acc.data))