Example #1
def min(tensor, axis=None, out=None, keepdims=False):
    """Returns the minimum of an array or the maximum along an axis.

    Note::
       When at least one element is NaN, the corresponding min value will be
       NaN.

    Args:
        tensor (ndarray): Array to take the maximum.
        axis (int): Along which axis to take the maximum. The flattened array
            is used by default. Defaults to None.
        out (ndarray): Output array. Default to None.

        keepdims (bool): If ``True``, the axis is kept as an axis of
        size one. Default to False.

    Returns:
        ndarray: The maximum of ``tensor``, along the axis if specified.
    """

    # cupy doesn't support keepdims, so fall back to numpy when it is requested.
    if keepdims:
        return numpy.amin(tensor, axis=axis, out=out, keepdims=keepdims)
    else:
        return cp.amin(tensor, axis=axis, out=out, keepdims=keepdims)
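A minimal usage sketch of cupy.amin, which the wrapper above dispatches to (assuming a working CuPy installation); it shows the flattened default, axis-wise reduction, and the NaN propagation mentioned in the docstring:

import cupy as cp

a = cp.array([[3.0, 1.0], [2.0, 5.0]])
print(cp.amin(a))                        # 1.0: minimum over the flattened array
print(cp.amin(a, axis=0))                # [2. 1.]: column-wise minima
print(cp.amin(cp.array([1.0, float('nan')])))  # nan: a single NaN propagates to the result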
Example #2
 def __init__(self,
              xx,
              yy,
              minimum=xp.nan,
              maximum=xp.nan,
              name=None,
              latex_label=None,
              unit=None,
              boundary=None):
     self.xx = xp.asarray(xx)
     self.min_limit = float(xp.amin(self.xx))
     self.max_limit = float(xp.amax(self.xx))
     # In order to use np/cp.interp, we need to make sure that xx is ordered
     sorted_idxs = xp.argsort(self.xx)
     self.xx = self.xx[sorted_idxs]
     self._yy = xp.asarray(yy)[sorted_idxs]
     if self._yy.ndim != 1:
         raise TypeError("yy must be 1D. A {}-D array given.".format(
             self._yy.ndim))
     self.YY = None
     self.probability_density = None
     self.cumulative_distribution = None
     self.inverse_cumulative_distribution = None
     self.__all_interpolated = Interp(self.xx, self._yy)
     minimum = float(xp.nanmax(xp.array([self.min_limit, minimum])))
     maximum = float(xp.nanmin(xp.array([self.max_limit, maximum])))
     bilby.core.prior.Prior.__init__(self,
                                     name=name,
                                     latex_label=latex_label,
                                     unit=unit,
                                     minimum=minimum,
                                     maximum=maximum,
                                     boundary=boundary)
     self._update_instance()
Example #3
def calcDistField(point_file, h5name, save_location):
    data_file = h5py.File(h5name, 'r')
    data = data_file['data'][:]
    data_dim = data.shape[0]
    data_file.close()
    ptfile = h5py.File(point_file, 'r')
    sample_points = ptfile['points'][:]
    ptfile.close()
    sample_size = sample_points.shape[0]

    #gpu parallelization
    memory_pool = cupy.get_default_memory_pool()
    pinned_memory_pool = cupy.get_default_pinned_memory_pool()

    distancesgpu = numpy.zeros((data_dim, data.shape[1], sample_size))
    x = cupy.asarray(sample_points)
    allpts = cupy.tile(x, (data.shape[1], 1))
    blocks = int(numpy.ceil(sample_size * data.shape[1] / 8192))
    del x
    print(blocks)
    yy = cupy.asarray(data)
    for inst in range(data_dim):
        if inst % 200 == 0:
            print(inst)
        y = yy[inst]

        xx = allpts + cupy.tile(y, (1, sample_size)).reshape(-1, 3)
        xdot = cupy.sum(cupy.multiply(xx, xx), axis=1)
        dt = cupy.zeros((sample_size * data.shape[1], ))
        for blk in range(blocks):
            idstart = int(blk * 8192)
            idend = int((blk + 1) * 8192)

            dists = cupy.tile(xdot[idstart:idend], (y.shape[0], 1)).transpose(
            ) - 2 * cupy.matmul(xx[idstart:idend], y.transpose()) + cupy.tile(
                cupy.sum(cupy.multiply(y, y), axis=1).transpose(),
                (xx[idstart:idend].shape[0], 1))
            dt[idstart:idend] = cupy.amin(dists, axis=1)
            del dists
        dt = cupy.reshape(dt, (-1, sample_size))
        distancesgpu[inst] = cupy.asnumpy(dt)
        del dt
        del xx
        del xdot
    memory_pool.free_all_blocks()
    pinned_memory_pool.free_all_blocks()
    # save file
    saveh5 = h5py.File(save_location, 'w')
    saveh5.create_dataset('distances', data=distancesgpu)
    saveh5.close()
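The blocked loop above relies on the usual expansion ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2 followed by cupy.amin over the candidate points; a compact, self-contained sketch of that pairwise-distance-then-amin pattern (assuming CuPy; array sizes are illustrative only):

import cupy as cp

x = cp.random.rand(8192, 3)    # query points
y = cp.random.rand(1024, 3)    # reference (surface) points
sq_dists = (cp.sum(x * x, axis=1)[:, None]
            - 2 * cp.matmul(x, y.T)
            + cp.sum(y * y, axis=1)[None, :])
nearest = cp.amin(sq_dists, axis=1)  # squared distance to the closest reference point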
Example #4
def _terrain_cupy(data: cupy.ndarray, seed: int, x_range_scaled: tuple,
                  y_range_scaled: tuple, zfactor: int) -> cupy.ndarray:

    data = data * 0

    data[:] = _terrain_gpu(data,
                           seed,
                           x_range=x_range_scaled,
                           y_range=y_range_scaled)
    minimum = cupy.amin(data)
    maximum = cupy.amax(data)

    data[:] = (data - minimum) / (maximum - minimum)
    data[data < 0.3] = 0  # create water
    data *= zfactor

    return data
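The amin/amax pair above implements a standard min-max rescale to [0, 1] before the water threshold and z-factor are applied; a self-contained sketch of just that normalization step (assuming CuPy, values illustrative):

import cupy as cp

data = cp.array([4.0, 10.0, 7.0, 1.0])
minimum = cp.amin(data)
maximum = cp.amax(data)
data = (data - minimum) / (maximum - minimum)
print(data)  # [0.33333333 1.         0.66666667 0.        ]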
Example #5
def _perlin_cupy(data: cupy.ndarray, freq: tuple, seed: int) -> cupy.ndarray:

    # cupy.random.seed(seed)
    # p = cupy.random.permutation(2**20)

    # use numpy.random, then transfer the data to the GPU, to ensure the same
    # result when running with numpy-backed and cupy-backed data arrays.
    np.random.seed(seed)
    p = cupy.asarray(np.random.permutation(2**20))
    p = cupy.append(p, p)

    griddim, blockdim = cuda_args(data.shape)
    _perlin_gpu[griddim, blockdim](p, 0, freq[0], 0, freq[1], 1, data)

    minimum = cupy.amin(data)
    maximum = cupy.amax(data)
    data[:] = (data - minimum) / (maximum - minimum)
    return data
Example #6
    def clac_all_distance(self, pos, pts):

        ### Calc Distance with Cupy

        ### Generate Vector
        v = pos - pts
        # print("v.shape :", v.shape)
        # print(v)

        vt = v.T

        ### Calc Distance
        d = cp.sqrt((vt[0] * vt[0]) + (vt[1] * vt[1]) + (vt[2] * vt[2]))
        # print("d.shape :", d.shape)

        ### Select Min Value
        dm_cp = cp.amin(d, axis=0)
        # print("dm.shape :", dm_cp.shape)

        return dm_cp
Example #7
def smear(f, fb, pairs):
    """
    Build the smear matrix B for back projection (BP).

    Parameters
    ----------
    f : NDArray
        potential on nodes
    fb : NDArray
        potential on adjacent electrodes
    pairs : NDArray
        electrodes numbering pairs

    Returns
    -------
    NDArray
        back-projection matrix
    """
    #b_matrix = np.empty(size=(len(pairs), len(f)))
    #t1 = time()
    #b_matrix = []
    f = cp.array(f)
    fb = cp.array(fb)
    pairs = cp.array(pairs)
    i = cp.arange(len(pairs))
    min_fb = cp.amin(fb[pairs], axis=1)
    max_fb = cp.amax(fb[pairs], axis=1)
    b_matrix = cp.empty((len(pairs), len(f)))
    #index[i, :] = (min_fb[i] < f.all()) & (f.all() <= max_fb[i])
    b_matrix[:] = (min_fb[i, None] < f[None]) & (f[None] <= max_fb[i, None])
    #t2 = time()
    '''
    for i, j in pairs:
        f_min, f_max = min(fb[i], fb[j]), max(fb[i], fb[j])
        b_matrix.append((f_min < f) & (f <= f_max))
    b_matrix = np.array(b_matrix)
    '''
    #print("matrices: ", t2 - t1)
    #print("their loop ", time() - t2)
    return cp.asnumpy(b_matrix)
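A small check of the vectorized smearing above, with toy values chosen for illustration (the real f and fb come from the forward solver; a CuPy-capable environment is assumed, since smear moves the arrays to the GPU):

import numpy as np

f = np.array([0.1, 0.4, 0.7])       # node potentials
fb = np.array([0.0, 0.5, 1.0])      # potentials on the electrodes
pairs = np.array([[0, 1], [1, 2]])  # electrode index pairs
b = smear(f, fb, pairs)
# row k is 1 where min(fb[pairs[k]]) < f <= max(fb[pairs[k]])
print(b)  # [[1. 1. 0.]
          #  [0. 0. 1.]]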
Example #8
def estimate_stats(voltages, stats_calc_num_samples=10000):
    """
    Estimate mean and standard deviation, truncating to at most `stats_calc_num_samples` samples 
    to reduce computation.

    Parameters
    ----------
    voltages : array
        Array of voltages
    stats_calc_num_samples : int, optional
        Maximum number of samples for use in estimating noise statistics
        
    Returns
    -------
    data_mean : float
        Mean of voltages
    data_sigma : float
        Standard deviation of voltages
    """
    calc_len = xp.amin(xp.array([stats_calc_num_samples, len(voltages)]))
    data_sigma = xp.std(voltages[:calc_len])
    data_mean = xp.mean(voltages[:calc_len])

    return data_mean, data_sigma
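A brief usage sketch (here with NumPy standing in for the module's xp backend, which may be NumPy or CuPy; the values are illustrative):

import numpy as xp

voltages = xp.random.normal(0.0, 2.0, size=100000)
data_mean, data_sigma = estimate_stats(voltages, stats_calc_num_samples=10000)
print(data_mean, data_sigma)  # statistics estimated from the first 10000 samples only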
Example #9
def getNextPrediction(fileJac: str, measuring_electrodes: np.ndarray, voltages: np.ndarray, 
              num_returned: int=10, n_el: int=20, n_per_el: int=3, n_pix: int=64, pert: float=0.5, 
              p_influence: float=-10., p_rec: float=10., p: float=0.2, lamb:float=0.1) -> np.ndarray:
    # extract const permittivity jacobian and voltage (& other)
    file = h5.File(fileJac, 'r')

    meas = file['meas'][()]
    new_ind = file['new_ind'][()]
    nodes = file['p'][()]  # mesh node coordinates; renamed so the hyperparameter p is not shadowed
    t = file['t'][()]
    file.close()
    # initialise const permittivity and el_pos variables
    perm = np.ones(t.shape[0], dtype=np.float32)
    el_pos = np.arange(n_el * n_per_el).astype(np.int16)
    mesh_obj = {'element': t,
        'node':    nodes,
        'perm':    perm}
    # list all possible active/measuring electrode permutations of this measurement
    meas = cp.array(meas)
    # find their indices in the already calculated const. permittivity Jacobian (CPJ)
    measuring_electrodes = cp.array(measuring_electrodes)
    measurements_0 = cp.amin(measuring_electrodes[:, :2], axis=1)
    measurements_1 = cp.amax(measuring_electrodes[:, :2], axis=1)
    measurements_2 = cp.amin(measuring_electrodes[:, 2:], axis=1)
    measurements_3 = cp.amax(measuring_electrodes[:, 2:], axis=1)
    measuring_electrodes = cp.empty((len(measuring_electrodes), 4))
    measuring_electrodes[:, 0] = measurements_0
    measuring_electrodes[:, 1] = measurements_1
    measuring_electrodes[:, 2] = measurements_2
    measuring_electrodes[:, 3] = measurements_3
    index = (cp.sum(cp.equal(measuring_electrodes[:, None, :], meas[None, :, :]), axis=2) == 4)
    index = cp.where(index)
    #print(index)
    ind = cp.unique(index[1])
    #print(ind)
    i = cp.asnumpy(ind)
    j = index[0]
    mask = np.zeros(len(meas), dtype=int)
    mask[i] = 1
    mask = mask.astype(bool)
    # take a slice of Jacobian, voltage readings and B matrix (the one corresponding to the performed measurements)
    file = h5.File(fileJac, 'r')
    jac = file['jac'][mask, :][()]
    v = file['v'][mask][()]
    b = file['b'][mask, :][()]
    file.close()
    # put them in the form desired by the GREIT function
    pde_result = train.namedtuple("pde_result", ['jac', 'v', 'b_matrix'])
    f = pde_result(jac=jac,
           v=v,
           b_matrix=b)
    
    # now we can use the real voltage readings and the GREIT algorithm to reconstruct
    greit = train.greit.GREIT(mesh_obj, el_pos, f=f, ex_mat=(meas[index[1], :2]), step=None)
    greit.setup(p=p, lamb=lamb, n=n_pix)
    h_mat = greit.H
    reconstruction = greit.solve(voltages, f.v).reshape(n_pix, n_pix)
    # fix_electrodes_multiple is in meshing.py
    _, el_coords = train.fix_electrodes_multiple(centre=None, edgeX=0.1, edgeY=0.1, a=2, b=2, ppl=n_el, el_width=0.02, num_per_el=3)
    # find the distances between each existing electrode pair and the pixels lying on the line that connects them
    pixel_indices, voltage_all_possible = measopt.find_all_distances(reconstruction, h_mat, el_coords, n_el, cutoff=0.8)
    # call function get_total_map that generates the influence map, the gradient map and the log-reconstruction
    total_map, grad_mat, rec_log = np.abs(measopt.get_total_map(reconstruction, voltages, h_mat, pert=pert, p_influence=p_influence, p_rec=p_rec))
    # get the indices of the total map along the lines connecting each possible electrode pair
    total_maps_along_lines = total_map[None] * pixel_indices
    # find how close each connecting line passes to the boundary of an anomaly (where gradient supposed to be higher)
    proximity_to_boundary = np.sum(total_maps_along_lines, axis=(1, 2)) / np.sum(pixel_indices, axis=(1, 2))
    # rate the possible src-sink pairs by their proximity to existing anomalies
    proposed_ex_line = voltage_all_possible[np.argsort(proximity_to_boundary)[::-1]][:num_returned]

    number_of_voltages = 10
    # generate the voltage measuring electrodes for this current driver pair
    proposed_voltage_pairs = measopt.findNextVoltagePair(proposed_ex_line[0], fileJac, total_map, number_of_voltages, 0, npix=n_pix, cutoff=0.97)
    return proposed_ex_line, proposed_voltage_pairs, reconstruction, total_map
Example #10
def simulateMeasurements(fileJac, anomaly=0, measurements=None, v_meas=None, n_el=20, n_per_el=3, n_pix=64, a=2.):
	# extract const permittivity jacobian and voltage (& other)
	file = h5.File(fileJac, 'r')

	meas = file['meas'][()]
	new_ind = file['new_ind'][()]
	p = file['p'][()]
	t = file['t'][()]
	file.close()
	# initialise const permittivity and el_pos variables
	perm = np.ones(t.shape[0], dtype=np.float32)
	el_pos = np.arange(n_el * n_per_el).astype(np.int16)
	mesh_obj = {'element': t,
				'node':	p,
				'perm':	perm}

	#for testing
	if measurements is None:
		el_dist = np.random.randint(1, 20)
		ex_mat = (cp.concatenate((cp.arange(20)[None], (cp.arange(20) + el_dist)[None])) % 20).T
		#print(ex_mat.shape)
		fem_all = Forward(mesh_obj, el_pos)
		measurements = fem_all.voltMeter(ex_mat)
		#ex_mat = mesurements[1]
		measurements = cp.concatenate((measurements[1], measurements[0]), axis=1)
		#print(measurements.shape)
	# list all possible active/measuring electrode permutations of this measurement
	meas = cp.array(meas)
	# find their indices in the already calculated const. permittivity Jacobian (CPJ)
	measurements = cp.array(measurements)
	measurements_0 = cp.amin(measurements[:, :2], axis=1)
	measurements_1 = cp.amax(measurements[:, :2], axis=1)
	measurements_2 = cp.amin(measurements[:, 2:], axis=1)
	measurements_3 = cp.amax(measurements[:, 2:], axis=1)
	measurements = cp.empty((len(measurements), 4))
	measurements[:, 0] = measurements_0
	measurements[:, 1] = measurements_1
	measurements[:, 2] = measurements_2
	measurements[:, 3] = measurements_3
	index = (cp.sum(cp.equal(measurements[:, None, :], meas[None, :, :]), axis=2) == 4)
	index = cp.where(index)
	ind = cp.unique(index[1])
	i = cp.asnumpy(ind)
	j = index[0]
	mask = np.zeros(len(meas), dtype=int)
	mask[i] = 1
	mask = mask.astype(bool)
	# take a slice of Jacobian, voltage readings and B matrix
	file = h5.File(fileJac, 'r')
	jac = file['jac'][mask, :][()]
	v = file['v'][mask][()]
	b = file['b'][mask, :][()]
	file.close()
	pde_result = train.namedtuple("pde_result", ['jac', 'v', 'b_matrix'])
	f = pde_result(jac=jac,
				   v=v,
				   b_matrix=b)
	
	# simulate voltage readings if not given
	if v_meas is None:
		if np.isscalar(anomaly):
			print("generating new anomaly")
			anomaly = train.generate_anoms(a, a)
		true = train.generate_examplary_output(a, int(n_pix), anomaly)
		mesh_new = train.set_perm(mesh_obj, anomaly=anomaly, background=1)
		fem = FEM(mesh_obj, el_pos, n_el)
		new_ind = cp.array(new_ind)
		f2, raw = fem.solve_eit(volt_mat_all=meas[ind, 2:], new_ind=new_ind[ind], ex_mat=meas[ind, :2], parser=None, perm=mesh_new['perm'].astype('f8'))
		v_meas = f2.v
		'''
		#plot
		fig = plt.figure(3)
		x, y = p[:, 0], p[:, 1]
		ax1 = fig.add_subplot(111)
		# draw equi-potential lines
		print(raw.shape)
		raw = cp.asnumpy(raw[5]).ravel()
		vf = np.linspace(min(raw), max(raw), 32)
		ax1.tricontour(x, y, t, raw, vf, cmap=plt.cm.viridis)
		# draw mesh structure
		ax1.tripcolor(x, y, t, np.real(perm),
					  edgecolors='k', shading='flat', alpha=0.5,
					  cmap=plt.cm.Greys)

		ax1.plot(x[el_pos], y[el_pos], 'ro')
		for i, e in enumerate(el_pos):
			ax1.text(x[e], y[e], str(i+1), size=12)
		ax1.set_title('Equipotential Lines of Uniform Permittivity')
		# clean up
		ax1.set_aspect('equal')
		ax1.set_ylim([-1.2, 1.2])
		ax1.set_xlim([-1.2, 1.2])
		fig.set_size_inches(6, 6)
		#plt.show()'''
	elif len(measurements) == len(v_meas):
		measurements = np.array(measurements)
		v_meas = np.array(v_meas[j[:len(ind)]])
	else:
		raise ValueError('Sizes of arrays do not match (have to have voltage reading for each measurement). If you don\'t have readings, leave empty for simulation.')
	print('Number of measurements:', len(v_meas), len(f.v))

	# now we can use the real voltage readings and the GREIT algorithm to reconstruct
	greit = train.greit.GREIT(mesh_obj, el_pos, f=f, ex_mat=(meas[index[1], :2]), step=None)
	greit.setup(p=0.2, lamb=0.01, n=n_pix)
	h_mat = greit.H
	reconstruction = greit.solve(v_meas, f.v).reshape(n_pix, n_pix)
	
	# optional: see reconstruction
	'''
	plt.figure(1)
	im1 = plt.imshow(reconstruction, cmap=plt.cm.viridis, origin='lower', extent=[-1, 1, -1, 1])
	plt.title("Reconstruction")
	plt.colorbar(im1)
	plt.figure(2)
	im2 = plt.imshow(true, cmap=plt.cm.viridis, origin='lower', extent=[-1, 1, -1, 1])
	plt.colorbar(im2)
	plt.title("True Image")
	plt.show()
	'''
	return reconstruction, h_mat, v_meas, f.v, true, len(v_meas)
Example #11
concatenate = cp.concatenate

sign = cp.sign

argmax = cp.argmax

zeros_like = cp.zeros_like

all = cp.all

var = cp.var

allclose = cp.allclose

# ptp emulation: definition extracted from numpy
ptp = lambda x, axis=None: cp.subtract(cp.amax(x, axis), cp.amin(x, axis))

count_nonzero = cp.count_nonzero

arange = np.arange

sin = cp.sin

cos = cp.cos

isscalar = cp.isscalar

std = cp.std

ceil = cp.ceil
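A quick check of the ptp emulation defined above (assuming CuPy is available); the results should match numpy.ptp on the same data:

import cupy as cp

ptp = lambda x, axis=None: cp.subtract(cp.amax(x, axis), cp.amin(x, axis))
x = cp.array([[1.0, 5.0, 3.0], [2.0, 8.0, 0.0]])
print(ptp(x))          # 8.0: peak-to-peak over the flattened array
print(ptp(x, axis=0))  # [1. 3. 3.]: per-column peak-to-peak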
Example #12
                Tij_shift_rot[0, :, x, y] = nume / deno

                nume = cp.absolute(temp_dens[:, :, :] -
                                   cp_dens_rot_shift[:, :, :])
                nume = cp.sum(nume, axis=(1, 2))
                deno = cp.absolute(temp_dens[:, :, :] +
                                   cp_dens_rot_shift[:, :, :])
                deno = cp.sum(deno, axis=(1, 2))
                Tij_shift_rot[1, :, x, y] = nume / deno

    #	print(Tij_shift_rot[0,0,i_shift,i_shift])
        Tij_shift_rot[:, i, :, :] = 100.0
        #	Tij_shift_rot=cp.where(Tij_shift_rot==0,100,Tij_shift_rot)
        #	print(Tij_shift_rot[0,0,i_shift,i_shift])

        Tij[i, :] = cp.amin(Tij_shift_rot, axis=(0, 2, 3))

    #index=cp.unravel_index(cp.argmin(Tij), Tij.shape)
    index = cp.argmin(Tij)
    index_0 = index // sta_dens
    index_1 = index % sta_dens

    #print(cp.amin(Tij))
    #print(index)
    #print(index_0)
    #print(index_1)
    #print(Tij[index_0,index_1])
    #print(Tij[index_1,index_0])

    # create csv file
    with open(csv_path, mode='w') as log:
Example #13
    def General_n_Balance_n_Collision(self,
                                      _new_path,
                                      length_only = True,
                                      GPU_accelerating = False,
                                      GPU_accelerating_data = None,
                                      matrix_data = None):
        ITC = {}
        max_ITC = 1
        min_ITC = sys.maxsize
        total_cost = 0
        max_order = 0
        total_order = 0

        standard_index = self.tools.GetWidth()**2 + self.tools.GetHeight()**2

        # Parallelization
        if GPU_accelerating and length_only:
            n_AGV, population_size = GPU_accelerating_data

            T_matrix, S_matrix = matrix_data

            T_matrix = cp.array(T_matrix)
            S_matrix = cp.array(S_matrix)

            ITC_matrix = cp.reshape(cp.dot(T_matrix, cp.array([[1],[1]])), (population_size, n_AGV))
            O_matrix = cp.reshape(cp.dot(T_matrix, cp.array([[0],[1]])), (population_size, n_AGV))
            TC_matrix = cp.reshape(cp.dot(ITC_matrix, cp.ones((n_AGV, 1))), (population_size))
            TO_matrix = cp.reshape(cp.dot(O_matrix, cp.ones((n_AGV, 1))), (population_size))

            max_ITC_matrix = cp.amax(ITC_matrix, axis=1)
            min_ITC_matrix = cp.amin(ITC_matrix, axis=1)
            max_order_matrix = cp.amax(O_matrix, axis=1)

            _, n_order_points, _  = S_matrix.shape
            
            t_m = cp.reshape(cp.dot(S_matrix, cp.array([[[1],[0],[0],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            x_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[1],[0],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            y_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[0],[1],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            
            d_m = cp.sum(cp.sqrt(cp.square(t_m - cp.transpose(t_m, (0, 2, 1)))
                                 + cp.square(x_m - cp.transpose(x_m, (0, 2, 1)))
                                 + cp.square(y_m - cp.transpose(y_m, (0, 2, 1)))),
                         (1,2))

            d_m_max = cp.multiply(cp.sqrt(cp.add(cp.square(cp.subtract(cp.amax(t_m, (1, 2)),
                                                                       cp.amin(t_m, (1, 2)))),
                                                 standard_index)),
                                  (n_order_points**2))
            
            G1 = max_order_matrix/max_ITC_matrix
            G2 = TO_matrix/TC_matrix
            BU = min_ITC_matrix/max_ITC_matrix
            CI = cp.multiply(d_m/d_m_max, BU)
            
            E_matrix = G1 + G2 + BU + CI
            
            cp.cuda.Stream.null.synchronize()

            return (list(E_matrix), (list(max_ITC_matrix), list(TC_matrix), list(BU), list(CI)))

        # Non-parallelization
        else:
            for each_AGV_ID in _new_path.keys():
                each_AGV_len_schedule = 0
                each_AGV_num_orders = 0

                if length_only:
                    each_AGV_len_schedule, each_num_order, each_order_list = _new_path[each_AGV_ID]
                    each_AGV_num_orders = each_num_order
                else:
                    each_path = _new_path[each_AGV_ID]
                    for each_pos_path in each_path:
                        if len(each_pos_path) == 3:
                            each_AGV_num_orders += 1
                        each_AGV_len_schedule += 1
                    
                cost = each_AGV_len_schedule + each_AGV_num_orders
                ITC[each_AGV_ID] = cost
                
                if each_AGV_num_orders > max_order:
                    max_order = each_AGV_num_orders
                total_order += each_AGV_num_orders
        
            for _, each_value in ITC.items():
                
                if each_value > max_ITC:
                    max_ITC = each_value
                if each_value < min_ITC:
                    min_ITC = each_value
                total_cost += each_value

            TT = max_ITC
            TTC = total_cost
            BU = min_ITC / max_ITC
            CI = self.eval_collision.Update(_new_path, length_only) * BU
            
            G1 = max_order/TT
            G2 = total_order/TTC
            
            value = G1 + G2 + BU + CI
            return (value, (TT, TTC, BU, CI))
Example #14
    def General_n_Balance_n_Collision_Eff(self,
                                      _new_path,
                                      length_only = True,
                                      GPU_accelerating = False,
                                      GPU_accelerating_data = None,
                                      matrix_data = None):
        ITC = {}
        max_ITC = 1
        min_ITC = sys.maxsize
        total_cost = 0
        max_order = 0
        total_order = 0

        standard_index = self.tools.GetWidth()**2 + self.tools.GetHeight()**2

        # Parallelization
        if GPU_accelerating and length_only:
            n_AGV, population_size = GPU_accelerating_data

            T_matrix, S_matrix = matrix_data

            T_matrix = cp.array(T_matrix)
            S_matrix = cp.array(np.array(S_matrix).astype(float))

            ITC_matrix = cp.reshape(cp.dot(T_matrix, cp.array([[1],[1]])), (population_size, n_AGV))
            O_matrix = cp.reshape(cp.dot(T_matrix, cp.array([[0],[1]])), (population_size, n_AGV))
            TC_matrix = cp.reshape(cp.dot(ITC_matrix, cp.ones((n_AGV, 1))), (population_size))
            TO_matrix = cp.reshape(cp.dot(O_matrix, cp.ones((n_AGV, 1))), (population_size))

            max_ITC_matrix = cp.amax(ITC_matrix, axis=1)
            min_ITC_matrix = cp.amin(ITC_matrix, axis=1)
            max_order_matrix = cp.amax(O_matrix, axis=1)
            
            _, n_order_points, _  = S_matrix.shape
            
            t_m = cp.reshape(cp.dot(S_matrix, cp.array([[[1],[0],[0],[0],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            x_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[1],[0],[0],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            y_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[0],[1],[0],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            l_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[0],[0],[1],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            o_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[0],[0],[0],[1]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            t_m_l = cp.reshape(cp.dot(S_matrix, cp.array([[[1],[0],[0],[0],[0]]])),
                               (population_size, n_order_points))

            t_m_diff = t_m - cp.transpose(t_m, (0, 2, 1))
            x_m_diff = x_m - cp.transpose(x_m, (0, 2, 1))
            y_m_diff = y_m - cp.transpose(y_m, (0, 2, 1))

            m_xy_diff = cp.absolute(x_m_diff) + cp.absolute(y_m_diff)

            m_diff = cp.absolute(t_m_diff) + m_xy_diff
            
            m_diff_l = m_diff - l_m * 2
            
            m_diff_l_sign = (cp.logical_xor(cp.sign(m_diff_l) + 1, True))

            m_diff_l_eff = cp.multiply(m_diff, m_diff_l_sign)

            m_diff_l_sign = cp.sign(m_diff_l_eff)

            m_diff_l_H = cp.multiply(cp.multiply(cp.reciprocal(m_diff_l_eff + m_diff_l_sign - 1), m_diff_l_sign),
                                     cp.log10(m_diff_l_eff + cp.absolute(m_diff_l_sign - 1)))
            
            d_m = cp.reciprocal(cp.sum(m_diff_l_H,
                                       (1,2)))

            # Occupancy test
            """
            t_m_o = t_m + o_m - 1
            m_diff_o = cp.absolute(t_m_o - cp.transpose(t_m_o, (0, 2, 1))) - o_m - 1
            m_occupancy = (cp.logical_xor(cp.sign(m_diff_o) + 1, True))
            
            m_idn = cp.identity(n_order_points)
            OT = cp.prod(cp.logical_or(m_xy_diff,
                                       cp.logical_not(m_occupancy - m_idn)),
                         (1,2))
            """
            
            G1 = max_order_matrix/max_ITC_matrix
            G2 = TO_matrix/TC_matrix
            BU = min_ITC_matrix/max_ITC_matrix
            CI = cp.multiply(d_m, BU) # d_m * 0.1
            
            E_matrix = G1 + G2 + BU + CI
            
            cp.cuda.Stream.null.synchronize()

            return (list(E_matrix), (list(max_ITC_matrix), list(TC_matrix), list(BU), list(CI)))

        # Non-parallelization
        else:
            print("[Scheduling] Must be use GPU to calculate")
            
            for each_AGV_ID in _new_path.keys():
                each_AGV_len_schedule = 0
                each_AGV_num_orders = 0

                if length_only:
                    each_AGV_len_schedule, each_num_order, each_order_list = _new_path[each_AGV_ID]
                    each_AGV_num_orders = each_num_order
                else:
                    each_path = _new_path[each_AGV_ID]
                    for each_pos_path in each_path:
                        if len(each_pos_path) == 3:
                            each_AGV_num_orders += 1
                        each_AGV_len_schedule += 1
                    
                cost = each_AGV_len_schedule + each_AGV_num_orders
                ITC[each_AGV_ID] = cost
                
                if each_AGV_num_orders > max_order:
                    max_order = each_AGV_num_orders
                total_order += each_AGV_num_orders
        
            for _, each_value in ITC.items():
                
                if each_value > max_ITC:
                    max_ITC = each_value
                if each_value < min_ITC:
                    min_ITC = each_value
                total_cost += each_value

            TT = max_ITC
            TTC = total_cost
            BU = min_ITC / max_ITC
            CI = 0
            
            G1 = max_order/TT
            G2 = total_order/TTC
            
            value = G1 + G2 + BU + CI
            return (value, (TT, TTC, BU, CI))
Example #15
            A_prime_np = cp.asnumpy(A_prime)
            B_j_prime_np = cp.asnumpy(B_j_prime)
            X = A_prime_np.T.dot(C_pca_vec)
            Y_j = B_j_prime_np.T.dot(C_pca_vec)
            # project C to PCA
            C_j_prime_np = cp.asnumpy(C_j_prime)
            Z_j = C_j_prime_np.dot(C_pca_vec)
            # Step 3a: standardization of anomalies
            Z_j_sd = Z_j.std(axis=0)
            X_prime = X/Z_j_sd
            Y_j_prime = Y_j/Z_j_sd
            for int_loop in range(0, len(dist_id_sel)):
                distances[int_loop, ref_dist] = cp.linalg.norm(
                    X_prime[int_loop]-Y_j_prime)
                print(ref_dist, int_loop, B_ctrl, mdl_c)
        min_dist_val = cp.amin(distances, axis=0)
        min_dist_ind = []
        for min_ind in range(0, min_dist_val.shape[0]):
            min_dist_ind = np.append(min_dist_ind, cp.where(
                distances[:, min_ind] == cp.amin(min_dist_val[min_ind])))
            #print("Future", dist_names_valid[min_ind], "is analogous to current", dist_names_valid[int(locals()[mdl+'min_dist_ind'][min_ind])], file=f)
            print(dist_names_valid[min_ind], file=f_f_d_na)
            print(min_ind, file=f_f_d_id)
            print(dist_names_valid[int(min_dist_ind[min_ind])], file=f_c_d_na)
            print(min_dist_ind[min_ind], file=f_c_d_id)
            print(mdl, file=f_mdl)
        B_ctrl += 1
    mdl_c += 1

f.close()
f_f_d_na.close()
Example #16
def eegstats(signals, samples, statistic):

    import cupy as cp
    from scipy.stats import skew, kurtosis

    if statistic == 'mean':
        means = cp.zeros(samples)
        for i in range(len(signals)):
            means[i] = cp.mean(signals[i])
        return means

    elif statistic == 'std':
        std = cp.zeros(samples)
        for i in range(len(signals)):
            std[i] = cp.std(signals[i])
        return std

    elif statistic == 'skewness':
        skewness = cp.zeros(samples)
        for i in range(len(signals)):
            skewness[i] = skew(signals[i])
        return skewness

    elif statistic == 'kurtosis':
        kurt = cp.zeros(samples)
        for i in range(len(signals)):
            kurt[i] = kurtosis(signals[i])
        return kurt

    elif statistic == 'maximum':
        maxim = cp.zeros(samples)
        for i in range(len(signals)):
            maxim[i] = cp.amax(signals[i])
        return maxim

    elif statistic == 'minimum':
        minim = cp.zeros(samples)
        for i in range(len(signals)):
            minim[i] = cp.amin(signals[i])
        return minim
    ########
    elif statistic == 'n5':
        n5 = cp.zeros(samples)
        for i in range(len(signals)):
            n5[i] = cp.percentile(cp.asarray(signals[i]), 5)
        return n5

    elif statistic == 'n25':
        n25 = cp.zeros(samples)
        for i in range(len(signals)):
            n25[i] = cp.percentile(cp.asarray(signals[i]), 25)
        return n25

    elif statistic == 'n75':
        n75 = cp.zeros(samples)
        for i in range(len(signals)):
            n75[i] = cp.percentile(cp.asarray(signals[i]), 75)
        return n75

    elif statistic == 'n95':
        n95 = cp.zeros(samples)
        for i in range(len(signals)):
            n95[i] = cp.percentile(cp.asarray(signals[i]), 95)
        return n95

    elif statistic == 'median':
        median = cp.zeros(samples)
        for i in range(len(signals)):
            median[i] = cp.percentile(cp.asarray(signals[i]), 50)
        return median

    elif statistic == 'variance':
        variance = cp.zeros(samples)
        for i in range(len(signals)):
            variance[i] = cp.var(cp.asarray(signals[i]))
        return variance

    elif statistic == 'rms':
        rms = cp.zeros(samples)
        for i in range(len(signals)):
            rms[i] = cp.mean(cp.sqrt(cp.asarray(signals[i])**2))
        return rms
Example #17
def bptt(x2, y2, iteration, local_time, region, isFirst, timestamp,
         satellite_name):
    x = cp.asarray(x2)
    y = cp.asarray(y2)

    global connected_weights
    global main_kernel
    global bias_y
    global e_kernel
    global learning_rate
    global v_connected_weights
    global bias_h
    global bias_e
    global bias_v

    # Perform forward prop

    global net_loss
    global learning_rate
    global learning_rate_counter

    #CHANGE
    prediction, pre_sigmoid_prediction, hidden_prediction, p, h, s, e, alpha, xtemp = forward_prop(
        x, local_time, 0, False, timestamp, "SAME")

    # Any NDVI of 0 is water, and will remain water. NDVI only applies to vegetation, so make the prediction 0 at every point where the previous NDVI is 0.
    loss = calculate_loss(prediction, y[0])
    print("LOSS BEFORE: ")
    print(loss)
    # Calculate loss with respect to final layer
    dLdy_2 = loss_derivative(prediction, y[0])
    # Calculate loss with respect to pre sigmoid layer
    dLdy_1 = cp.multiply(rect_linear_derivative(pre_sigmoid_prediction),
                         dLdy_2)
    # Calculate loss with respect to last layer of lstm
    testArr = cp.reshape(
        cp.matmul(cp.transpose(connected_weights), dLdy_1.reshape(1, M * N)),
        (channels_hidden, M, N))
    dLdh = testArr  # initial value of dLdh

    dLdw_0 = cp.matmul(dLdy_1.reshape(1, M * N),
                       hidden_prediction.transpose(1, 0))

    # Calculate loss with respect to bias y
    dLdb_y = dLdy_1
    #--------------------fully connected------------------
    bias_y = bias_y - learning_rate * dLdb_y
    connected_weights = connected_weights - learning_rate * dLdw_0

    # Initialize weight matrices
    dLdW = cp.zeros([
        channels_hidden, channels_p + channels_img + channels_hidden,
        kernel_dimension, kernel_dimension
    ])
    dLdW_v = cp.zeros([channels_hidden * M * N])
    dLdW_e = cp.zeros([
        channels_hidden, channels_img + channels_hidden, kernel_dimension,
        kernel_dimension
    ])

    # initialize biases
    dLdb_e = cp.zeros([channels_hidden, M, N])
    dLdb_h = cp.zeros([channels_hidden, M, N])
    dLdb_v = cp.zeros([distance_forward])

    for t in cp.arange(local_time - 1, -1, -1):
        dLdh = cp.multiply(dLdh, (cp.ones(
            (channels_hidden, M, N)) - cp.multiply(h[t], h[t])))  #dLdh_hat

        temporary = cp.concatenate(
            (x[t], p[t], h[t - 1]),
            axis=0).reshape(channels_hidden + channels_img + channels_p, 1, M,
                            N)

        dLdI = cp.asarray(
            F.convolution_2d(
                dLdh.reshape(1, channels_hidden, M, N),
                main_kernel.transpose(1, 0, 2, 3),
                b=None,
                pad=2)[0].data)  # reshape into flipped kernel dimensions
        dLdW_temp = cp.asarray(
            (F.convolution_2d(temporary,
                              dLdh.reshape(channels_hidden, 1, M, N),
                              b=None,
                              pad=2).data).transpose(
                                  1, 0, 2, 3))  #reshape into kernel dimensions

        #create dLdp, which is the derivative of loss with respect to p
        dLdp = dLdI[channels_img:channels_img + channels_p]
        #------------------------------------------ATTENTION BACKPROPAGATION CODE-------------------------------------
        dLdAlpha = cp.zeros(distance_forward)
        for k in range(0, distance_forward):
            dLdAlpha[k] = cp.sum(dLdp * cp.asarray(xtemp[k]))

        dLde = dLdAlpha * softmax_derivative(e[t])
        dLdW_v_temp = cp.zeros([channels_hidden * M * N])
        dLdW_e_temp = cp.zeros([
            channels_hidden, channels_img + channels_hidden, kernel_dimension,
            kernel_dimension
        ])
        dLdh_temp = cp.zeros([channels_hidden, M, N])
        for k in range(0, distance_forward):
            dLdW_v_temp += dLde[k] * s[t][k].reshape((M * N * channels_hidden))
            dLds = dLde[
                k] * v_connected_weights  # Changes each iteration of nested loop
            dLds = dLds.reshape((channels_hidden, M, N))
            dLds = cp.multiply(dLds, (cp.ones(
                (channels_hidden, M, N)) - cp.multiply(s[t][k], s[t][k])))
            temp3 = cp.concatenate((cp.asarray(
                satellite_images[region][k + timestamp - distance]), h[t - 1]),
                                   axis=0)

            dLdI_e = cp.asarray(
                F.convolution_2d(dLds.reshape(1, channels_hidden, M, N),
                                 e_kernel.transpose(1, 0, 2, 3),
                                 b=None,
                                 pad=pad_constant)
                [0].data)  # reshape into flipped kernel dimensions
            dLdW_e_temp += cp.asarray(
                (F.convolution_2d(temp3.reshape(channels_hidden + channels_img,
                                                1, M, N),
                                  dLds.reshape(channels_hidden, 1, M, N),
                                  b=None,
                                  pad=pad_constant).data).transpose(
                                      1, 0, 2,
                                      3))  #reshape into kernel dimensions
            dLdh_temp += dLdI_e[channels_img:channels_img + channels_hidden]
            if cp.amax(dLds) > 1 or cp.amin(dLds) < -1:
                dLds = dLds / cp.linalg.norm(dLds)
            dLdb_e += dLds

        #---------------------------------------------UPDATE DERIVATIVES-------------------------------------
        dLdW += dLdW_temp
        dLdb_h += dLdh
        dLdb_v += dLde.reshape([distance_forward])
        # Reinitialize
        dLdh = dLdI[channels_img + channels_p:channels_img + channels_p +
                    channels_hidden]

    #Clip all gradients again
    if cp.linalg.norm(dLdW) > clip_threshold:
        dLdW = dLdW * clip_threshold / cp.linalg.norm(dLdW)
    if cp.linalg.norm(dLdW_e) > clip_threshold:
        dLdW_e = dLdW_e * clip_threshold / cp.linalg.norm(dLdW_e)
    if cp.linalg.norm(dLdW_v) > clip_threshold:
        dLdW_v = dLdW_v * clip_threshold / cp.linalg.norm(dLdW_v)
    if cp.linalg.norm(dLdb_h) > clip_threshold:
        dLdb_h = dLdb_h * clip_threshold / cp.linalg.norm(dLdb_h)
    if cp.linalg.norm(dLdb_e) > clip_threshold:
        dLdb_e = dLdb_e * clip_threshold / cp.linalg.norm(dLdb_e)
    if cp.linalg.norm(dLdb_v) > clip_threshold:
        dLdb_v = dLdb_v * clip_threshold / cp.linalg.norm(dLdb_v)

    #---------------------------------------UPDATE WEIGHTS----------------------------------
    #---------------------update main kernel---------
    main_kernel = main_kernel - learning_rate * dLdW
    #---------------------update e kernel---------
    e_kernel = e_kernel - learning_rate * dLdW_e
    #---------------------update v_connected_weights---------
    v_connected_weights = v_connected_weights - learning_rate * dLdW_v
    #--------------------update bias h-----------------------
    bias_h = bias_h - learning_rate * dLdb_h
    #--------------------update bias e-----------------------
    bias_e = bias_e - learning_rate * dLdb_e
    #--------------------update bias v-----------------------
    bias_v = bias_v - learning_rate * dLdb_v

    prediction2, pre_sigmoid_prediction2, hidden_prediction2, p2, h2, s2, e2, alpha2, xtemp2 = forward_prop(
        x, local_time, 0, False, timestamp, "SAME")
    loss2 = calculate_loss(prediction2, y[0])
    print("LOSS AFTER: ")
    print(loss2)
    loss3 = calculate_loss(prediction2, y[0])
    rms3 = rootmeansquare(unnormalize_cp(prediction2, ndviMean, ndviStdDev),
                          unnormalize_cp(y[0], ndviMean, ndviStdDev))
    print("LOSS AFTER WATER: ")
    print(loss3)
    f2 = open("loss.txt", "a")
    f2.write(str(rms3) + "\n")

    learning_rate_counter += 1
    net_loss += (loss2 - loss)
    if learning_rate_counter == 10:
        print(
            "----------------------------NET LOSS OF 10 EXAMPLES-----------------------------"
        )
        print(net_loss)
        learning_rate_counter = 0
        #if net_loss > 0:
        #learning_rate = learning_rate * 0.8
        net_loss = 0

    print("backpropagation complete")
Example #18
def H_ev_vec(x, W):
    H = create_H_vec_cp(x, W)
    w = cp.asarray(np.linalg.eigvalsh(cp.asnumpy(H)))
    return cp.amin(w)
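For reference, recent CuPy versions also provide cupy.linalg.eigvalsh, so the eigenvalues can be computed on the GPU without the NumPy round trip above; a sketch of that variant (assuming create_H_vec_cp returns a CuPy array and the installed CuPy exposes eigvalsh):

def H_ev_vec_gpu(x, W):
    H = create_H_vec_cp(x, W)
    w = cp.linalg.eigvalsh(H)  # symmetric/Hermitian eigenvalues, computed on the GPU
    return cp.amin(w)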