def predict(self, X):
    """
    This method makes predictions on test data 'X'.

    Parameters
    ----------
    X : numpy array
        N x M numpy array; N = number of data points; M = number of
        features.
    """
    L = len(self.wts)
    # initialize Z to the input features plus a constant (bias) feature
    Z = arr(concat((np.ones((mat(X).shape[0], 1)), mat(X)), axis=1))

    for l in range(L - 1):  # forward-propagate through the hidden layers
        Z = arr(mat(Z) * mat(self.wts[l]).T)  # linear response of next layer
        # apply the activation, then re-add the constant feature
        Z = arr(concat((np.ones((mat(Z).shape[0], 1)), mat(self.sig(Z))),
                       axis=1))

    Z = arr(mat(Z) * mat(self.wts[L - 1]).T)  # output layer linear response
    return self.sig_0(Z)  # output layer activation function
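A minimal standalone sketch of the same forward pass in plain NumPy (a hypothetical helper, not the class method above; sigmoid hidden activations assumed, output activation omitted):

import numpy as np

def forward(wts, X, sig=lambda z: 1 / (1 + np.exp(-z))):
    Z = np.hstack([np.ones((X.shape[0], 1)), X])             # constant feature + inputs
    for W in wts[:-1]:                                       # hidden layers
        Z = np.hstack([np.ones((Z.shape[0], 1)), sig(Z @ W.T)])
    return Z @ wts[-1].T                                     # output layer (activation omitted)

rng = np.random.default_rng(0)
wts = [rng.normal(size=(4, 3)), rng.normal(size=(1, 5))]     # 2 inputs -> 4 hidden -> 1 output
print(forward(wts, rng.normal(size=(5, 2))).shape)           # (5, 1)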
Example #2
 def split(self, X, y, groups=None):
     """Generate indices to split data into training and test set.
     Parameters
     ----------
     X : array-like of shape (n_samples, n_features)
         Training data, where n_samples is the number of samples
         and n_features is the number of features.
     y : array-like of shape (n_samples,), default=None
         The target variable for supervised learning problems.
     groups : array-like of shape (n_samples,)
         Group labels for the samples used while splitting the dataset into
         train/test set.
     Yields
     ------
     train : ndarray
         The training set indices for that split.
     test : ndarray
         The testing set indices for that split.
     """
     X, y, groups = indexable(X, y, groups)
     indices = np.arange(_num_samples(X))
     train_targets = {t: [] for t in np.unique(y)}
     test_targets = {t: [] for t in np.unique(y)}
     for target in train_targets:
         for test_i in self._iter_test_masks(X, y, groups, target):
             train_targets[target].append(indices[np.logical_not(test_i)])
             test_targets[target].append(indices[test_i])
     train_index = np.concatenate([np.concatenate(v) for v in train_targets.values()])
     test_index = np.concatenate([np.concatenate(v) for v in test_targets.values()])
     yield train_index, test_index
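The index bookkeeping above relies on boolean-mask complement indexing; a minimal illustration:

import numpy as np

indices = np.arange(6)
test_mask = np.array([False, True, False, True, False, False])
print(indices[np.logical_not(test_mask)])   # train indices: [0 2 4 5]
print(indices[test_mask])                   # test indices:  [1 3]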
Example #3
    def tile_sample(image, tile_shape):
        height, width, n_channels = image.shape

        # pad the width up to a multiple of the tile width
        hangover = width % tile_shape[1]
        if hangover != 0:
            pad_amount = tile_shape[1] - hangover
            padding = np.zeros((height, pad_amount, n_channels))
            image = np.concatenate([image, padding], axis=1)

        # pad the height up to a multiple of the tile height
        hangover = height % tile_shape[0]
        if hangover != 0:
            pad_amount = tile_shape[0] - hangover
            pad_shape = list(image.shape)
            pad_shape[0] = pad_amount
            padding = np.zeros(pad_shape)
            image = np.concatenate([image, padding], axis=0)

        H = image.shape[0] // tile_shape[0]
        W = image.shape[1] // tile_shape[1]

        slices = np.split(image, W, axis=1)
        new_shape = (H, *tile_shape, n_channels)
        slices = [np.reshape(s, new_shape) for s in slices]
        new_images = np.concatenate(slices, axis=1)
        new_images = new_images.reshape(H * W, *tile_shape, n_channels)
        return new_images
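A quick usage sketch of the tiling above (assuming tile_sample is available as a plain function):

import numpy as np

image = np.arange(5 * 7 * 3, dtype=float).reshape(5, 7, 3)   # 5x7 RGB image
tiles = tile_sample(image, (2, 2))                           # padded to 6x8 first
print(tiles.shape)                                           # (12, 2, 2, 3)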
Example #4
def predict_data(model, data, steps):
    it = iter(data)
    x_test, y_test = zip(*[next(it) for _ in range(steps)])
    yh_test = [model.predict(x) for x in x_test]
    x_test = [np.concatenate(x) for x in zip(*x_test)]
    y_test = np.concatenate(y_test).squeeze()
    yh_test = np.concatenate(yh_test).squeeze()
    return x_test, y_test, yh_test
Example #5
    def add_shape(self,
                  vertices,
                  normals,
                  triangles,
                  color,
                  atoms=None,
                  description=None):
        """Add shape to drawing

        Parameters
        ----------
        vertices : :py:class:`numpy.array` of coordinates
        normals : :py:class:`numpy.array` of normals, one per vertex
        triangles : :py:class:`numpy.array` of vertex indices, multiple of 3
        color : either a single 4 element uint8 :py:class:`numpy.array`;
            or an array of those values, one per vertex
        atoms : a sequence of :py:class:`~chimerax.atomic.Atom`s
            or an :py:class:`~chimerax.atomic.Atoms` collection.
        description : a string describing the shape

        The vertices, normals, and triangles can be custom or the results
        from one of the :py:mod:`~chimerax.surface`'s geometry functions.
        If the description is not given, it defaults to a list of the atoms.
        """
        # extend drawing's vertices, normals, vertex_colors, and triangles
        # atoms is a molarray.Atoms collection
        # description is what shows up when hovered over
        asarray = numpy.asarray
        concat = numpy.concatenate
        if color.ndim == 1 or color.shape[0] == 1:
            colors = numpy.empty((vertices.shape[0], 4), dtype=numpy.uint8)
            colors[:] = color
        else:
            colors = asarray(color, dtype=numpy.uint8)
            assert colors.shape[1] == 4 and colors.shape[0] == vertices.shape[0]
        if self.vertices is None:
            if atoms is not None:
                self._add_handler_if_needed()
            self.set_geometry(asarray(vertices, dtype=numpy.float32),
                              asarray(normals, dtype=numpy.float32),
                              asarray(triangles, dtype=numpy.int32))
            self.vertex_colors = colors
            s = _AtomicShape(range(0, self.triangles.shape[0]), description,
                             atoms)
            self._shapes.append(s)
            return
        offset = self.vertices.shape[0]
        start = self.triangles.shape[0]
        new_vertex_colors = concat((self.vertex_colors, colors))
        self.set_geometry(
            asarray(concat((self.vertices, vertices)), dtype=numpy.float32),
            asarray(concat((self.normals, normals)), dtype=numpy.float32),
            asarray(concat((self.triangles, triangles + offset)),
                    dtype=numpy.int32))
        self.vertex_colors = new_vertex_colors
        s = _AtomicShape(range(start, self.triangles.shape[0]), description,
                         atoms)
        self._shapes.append(s)
Example #6
    def expand_buffer(self, block_size=None):
        if block_size is not None:
            self.block_size = block_size

        self.value_history = np.concatenate((self.value_history,
                                             np.empty(self.block_size)))
        self.ratio_history = np.concatenate((self.ratio_history,
                                             np.empty(self.block_size)))
        self.max_history_size += self.block_size
Example #7
def opponent_colour(mat):
    r = mat[:, :, 0]
    g = mat[:, :, 1]
    b = mat[:, :, 2]
    # opponent-colour channels: intensity, red-green, blue-yellow
    I = r + g + b
    RG = r - g
    BY = b - (r + g) / 2
    # stack the three 2-D channels along a new last axis
    return np.dstack((I, RG, BY))
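A quick shape check (assuming a float RGB array):

import numpy as np

rgb = np.random.rand(4, 4, 3)     # toy RGB image
opp = opponent_colour(rgb)
print(opp.shape)                  # (4, 4, 3): I, RG and BY channels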
 def predict(self, img_input):
     init_attention_map = np.zeros(img_input.shape[:-1] + (1, ))
     init_attention_map.fill(0.5)
     init_cell_state = np.zeros(img_input.shape[:-1] + (32, ))
     zeros_mask = np.zeros(img_input.shape[:-1] + (1, ))
     attention_map, lstm_feats, attention_maps = self.attentive_rnn.predict(
         [img_input, init_attention_map, init_cell_state, zeros_mask])
     auto_encoder_input = np.concatenate([attention_map, img_input], axis=-1)
     skip_output_1, skip_output_2, skip_output_3 = self.autoencoder.predict(
         auto_encoder_input)
     return skip_output_3
Example #9
  def get_batch(self, batch_size):
    probs  = self.priorities ** self.per_alpha
    probs /= probs.sum()

    self.indices = np.random.choice(self.capacity, batch_size, p=probs)
    samples  = [self.buffer[idx] for idx in self.indices]
    # Importance Sampling reweighting
    weights  = (len(self.buffer) * probs[self.indices]) ** (-self.per_beta)
    weights /= weights.max()
    self.weights = np.array(weights, dtype=np.float32)

    state, action, reward, next_state, done = zip(*samples)
    return np.concatenate(state), action, reward, np.concatenate(next_state), done
Example #10
def smooth_left_right(inputMap, minFrag=500, boundary_frags=True):

    from numpy import array, concatenate as concat, sum

    if len(inputMap.frags) < 2:
        return inputMap

    frags = np.array(inputMap.frags)

    # Remove boundary fragments - ignore them while smoothing
    if boundary_frags:

        bleft = frags[0]
        bright = frags[-1]
        frags = frags[1:-1]

    while True:

        num_frags = frags.shape[0]
        if num_frags < 2:
            break

        i = np.argmin(frags)
        f = frags[i]
        last_ind = num_frags - 1

        if f > minFrag:
            break
        if (i == 0):
            # This is the first frag, merge right
            frags = concat(([frags[0] + frags[1]], frags[2:]))
        elif (i == last_ind):
            # This is the last frag, merge left
            frags = concat((frags[:-2], [frags[-1] + frags[-2]]))
        else:
            # This is an interior frag, merge with min
            left_frag = frags[i - 1]
            right_frag = frags[i + 1]
            if left_frag < right_frag:
                frags = concat(
                    (frags[0:i - 1], [frags[i - 1] + frags[i]], frags[i + 1:]))
            else:
                frags = concat(
                    (frags[0:i], [frags[i] + frags[i + 1]], frags[i + 2:]))

    # Reattach boundary fragments
    if boundary_frags:
        frags = concat(([bleft], frags, [bright]))

    output_map = MalignerMap(frags=frags, mapId=inputMap.mapId)
    return output_map
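A small worked example of the merge loop, replayed by hand with minFrag = 500:

import numpy as np
from numpy import concatenate as concat

# fragment sizes with two sub-minimum interior fragments
frags = np.array([800, 120, 900, 300, 700])
# pass 1: argmin is 120; its left neighbour (800) is smaller than its right
# neighbour (900), so it merges left:
frags = concat((frags[0:0], [frags[0] + frags[1]], frags[2:]))   # [920, 900, 300, 700]
# pass 2: argmin is 300; its right neighbour (700) is smaller, so it merges right:
frags = concat((frags[0:2], [frags[2] + frags[3]], frags[4:]))   # [920, 900, 1000]
# every fragment now exceeds minFrag, so the loop terminates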
Example #12
    def add_shapes(self, shape_info):
        """Add multiple shapes to drawing

        Parameters
        ----------
        shape_info: sequence of :py:class:`AtomicShapeInfo`

        There must be no initial geometry.
        """
        from numpy import asarray, empty, float32, int32, uint8, concatenate as concat
        num_shapes = len(shape_info)
        all_vertices = [None] * num_shapes
        all_normals = [None] * num_shapes
        all_triangles = [None] * num_shapes
        all_colors = [None] * num_shapes
        all_shapes = [None] * num_shapes
        num_vertices = 0
        num_triangles = 0
        has_atoms = False
        for i, info in enumerate(shape_info):
            vertices, normals, triangles, color, atoms, description = info
            all_vertices[i] = vertices
            all_normals[i] = normals
            all_triangles[i] = triangles + num_vertices
            if color.ndim == 1 or color.shape[0] == 1:
                colors = empty((vertices.shape[0], 4), dtype=uint8)
                colors[:] = color
            else:
                colors = asarray(color, dtype=uint8)
                assert colors.shape[1] == 4 and colors.shape[0] == vertices.shape[0]
            all_colors[i] = colors
            has_atoms = has_atoms or (atoms is not None)
            new_num_triangles = num_triangles + len(triangles)
            all_shapes[i] = _AtomicShape(
                range(num_triangles, new_num_triangles), description, atoms)
            num_vertices += len(vertices)
            num_triangles = new_num_triangles
        if has_atoms:
            self._add_handler_if_needed()
        vertices = empty((num_vertices, 3), dtype=float32)
        normals = empty((num_vertices, 3), dtype=float32)
        triangles = empty((num_triangles, 3), dtype=int32)
        self.set_geometry(concat(all_vertices, out=vertices),
                          concat(all_normals, out=normals),
                          concat(all_triangles, out=triangles))
        self.vertex_colors = concat(all_colors)
        self._shapes = all_shapes
Example #13
def vstack(v):
    if len(v) == 0:
        return np.array([])
    if v[0].ndim == 1:
        return np.concatenate(v)
    else:
        return np.vstack(v)
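Both branches in one quick demo:

import numpy as np

print(vstack([np.array([1, 2]), np.array([3])]))    # 1-D inputs: [1 2 3]
print(vstack([np.eye(2), np.eye(2)]).shape)         # 2-D inputs: (4, 2)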
Example #14
def rbind(*dfs):

    # TODO: assert that all inputs share the same set of columns

    result = pd.concat(dfs, axis=0, ignore_index=True)

    return result
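Usage sketch:

import pandas as pd

a = pd.DataFrame({"x": [1, 2], "y": [3, 4]})
b = pd.DataFrame({"x": [5], "y": [6]})
print(rbind(a, b))   # row-bound frame with a fresh 0..2 index, mirroring R's rbind()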
Example #15
    def cap_n_bound(self, traces, bound, Tg, decay=False):
        """ take 2d array and set values >= upper boundary as np.nan
        return dataframe of capped ndarray, assumes all traces meet
        the criteria trials[any(val >= a)]

        ::Arguments::
              traces (ndarray):
                    go decision traces for single cond
              bound (float):
                    boundary for given condition, all
                    values >= bound --> np.nan
              Tg (int):
                    number of timepoints to keep; rows from Tg onward
                    are set to np.nan in the output
              decay (bool <False>):
                    concatenate mirror image of input array
                    along axis 1 (i.e., bold up, decay down)
        ::Returns::
              traces_df (DataFrame):
                    all go traces
        """

        traces[traces >= bound] = np.nan
        if decay:
            traces_capped = np.concatenate((traces, traces[:, ::-1]), axis=1)
        else:
            traces_capped = traces
        traces_df = pd.DataFrame(traces_capped.T)
        traces_df.iloc[Tg:, :] = np.nan
        return traces_df
Example #16
 def set_robot_blk_states(self, rState, bStates):
     """Set simulator internal states
     :param rState: 1D np array (x, y, theta)
     :param bStates: 2D np array [(x, y)]
     """
     state = np.concatenate((rState, bStates.ravel()))
     return self.set_state(state)
Example #17
def better_matching(workers_vec, contract_vec):
    _, UN = contract_vec.shape
    assert _ == 1
    _, WN = workers_vec.shape
    assert _ == 1
    matching = np.zeros((WN, UN))
    probe_matrix = abs(contract_vec - workers_vec.T)
    probe_matrix = abs(contract_vec) - probe_matrix
    assert (WN, UN) == probe_matrix.shape

    if WN == 1:
        hr = workers_vec[0, 0]
        i = np.argmax(probe_matrix)
        matching[0, i] = 1
        target = abs(contract_vec[0, i] - hr)
        return matching, target

    best_target = 100500100500
    best_matching = None
    for i in range(WN):
        _workers = np.concatenate([workers_vec[:, :i], workers_vec[:, i + 1:]], axis=1)
        for j in range(UN):
            _contracts = contract_vec.copy()
            _contracts[0, j] -= workers_vec[0, i]
            small_matching, small_target = better_matching(
                _workers, _contracts)
            # keep the best assignment found by the recursive search
            if small_target < best_target:
                best_target = small_target
                best_matching = small_matching

    return best_matching, best_target
Example #19
    def generate_dpm_traces(self):
        """ Get go,ssrt using same function as proactive then use the indices for
        correct go and correct stop trials to slice summed, but momentary, dvg/dvs evidence vectors
        (not accumulated over time just a basic sum of the vectors)
        """
        # ensure parameters are all vectorized
        self.p = self.vectorize_params(self.p)
        Pg, Tg = self.__update_go_process__(self.p)
        Ps, Ts = self.__update_stop_process__(self.p)

        self.bound = self.p['a']
        self.onset = self.p['tr']

        ncond = self.ncond
        ntot = self.ntot
        dx = self.dx
        base = self.base
        self.nss_all = self.nss
        nss_all = self.nss_all
        nssd = self.nssd
        ssd = self.ssd
        nss = nss_all // nssd
        xtb = self.xtb

        get_ssbase = lambda Ts, Tg, DVg: array(
            [[DVc[:nss // nssd, ix] for ix in np.where(Ts < Tg[i], Tg[i] - Ts, 0)]
             for i, DVc in enumerate(DVg)])[:, :, :, na]

        self.gomoments = xtb[:, na] * np.where((rs((ncond, ntot, Tg.max())).T < Pg), dx, -dx).T
        self.ssmoments = np.where(rs((ncond, nssd, nss, Ts.max())) < Ps, dx, -dx)
        DVg = base[:, na] + xtb[:, na] * np.cumsum(self.gomoments, axis=2)
        self.dvg = DVg[:, :nss_all, :]
        self.dvs = get_ssbase(Ts, Tg, DVg) + np.cumsum(self.ssmoments, axis=3)

        dg = self.gomoments[:, nss_all:, :].reshape(ncond, nssd, nss, Tg.max())
        ds = self.ssmoments.copy()

        ss_list, go_list = [], []
        for i, (s, g) in enumerate(zip(ds, dg)):
            diff = Ts - Tg[i]
            pad_go = diff[diff > 0]
            pad_ss = abs(diff[diff < 0])
            go_list.extend([concat((np.zeros((nss, gp)), g[i]), axis=1) for gp in pad_go])
            go_list.extend([g[x] for x in range(len(pad_ss))])

            ss_list.extend([concat((np.zeros((nss, abs(spad))), s[i, :, -(Tg[i] - spad):]), axis=1) for spad in pad_ss])
            ss_list.extend([s[i, :, -tss:] for tss in Ts[:len(pad_go)]])

        r = [np.cumsum(g + s, axis=1) for g, s in zip(go_list, ss_list)]
Example #20
def interpolate(x0s, x, y, axis=0):
  """
  Interpolate y(x) onto points in x0s using piecewise linear interpolation

  If y is multidimensional then `axis` specifies the axis along which x varies
  """
  y0s = [interpolate1(x0, x, y, axis) for x0 in x0s]
  return np.concatenate([unsqueeze(y0, y.ndim, axis) for y0 in y0s], axis)
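For the common 1-D case the same idea reduces to np.interp; a minimal sketch (it sidesteps the interpolate1/unsqueeze helpers, which handle the multidimensional case):

import numpy as np

x = np.array([0.0, 1.0, 2.0])
y = np.array([0.0, 10.0, 20.0])
x0s = [0.5, 1.5]
print(np.interp(x0s, x, y))   # [ 5. 15.] -- piecewise linear interpolation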
Example #21
def construct_observation(observation, delta):
    '''Construct an observation from metrics plus deltas

    Takes raw values, standardizes, and concatenates them.
    '''
    return np.concatenate(
        [metric_standardize(observation),
         metric_standardize(delta)])
Example #22
def dobeshi(signal):
    # Daubechies-4 lifting scheme: forward transform, then its inverse
    from numpy import sqrt, roll, concatenate as concat

    S = signal
    n = len(S)

    s1 = S[::2] + sqrt(3) * S[1::2]
    d1 = S[1::2] - sqrt(3)/4 * s1 - (sqrt(3) - 2) / 4 * concat(((s1[-1],), s1[:-1]))
    s2 = s1 - concat((d1[1:n//2], (d1[0],)))

    s = (sqrt(3)-1) / sqrt(2) * s2
    d = -(sqrt(3)+1) / sqrt(2) * d1

    d1 = -d * ((sqrt(3)-1) / sqrt(2))  # invert the detail scaling (note the sign)
    s2 = s * ((sqrt(3)+1)/sqrt(2))
    s1 = s2 + roll(d1, -1)
    S[1::2] = d1 + sqrt(3)/4*s1 + (sqrt(3)-2)/4*roll(s1, 1)
    S[::2] = s1 - sqrt(3) * S[1::2]

    return d, S
def phase_plane(func, params, fig_name, coords, func_vars=[0, 1], bounds=(),
                param_tuple=(), fig_title='', arrows=False,
                interval_cond=(0, 5000, 100000), traj_type='reg', mod_param=0,
                xlim=None, ylim=None, xlabel=None, ylabel=None,
                time_series=False, pplane_dir='', tseries_dir=''):
    graph_specifications(graph_type='phase plane', xlabel=xlabel, ylabel=ylabel,
                         param_tuple=param_tuple, xlim=xlim, ylim=ylim,
                         title=fig_title)
    if arrows:
        #creating an array of points corresponding to the 2d grid where vector field arrows are going to be graphed
        corner, r, sampling = bounds
        xy = neighborhood(*bounds)
        t = np.linspace(0, 1, 10)
        phi_integrated_arr = np.empty((10, 2))

        vector_coords = np.repeat([coords[0]], xy.shape[0], axis=0)
        vector_coords[:, func_vars] = xy

        for phi in vector_coords:
            solution = odeint(func, phi, t, args=params)[:, func_vars]
            phi_integrated_arr = np.concatenate((phi_integrated_arr, solution), axis=1)

        #finding the direction for the vectors in forms of differences to plot them with plt.arrow
        sol_arr = phi_integrated_arr[:, 2:]
        difference = sol_arr - np.roll(sol_arr, 1, axis=0)
        sol_arr = sol_arr[0]
        difference = difference[1]
        scaling_factor = r / sampling
        diff_normalized = diff_normalization(difference) * scaling_factor * 0.5

        size = sol_arr.size
        sol = sol_arr.reshape((int(size / 2), 2))
        for start, change in zip(sol, diff_normalized):
            plt.arrow(start[0],
                      start[1],
                      change[0],
                      change[1],
                      width=scaling_factor * 0.1,
                      color="#808080")

    phase_plane_traj(func, coords, params, interval_cond, func_vars=func_vars,
                     traj_type=traj_type, mod_param=mod_param)
    pplane_name = pplane_dir + "PhasePlane_" + fig_name
    plt.savefig("./{}".format(pplane_name))
    plt.show()

    if time_series:
        start, end, sample = interval_cond
        trajectories, t = selected_traj(func, params, coords, interval_cond)
        if traj_type == 'mod':
            traj1 = mod(trajectories[sample - 100:sample, func_vars[0]],
                        mod_param)
        else:
            traj1 = trajectories[sample - 100:sample, func_vars[0]]
        t = t[sample - 100:sample]
        pltime_series(traj1, t, 't', xlabel, fig_name, tseries_dir=tseries_dir)

    return 'vector_field'
    def __init__(self, *args):
        multiples = [np.unique(ids) for ids in args]
        unique = np.concatenate(multiples)

        if len(multiples) > 1:
            unique = np.unique(unique)
        unique.sort()

        self._index = pd.Index(unique)
        self._dtype = np.min_scalar_type(len(self._index))
Example #26
def combine_meshes(meshes):
    """
    Combine the iterable of (vertices, faces) into a single (vertices, faces).

    Args:
        meshes: iterable of (vertices, faces)

    Returns:
        vertices: (nv, 3) float numpy array of vertex coordinates
        faces: (nf, 3) int numpy array of face vertex indices.
    """
    vertices = []
    faces = []
    nv = 0
    for v, f in meshes:
        vertices.append(v)
        faces.append(f + nv)
        nv += len(v)
    return np.concatenate(vertices, axis=0), np.concatenate(faces, axis=0)
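A quick usage check with two single-triangle meshes:

import numpy as np

# the second mesh's face indices get offset by its vertex count (3)
tri = (np.zeros((3, 3)), np.array([[0, 1, 2]]))
v, f = combine_meshes([tri, tri])
print(v.shape)   # (6, 3)
print(f)         # [[0 1 2] [3 4 5]]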
	def __responses(self, wts, X, sig, sig_0):
		"""
		This is a helper method that gets the linear sum from previous layer
		(A) and saturated activation responses (Z) for a data point. Used in:
			train
		"""
		L = len(wts) + 1
		A = [None for i in range(L)]
		Z = arr([None for i in range(L)], dtype=object).ravel()
		A[0] = arr([1])
		Z[0] = arr(concat((np.ones((mat(X).shape[0],1)), mat(X)), axis=1))				# input features plus constant feature

		for l in range(1, L - 1):
			A[l] = arr(mat(Z[l - 1]) * mat(wts[l - 1]).T)								# compute linear combination of previous layer
			Z[l] = arr(concat((np.ones((mat(X).shape[0],1)), mat(sig(A[l]))), axis=1))	# pass through activation function and add constant feature

		A[L - 1] = arr(mat(Z[L - 2]) * mat(wts[L - 2]).T)
		Z[L - 1] = sig_0(A[L - 1])														# output layer

		return (A,Z)
Example #28
 def fit(self, x, y):
     from functools import reduce
     from numpy import concatenate as concat
     one = np.array([1])
     if self.W is None:
         self.shapes[0] = (x.shape[1] + 1, self.shapes[0][1])
         self.W = [
             np.random.rand(*shape).T / shape[0] for shape in self.shapes
         ]
     for i in range(y.shape[0]):
         o = reduce(
             lambda o, w: o +
             [1 / (1 + np.exp(-np.dot(w, concat(
                 (o[-1], one)))))], self.W, [x[i]])
         dLu = (o[-1] - y[i]) * o[-1] * (1 - o[-1])
         for j in range(self.num_layers, 0, -1):
             dLW = np.dot(dLu.reshape(-1, 1),
                          concat((o[j - 1], one)).reshape(-1, 1).T)
             dLu = np.dot(self.W[j - 1][:, :-1].T,
                          dLu) * o[j - 1] * (1 - o[j - 1])
             self.W[j - 1] -= 0.1 * dLW
     return self
    def read_attn(x, xhat, h_dec_prev, Fx, Fy, gamma, N):

        Fx_t = np.transpose(Fx, axes=[0, 2, 1])

        x = np.reshape(x, [1, 128, 128])
        xhat = np.reshape(xhat, [1, 128, 128])

        FyxFx_t = np.reshape(np.matmul(Fy, np.matmul(x, Fx_t)), [-1, N * N])
        FyxhatFx_t = np.reshape(np.matmul(Fy, np.matmul(xhat, Fx_t)), [-1, N * N])

        return gamma * np.concatenate([FyxFx_t, FyxhatFx_t], axis=1)
Example #30
def inputs_preprocessing(X, mode):
    # X \in [0, 255], RGB
    if mode == 'inception':
        return normalize_range(X, [0, 255], [-1, 1])
    elif mode == 'vgg':
        vgg_bgr_mean = [103.939, 116.779, 123.68]
        X_r, X_g, X_b = np.split(X, indices_or_sections=3, axis=3)
        X_b -= vgg_bgr_mean[0]
        X_g -= vgg_bgr_mean[1]
        X_r -= vgg_bgr_mean[2]
        return np.concatenate([X_b, X_g, X_r], axis=3)
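A usage sketch (assumes normalize_range from the same module is available for the 'inception' branch):

import numpy as np

X = np.random.uniform(0, 255, size=(2, 8, 8, 3))
print(inputs_preprocessing(X, 'inception').min() >= -1.0)   # True: output in [-1, 1]
print(inputs_preprocessing(X, 'vgg').shape)                 # (2, 8, 8, 3), channels now BGR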
Example #31
def combine_triangles(triangle_info):
    from numpy import empty, float32, int32, concatenate as concat
    all_vertices = []
    all_normals = []
    all_triangles = []
    num_vertices = 0
    num_triangles = 0
    for info in triangle_info:
        vertices, normals, triangles = info
        all_vertices.append(vertices)
        all_normals.append(normals)
        all_triangles.append(triangles + num_vertices)
        num_vertices += len(vertices)
        num_triangles += len(triangles)
    vertices = empty((num_vertices, 3), dtype=float32)
    normals = empty((num_vertices, 3), dtype=float32)
    triangles = empty((num_triangles, 3), dtype=int32)
    return (concat(all_vertices, out=vertices),
            concat(all_normals, out=normals),
            concat(all_triangles, out=triangles))
Example #32
def fir_pre_phase(b, x, n_ramp=None):
    # applies a FIR filter with a pre-phase ramp
    # to reduce ripple
    #
    # Arguments:
    # * b: FIR coefficients
    # * x: input signal
    # * n_ramp: number of samples in pre-ramp
    #           (default = len(b))
    from scipy.signal import lfilter
    if n_ramp is None:
        n_ramp = len(b)
    signal = np.concatenate((np.linspace(-x[0], x[0], n_ramp), x))
    y = lfilter(b, 1, signal)
    return y[n_ramp + 1:]
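A usage sketch; the low-pass coefficients from scipy.signal.firwin are purely illustrative:

import numpy as np
from scipy.signal import firwin

b = firwin(31, 0.2)      # 31-tap low-pass FIR (illustrative choice)
x = np.ones(200)         # a DC step, where start-up ripple shows
y = fir_pre_phase(b, x)
print(len(x), len(y))    # 200 199: the pre-ramp (plus one sample) is discarded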
def generate_features(img_path):

    # Returns relative size of axes, and normalized sum of the rows and column
    rc_ratio, row_avg, col_avg = efg.get_img_edge_data(img_path, blur=3)

    # Combine the later
    row_col_arr = np.concatenate((row_avg, col_avg))

    # Run PCA to collapse to 1/20 the original size, ~85% variance retained
    pca_vals = _RC_PCA.transform(row_col_arr.reshape(1, -1))  # sklearn expects 2-D

    return np.concatenate((np.atleast_1d(rc_ratio), pca_vals.ravel()))
Example #35
  def get_batch(self, batch_size):
    probs  = self.priority_tree[-self.num_leaf_nodes:] ** self.per_alpha
    probs /= probs.sum()

    self.indices = np.zeros((batch_size,), dtype=np.int32)
    priority_segment = self.total_priority / batch_size
    for i in range(batch_size):
      seg_start = priority_segment * i
      seg_end = priority_segment * (i + 1)
      # Sampling is linear in batch size, but logarithmic in buffer size
      tree_idx = self.extract_index(np.random.uniform(seg_start, seg_end))
      # convert tree_idx to a buffer index by subtracting the parent-node count
      self.indices[i] = tree_idx - self.num_parent_nodes

    samples  = [self.buffer[idx] for idx in self.indices]
    # Importance Sampling reweighting
    weights  = (len(self.buffer) * probs[self.indices]) ** (-self.per_beta)
    weights /= weights.max()
    self.weights = np.array(weights, dtype=np.float32)

    state, action, reward, next_state, done = zip(*samples)
    return np.concatenate(state), action, reward, np.concatenate(next_state), done
Example #36
    def makeBatch(self, params):
        size = np.random.randint(
            low=params['lowSize'],
            high=params['highSize'] + 1,
            size=[params['batchSize'], params['length'], 1])
        weight = np.random.randint(
            low=params['lowWeight'],
            high=params['highWeight'] + 1,
            size=[params['batchSize'], params['length'], 1])

        self.capacity = params['capacity']

        return np.concatenate([size, weight], axis=2)
Example #37
    def extend_shape(self, vertices, normals, triangles, color=None):
        """Extend previous shape

        Parameters
        ----------
        vertices : :py:class:`numpy.array` of coordinates
        normals : :py:class:`numpy.array` of normals, one per vertex
        triangles : :py:class:`numpy.array` of vertex indices, multiple of 3
        color : either None, a single 4 element uint8 :py:class:`numpy.array`;
            or an array of those values, one per vertex.  If None, then the
            color is same as the last color of the existing shape.
        The associated atoms and description are that of the extended shape.

        """
        if self.vertices is None:
            raise ValueError("no shape to extend")
        asarray = numpy.asarray
        concat = numpy.concatenate
        if color is None or color.ndim == 1 or color.shape[0] == 1:
            colors = numpy.empty((vertices.shape[0], 4), dtype=numpy.uint8)
            if color is None:
                colors[:] = self.vertex_colors[-1]
            else:
                colors[:] = color
        else:
            colors = asarray(color, dtype=numpy.uint8)
            assert colors.shape[1] == 4 and colors.shape[0] == vertices.shape[0]
        offset = self.vertices.shape[0]
        new_vertex_colors = concat((self.vertex_colors, colors))
        self.set_geometry(
            asarray(concat((self.vertices, vertices)), dtype=numpy.float32),
            asarray(concat((self.normals, normals)), dtype=numpy.float32),
            asarray(concat((self.triangles, triangles + offset)),
                    dtype=numpy.int32))
        self.vertex_colors = new_vertex_colors
        s = self._shapes[-1]
        s.triangle_range = range(s.triangle_range.start,
                                 self.triangles.shape[0])
Example #38
def fill_to_end_of_year(df):
    last = df.tail(1).index[0]
    year = last.year
    end = datetime.datetime(year + 1, 1, 1)
    index_lst = []
    i = 1
    while last + datetime.timedelta(days=i) < end:
        index_lst.append(last + datetime.timedelta(days=i))
        i += 1

    filler = np.full((len(index_lst), len(df.columns)), np.nan)
    rest = DataFrame(filler, columns=df.columns)
    rest.index = index_lst
    return pd.concat((df, rest))
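A usage sketch with a short daily-indexed frame (assumes the fixed version above):

import pandas as pd
from pandas import DataFrame

idx = pd.date_range("2021-12-28", periods=3, freq="D")
df = DataFrame({"value": [1.0, 2.0, 3.0]}, index=idx)
filled = fill_to_end_of_year(df)
print(filled.index[-1])   # 2021-12-31: the frame is padded to year end with NaN rows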
Example #39
 def build_K_matrix(self):
   zeros = [
     [0,0,0],
     [0,0,0],
     [0,0,0]
   ]
   self.L2G = concat((concat((self.Tm, zeros), axis=1),
                      concat((zeros, self.Tm), axis=1)), axis=0)
   self.G2L = concat((concat((self.TmT, zeros), axis=1),
                      concat((zeros, self.TmT), axis=1)), axis=0)
   self.K = np.dot(np.dot(self.L2G, self.Km), self.G2L)
	def __responses(self, wts, X_in, sig, sig_0):
		"""
		Helper function that gets linear sum from previous layer (A) and
		saturated activation responses (Z) for a data point. Used in:
			train
		"""
		L = len(wts)
		constant_feat = np.ones((mat(X_in).shape[0],1)).flatten()	# constant feature
		# compute linear combination of inputs
		A = [arr([1])]
		Z = [concat((constant_feat, X_in))]

		for l in range(1, L):
			A.append(Z[l - 1].dot(wts[l - 1].T))					# compute linear combination of previous layer
			# pass through activation function and add constant feature
			Z.append(cols((np.ones((mat(A[l]).shape[0],1)),sig(A[l]))))

		A.append(arr(mat(Z[L - 1]) * mat(wts[L - 1]).T))
		Z.append(arr(sig_0(A[L])))									# output layer (saturate for classifier, not regressor)

		return A,Z
	def get_layers(self):
		S = arr([mat(self.wts[i]).shape[1] - 1 for i in range(len(self.wts))])
		S = concat((S, [mat(self.wts[-1]).shape[0]]))
		return S
Example #42
                m1i = m1k
                m0i = m0k
            for j, r in zip(s2, rk):
                if j<0: continue
                m1i[j] += r
                m0i[j] += 1
        return m1, m0, m1k, m0k

if __name__ == "__main__":
    import numpy
    from numpy.random import binomial
    from numpy import concatenate as concat

    numpy.random.seed(123)
    d = 5
    phi = [[0.1, 0.7, 0.2], [0.1, 0.3, 0.9], [0.8, 0.1, 0.2]]
    orgR = concat([concat([binomial(1, p, size=(d,d)) for p in pp], axis=1) for pp in phi])
    i = numpy.arange(orgR.shape[0])
    numpy.random.shuffle(i)
    R = orgR[i,:]
    i = numpy.arange(orgR.shape[1])
    numpy.random.shuffle(i)
    R = R[:,i]

    model = IRM(R, alpha=1.0, a=1.0, b=1.0)
    maxv = -1e9
    for i in range(200):
        model.update()
        v = model.log_posterior()
        if v > maxv:
            maxv = v
            maxm = model.clone()