def get_GridFSim(x1, y1, x2, y2, img1):
    ''' Calculate estimated ice drift on first image based on feature tracking vectors'''
    
    # # initial drift inter-/extrapolation
    # linear triangulation
    x1Grid, y1Grid = np.meshgrid(range(img1.shape[1]), range(img1.shape[0]))
    x2GridFSim = griddata(np.array([y1, x1]).T, x2, np.array([y1Grid, x1Grid]).T, method='linear').T
    y2GridFSim = griddata(np.array([y1, x1]).T, y2, np.array([y1Grid, x1Grid]).T, method='linear').T
    # linear fit for entire grid
    A = np.vstack([np.ones(len(x1)), x1, y1 ]).T
    # find B in x2 = B * [x1, y1]
    Bx = np.linalg.lstsq(A, x2)[0]
    By = np.linalg.lstsq(A, y2)[0]
    # calculate simulated x2sim = B * [x1, y1]
    x1GridF = x1Grid.flatten()
    y1GridF = y1Grid.flatten()
    A = np.vstack([np.ones(len(x1GridF)), x1GridF, y1GridF]).T
    x2GridFSim_lf = np.dot(A, Bx).reshape(img1.shape)
    y2GridFSim_lf = np.dot(A, By).reshape(img1.shape)
    # fill NaN with lf
    gpi = np.isnan(x2GridFSim)
    x2GridFSim[gpi] = x2GridFSim_lf[gpi]
    y2GridFSim[gpi] = y2GridFSim_lf[gpi]

    return x2GridFSim, y2GridFSim
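A minimal usage sketch (not part of the original snippet; it assumes import numpy as np and from scipy.interpolate import griddata, which the function above relies on). Four matched feature points drift uniformly by (+2, -1) pixels:

import numpy as np
from scipy.interpolate import griddata

img1 = np.zeros((50, 60))                      # dummy first image; only its shape is used
x1 = np.array([5.0, 20.0, 40.0, 55.0])         # feature x-positions on image 1
y1 = np.array([5.0, 30.0, 10.0, 45.0])         # feature y-positions on image 1
x2, y2 = x1 + 2.0, y1 - 1.0                    # the same features on image 2
x2GridFSim, y2GridFSim = get_GridFSim(x1, y1, x2, y2, img1)
# inside the convex hull of the features the estimate comes from the linear triangulation;
# outside it the least-squares fit fills the NaNs, so the whole grid ends up near (+2, -1)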
Example #2
def unit_vectors_from_cross_section(cross, index='index'):
    r"""Calculate the unit tanget and unit normal vectors from a cross-section.

    Given a path described parametrically by :math:`\vec{l}(i) = (x(i), y(i))`, we can find
    the unit tangent vector by the formula

    .. math:: \vec{T}(i) =
        \frac{1}{\sqrt{\left( \frac{dx}{di} \right)^2 + \left( \frac{dy}{di} \right)^2}}
        \left( \frac{dx}{di}, \frac{dy}{di} \right)

    From this, because this is a two-dimensional path, the normal vector can be obtained by a
    simple :math:`\frac{\pi}{2}` rotation.

    Parameters
    ----------
    cross : `xarray.DataArray`
        The input DataArray of a cross-section from which to obtain latitudes.
    index : `str`, optional
        A string denoting the index coordinate of the cross section, defaults to 'index' as
        set by `metpy.interpolate.cross_section`.

    Returns
    -------
    unit_tangent_vector, unit_normal_vector : tuple of `numpy.ndarray`
        Arrays describing the unit tangent and unit normal vectors (in x,y) for all points
        along the cross section.

    """
    x, y = distances_from_cross_section(cross)
    dx_di = first_derivative(x, axis=index).values
    dy_di = first_derivative(y, axis=index).values
    tangent_vector_mag = np.hypot(dx_di, dy_di)
    unit_tangent_vector = np.vstack([dx_di / tangent_vector_mag, dy_di / tangent_vector_mag])
    unit_normal_vector = np.vstack([-dy_di / tangent_vector_mag, dx_di / tangent_vector_mag])
    return unit_tangent_vector, unit_normal_vector
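The same construction sketched without MetPy, using np.gradient in place of first_derivative (illustrative only, assumes numpy):

import numpy as np

i = np.linspace(0, np.pi / 2, 50)            # parameter along the path
x, y = np.cos(i), np.sin(i)                  # a quarter-circle cross-section path
dx_di, dy_di = np.gradient(x), np.gradient(y)
mag = np.hypot(dx_di, dy_di)
unit_tangent = np.vstack([dx_di / mag, dy_di / mag])
unit_normal = np.vstack([-dy_di / mag, dx_di / mag])   # 90-degree rotation of the tangent
assert np.allclose(np.sum(unit_tangent * unit_normal, axis=0), 0)   # orthogonal
assert np.allclose(np.hypot(*unit_tangent), 1)                      # unit length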
Example #3
	def shift(data, shiftdirection="center"):
		# number of zero paddings applied on each side; "center" pads once everywhere
		numshifts = {"right": 1, "left": 1, "up": 1, "down": 1}

		zerocol = lambda arr: np.zeros((arr.shape[0], 1))
		zerorow = lambda arr: np.zeros((1, arr.shape[1]))

		shiftfuncs = {
			"right": lambda arr: np.hstack((zerocol(arr), arr)),
			"left": lambda arr: np.hstack((arr, zerocol(arr))),
			"up": lambda arr: np.vstack((arr, zerorow(arr))),
			"down": lambda arr: np.vstack((zerorow(arr), arr))
		}

		directionpairs = \
			{"right": "left", "left": "right", "up": "down", "down": "up"}

		if shiftdirection != "center":
			numshifts[shiftdirection] += 1
			numshifts[directionpairs[shiftdirection]] -= 1

		outarr = cp.deepcopy(data)  # cp is the copy module (import copy as cp)

		for direction in numshifts.keys():
			for numtimes in range(numshifts[direction]):
				outarr = shiftfuncs[direction](outarr)

		return outarr
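For the "center" case the helper pads one zero row and one zero column on every side; a named direction pads two columns (or rows) on one side and none on the opposite side. A rough np.pad equivalent, as an illustrative sketch only:

import numpy as np

data = np.arange(6.0).reshape(2, 3)
center = np.pad(data, 1)                    # one zero border all around, like shift(data, "center")
right = np.pad(data, ((1, 1), (2, 0)))      # like shift(data, "right"): two zero columns on the left,
                                            # one zero row above and one below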
Example #4
    def _sample(pts, shape, norm):
        (x, y, z), floor = np.modf(pts.T)
        floor = floor.astype(int)
        ceil = floor + 1
        x[x < 0] = 0
        y[y < 0] = 0
        z[z < 0] = 0

        i000 = np.ravel_multi_index((floor[2], floor[1], floor[0]), shape, mode='clip')
        i100 = np.ravel_multi_index((floor[2], floor[1],  ceil[0]), shape, mode='clip')
        i010 = np.ravel_multi_index((floor[2],  ceil[1], floor[0]), shape, mode='clip')
        i001 = np.ravel_multi_index(( ceil[2], floor[1], floor[0]), shape, mode='clip')
        i101 = np.ravel_multi_index(( ceil[2], floor[1],  ceil[0]), shape, mode='clip')
        i011 = np.ravel_multi_index(( ceil[2],  ceil[1], floor[0]), shape, mode='clip')
        i110 = np.ravel_multi_index((floor[2],  ceil[1],  ceil[0]), shape, mode='clip')
        i111 = np.ravel_multi_index(( ceil[2],  ceil[1],  ceil[0]), shape, mode='clip')

        v000 = (1-x)*(1-y)*(1-z)
        v100 = x*(1-y)*(1-z)
        v010 = (1-x)*y*(1-z)
        v110 = x*y*(1-z)
        v001 = (1-x)*(1-y)*z
        v101 = x*(1-y)*z
        v011 = (1-x)*y*z
        v111 = x*y*z

        allj = np.vstack([i000, i100, i010, i001, i101, i011, i110, i111]).T.ravel()
        data = np.vstack([v000, v100, v010, v001, v101, v011, v110, v111]).T.ravel()

        uniquej = np.unique(allj)
        uniquejdata = np.array([data[allj==j].sum() for j in uniquej])
        
        return uniquej, uniquejdata / float(norm)
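A worked check of the trilinear weights (assumes numpy and that _sample is reachable as a plain function): a point at the exact centre of a 2x2x2 voxel block should spread its weight equally over the eight corners.

import numpy as np

pts = np.array([[0.5, 0.5, 0.5]])            # one (x, y, z) sample point
idx, weights = _sample(pts, (2, 2, 2), 1.0)
assert len(idx) == 8                          # all eight corner voxels are touched
assert np.allclose(weights, 1.0 / 8.0)        # each receives weight 1/8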
Example #5
    def insert(self,A,B,news=None):
        '''
        Insert two blocks into the center of the cylinder.

        Parameters
        ----------
        A,B : any hashable object
            The scopes of the insert block points.
        news : list of any hashable object, optional
            The new scopes for the points of the cylinder before the insertion.
            If None, the old scopes remain unchanged.
        '''
        aspids,bspids,asrcoords,bsrcoords=[],[],[],[]
        for i,rcoord in enumerate(self.block):
            aspids.append(PID(scope=A,site=i))
            bspids.append(PID(scope=B,site=i))
            asrcoords.append(rcoord-self.translation/2)
            bsrcoords.append(rcoord+self.translation/2)
        if len(self)==0:
            self.pids=aspids+bspids
            self.rcoords=np.vstack([asrcoords,bsrcoords])
        else:
            if news is not None:
                assert len(news)*len(self.block)==len(self)
                self.pids=[PID(scope=scope,site=i) for scope in news for i in range(len(self.block))]
            apids,bpids=self.pids[:len(self)//2],self.pids[len(self)//2:]
            arcoords,brcoords=self.rcoords[:len(self)//2]-self.translation,self.rcoords[len(self)//2:]+self.translation
            self.pids=apids+aspids+bspids+bpids
            self.rcoords=np.vstack([arcoords,asrcoords,bsrcoords,brcoords])
        self.icoords=np.zeros(self.rcoords.shape)
        if np.any(np.asarray(list(self.neighbours.values()))==np.inf):
            self.neighbours={i:length for i,length in enumerate(minimumlengths(self.rcoords,self.vectors,self.nneighbour,Lattice.ZMAX))}
Example #6
def test_ovo_ties():
    # test that ties are broken using the decision function, not defaulting to
    # the smallest label
    X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
    y = np.array([2, 0, 1, 2])
    multi_clf = OneVsOneClassifier(Perceptron())
    ovo_prediction = multi_clf.fit(X, y).predict(X)

    # recalculate votes to make sure we have a tie
    predictions = np.vstack([clf.predict(X) for clf in multi_clf.estimators_])
    scores = np.vstack([clf.decision_function(X)
                        for clf in multi_clf.estimators_])
    # classifiers are in order 0-1, 0-2, 1-2
    # aggregate votes:
    votes = np.zeros((4, 3))
    votes[np.arange(4), predictions[0]] += 1
    votes[np.arange(4), 2 * predictions[1]] += 1
    votes[np.arange(4), 1 + predictions[2]] += 1
    # for the first point, there is one vote per class
    assert_array_equal(votes[0, :], 1)
    # for the rest, there is no tie and the prediction is the argmax
    assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
    # for the tie, the prediction is the class with the highest score
    assert_equal(ovo_prediction[0], 0)
    # in the zero-one classifier, the score for 0 is greater than the score for
    # one.
    assert_greater(scores[0][0], scores[0][1])
    # score for one is greater than score for zero
    assert_greater(scores[2, 0] - scores[0, 0], scores[0, 0] + scores[1, 0])
    # score for one is greater than score for two
    assert_greater(scores[2, 0] - scores[0, 0], -scores[1, 0] - scores[2, 0])
Example #7
    def _compute_multipliers(self, X, y):
        n_samples, n_features = X.shape

        K = self._gram_matrix(X)
        # Solves
        # min 1/2 x^T P x + q^T x
        # s.t.
        #  Gx \coneleq h
        #  Ax = b

        P = cvxopt.matrix(np.outer(y, y) * K)
        q = cvxopt.matrix(-1 * np.ones(n_samples))

        # -a_i \leq 0
        # TODO(tulloch) - modify G, h so that we have a soft-margin classifier
        G_std = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
        h_std = cvxopt.matrix(np.zeros(n_samples))

        # a_i \leq c
        G_slack = cvxopt.matrix(np.diag(np.ones(n_samples)))
        h_slack = cvxopt.matrix(np.ones(n_samples) * self._c)

        G = cvxopt.matrix(np.vstack((G_std, G_slack)))
        h = cvxopt.matrix(np.vstack((h_std, h_slack)))

        A = cvxopt.matrix(y, (1, n_samples))
        b = cvxopt.matrix(0.0)

        solution = cvxopt.solvers.qp(P, q, G, h, A, b)

        # Lagrange multipliers
        return np.ravel(solution['x'])
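A standalone sketch of how the two inequality blocks are stacked (illustrative; assumes numpy and cvxopt are installed). The constraints -a_i <= 0 and a_i <= C become a single system G x <= h:

import numpy as np
import cvxopt

n_samples, C = 3, 5.0
G_std = np.diag(-np.ones(n_samples))           # -a_i <= 0
h_std = np.zeros(n_samples)
G_slack = np.diag(np.ones(n_samples))          # a_i <= C
h_slack = np.ones(n_samples) * C
G = cvxopt.matrix(np.vstack((G_std, G_slack)))    # shape (2n, n)
h = cvxopt.matrix(np.hstack((h_std, h_slack)))    # length 2n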
Example #8
def _vstack(to_stack):
    if all(x.dtype == _NS_DTYPE for x in to_stack):
        # work around NumPy 1.6 bug
        new_values = np.vstack([x.view('i8') for x in to_stack])
        return new_values.view(_NS_DTYPE)
    else:
        return np.vstack(to_stack)
Example #9
def display_layer(X, filename="../images/layer.png"):
    """
    Produces an image, composed of the given N images, patches or neural network weights,
    stored in the array X. Saves it with the given filename.
    :param X: numpy array of size (NxD) — N images, patches or neural network weights
    :param filename: a string, the name of the produced file
    :return: None
    """
    if not isinstance(X, np.ndarray):
        raise TypeError("'X' must be a numpy array")
    N, D = X.shape
    d = get_reshaped_image_size(D)

    if N == 1:
        return X.reshape(d, d, 3)
    divizors = [n for n in range(1, N) if N % n == 0]
    im_sizes = divizors[int(len(divizors) / 2)], int(N / divizors[int(len(divizors) / 2)])
    for i in range(im_sizes[0]):
        # img_row = np.hstack((img_row, np.zeros((d, 1, 3))))
        img_row = np.hstack((np.zeros((d, 1, 3)), np.array(X[i * im_sizes[0], :].reshape(d, d, 3))))
        img_row = np.hstack((img_row, np.zeros((d, 1, 3))))
        for j in range(1, im_sizes[1]):
            img_row = np.hstack((img_row, X[i * im_sizes[1] + j, :].reshape(d, d, 3)))
            img_row = np.hstack((img_row, np.zeros((d, 1, 3))))
        if i == 0:
            img = img_row
        else:
            img = np.vstack((img, img_row))
        img = np.vstack((img, np.zeros((1, img.shape[1], 3))))
    img = np.vstack((np.zeros((1, img.shape[1], 3)), img))
    imsave(filename, img)
    return img
Example #10
def select_minibatch(x_win, masks, extra, y, window_size, i, minibatch_size, order=None, add_oov_noise=False, oov_noise_prob=0.0):
    n = len(masks)
    if order is None:
        order = range(n)
    ms = min(minibatch_size, n-i)
    if ms > 1:
        minibatch_mask = np.vstack([masks[j] for j in range(i, min(i+ms, n))])
        max_len = np.max(np.argmin(minibatch_mask, axis=1))
        if max_len == 0:
            max_len = len(masks[i])
        try:
            minibatch_mask = minibatch_mask[:, 0: max_len].reshape((ms, max_len))
        except:
            e = sys.exc_info()[0]
            print e
            print max_len
            print minibatch_mask
        minibatch_x = x_win[0: max_len, order[i: min(i+ms, n)], :]
        minibatch_extra = np.vstack([extra[j] for j in range(i, min(i+ms, n))])
        minibatch_y = np.vstack([y[j] for j in range(i, min(i+ms, n))])

    else:
        max_len = np.argmin(masks[i])
        if max_len == 0:
            max_len = len(masks[i])
        minibatch_mask = np.array(masks[i][0: max_len]).reshape((1, max_len))
        minibatch_x = x_win[0: max_len, order[i], :].reshape((max_len, 1, window_size))
        minibatch_extra = np.array(extra[i]).reshape((1, len(extra[i])))
        minibatch_y = np.array(y[i]).reshape((1, len(y[i])))

    if add_oov_noise:
        draws = np.random.rand(max_len, ms, window_size)
        minibatch_x = np.array(minibatch_x * np.array(draws > oov_noise_prob, dtype='int32'), dtype='int32')

    return minibatch_x, minibatch_mask, minibatch_extra, minibatch_y
Example #11
def standardize_polygons_str(data_str):
    """Given a POLYGON string, standardize the coordinates to a 1x1 grid.

    Input : data_str (taken from above)
    Output: tuple of polygon objects
    """
    # find all of the polygons in the letter (for instance an A
    # needs to be constructed from 2 polygons)
    path_strs = re.findall(r"\(\(([^\)]+?)\)\)", data_str.strip())

    # convert the data into a numpy array
    polygons_data = []
    for path_str in path_strs:
        data = np.array([
            tuple(map(float, x.split())) for x in path_str.strip().split(",")])
        polygons_data.append(data)

    # standardize the coordinates
    min_coords = np.vstack([data.min(0) for data in polygons_data]).min(0)
    max_coords = np.vstack([data.max(0) for data in polygons_data]).max(0)
    for data in polygons_data:
        data[:, ] -= min_coords
        data[:, ] /= (max_coords - min_coords)

    polygons = []
    for data in polygons_data:
        polygons.append(load_wkt(
            "POLYGON((%s))" % ",".join(" ".join(map(str, x)) for x in data)))

    return tuple(polygons)
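Hypothetical usage (assumes import re, numpy as np, and that load_wkt is shapely's WKT loader, for example from shapely.wkt import loads as load_wkt): a 4x2 rectangle comes back rescaled to the unit square.

wkt = "POLYGON((0 0,4 0,4 2,0 2,0 0))"
polys = standardize_polygons_str(wkt)
print(polys[0].bounds)   # (0.0, 0.0, 1.0, 1.0)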
Example #12
def upsample_swc(swc):

    tswc = swc.copy()

    id_idx = {}
    # Build a nodeid->idx hash table
    for nodeidx in range(tswc.shape[0]):
        id_idx[tswc[nodeidx, 0]] = nodeidx

    newid = tswc[:,0].max() + 1
    newnodes = []
    for nodeidx in range(tswc.shape[0]):
        pid = tswc[nodeidx, -1] # parent id

        if pid not in id_idx:
            # raise Exception('Parent with id %d not found' % pid)
            continue

        nodepos = tswc[nodeidx, 2:5]
        parentpos = tswc[id_idx[pid], 2:5]

        if np.linalg.norm(nodepos - parentpos) > 1.: # Add a node in the middle if too far
            mid_pos = nodepos + 0.5 * (parentpos - nodepos)
            newnodes.append( np.asarray([newid, 2, mid_pos[0], mid_pos[1], mid_pos[2], 1, pid]) )
            tswc[nodeidx, -1] = newid  # re-parent the current node to the inserted midpoint
            newid += 1

    # Stack the new nodes to the end of the swc file
    newnodes = np.vstack(newnodes)
    tswc = np.vstack((tswc, newnodes))
    return tswc
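A tiny sanity check (illustrative; SWC rows are [id, type, x, y, z, radius, parent_id], assumes numpy): two nodes three units apart get one midpoint node inserted between them.

import numpy as np

swc = np.array([[1, 2, 0.0, 0.0, 0.0, 1, -1],    # root node
                [2, 2, 3.0, 0.0, 0.0, 1,  1]])   # child, 3 units from its parent
up = upsample_swc(swc)
print(up.shape)   # (3, 7): one new node at (1.5, 0, 0), and the child is re-parented to it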
Example #13
File: vocab.py Project: GYGit/nengo
    def add(self, key, p):
        """Add a new semantic pointer to the vocabulary.

        The pointer value can be a `.SemanticPointer` or a vector.
        """
        if self.readonly:
            raise ReadonlyError(attr='Vocabulary',
                                msg="Cannot add semantic pointer '%s' to "
                                    "read-only vocabulary." % key)

        if not key[0].isupper():
            raise SpaParseError(
                "Semantic pointers must begin with a capital letter.")
        if not isinstance(p, pointer.SemanticPointer):
            p = pointer.SemanticPointer(p)

        if key in self.pointers:
            raise ValidationError("The semantic pointer %r already exists"
                                  % key, attr='pointers', obj=self)

        self.pointers[key] = p
        self.keys.append(key)
        self.vectors = np.vstack([self.vectors, p.v])

        # Generate vector pairs
        if self.include_pairs and len(self.keys) > 1:
            for k in self.keys[:-1]:
                self.key_pairs.append('%s*%s' % (k, key))
                v = (self.pointers[k] * p).v
                self.vector_pairs = np.vstack([self.vector_pairs, v])
Example #14
    def frame_based(self, annotated_ground_truth, system_output, resolution=0.01):
        # Convert event list into frame-based representation
        system_event_roll = self.list_to_roll(data=system_output, time_resolution=resolution)
        annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=resolution)

        # Fix durations of both event_rolls to be equal
        if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
            padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
            system_event_roll = numpy.vstack((system_event_roll, padding))

        if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
            padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
            annotated_event_roll = numpy.vstack((annotated_event_roll, padding))

        # Compute frame-based metrics
        Nref = sum(sum(annotated_event_roll))
        Ntot = sum(sum(system_event_roll))
        Ntp = sum(sum(system_event_roll + annotated_event_roll > 1))
        Nfp = sum(sum(system_event_roll - annotated_event_roll > 0))
        Nfn = sum(sum(annotated_event_roll - system_event_roll > 0))
        Nsubs = min(Nfp, Nfn)

        eps = numpy.spacing(1)

        results = dict()
        results['Rec'] = Ntp / (Nref + eps)
        results['Pre'] = Ntp / (Ntot + eps)
        results['F'] = 2 * ((results['Pre'] * results['Rec']) / (results['Pre'] + results['Rec'] + eps))
        results['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)

        return results
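The counting logic in miniature (illustrative sketch, assumes numpy): with a 3-frame, 2-class event roll, frames where both rolls are active are true positives, system-only activity is a false positive, and reference-only activity a false negative.

import numpy

ref_roll = numpy.array([[1, 0], [1, 1], [0, 0]])   # annotated event roll (frames x classes)
sys_roll = numpy.array([[1, 0], [0, 1], [1, 0]])   # system event roll
Ntp = sum(sum(sys_roll + ref_roll > 1))   # 2
Nfp = sum(sum(sys_roll - ref_roll > 0))   # 1
Nfn = sum(sum(ref_roll - sys_roll > 0))   # 1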
Example #15
    def train(self, training_set, test_set=None, pool=None, N=200):
        M = map if pool is None else pool.map

        user_items = [[] for i in range(self.nusers)]
        item_users = [[] for i in range(self.nitems)]
        [(user_items[u].append(a), item_users[a].append(u))
         for u, a in training_set]

        if test_set is not None:
            test_user_items = defaultdict(list)
            [test_user_items[u].append(a) for u, a in test_set]
            test_args = [(u, t, user_items[u])
                         for u, t in test_user_items.items()]

        for i in range(10):
            print("Updating users")
            vtv = np.dot(self.V.T, self.V)
            self.U = np.vstack(M(_function_wrapper(self,
                                                   "compute_user_update",
                                                   vtv), user_items))

            print("Updating items")
            utu = np.dot(self.U.T, self.U)
            self.V = np.vstack(M(_function_wrapper(self,
                                                   "compute_item_update",
                                                   utu), item_users))

            # Compute the held out recall.
            if test_set is not None:
                print("Computing held out recall")
                yield np.mean(M(_function_wrapper(self, "compute_recall", N=N),
                                test_args))
            else:
                yield 0.0
Example #16
def test_SeedCoherenceAnalyzer():
    """ Test the SeedCoherenceAnalyzer """
    methods = (None,
           {"this_method": 'welch', "NFFT": 256},
           {"this_method": 'multi_taper_csd'},
           {"this_method": 'periodogram_csd', "NFFT": 256})

    Fs = np.pi
    t = np.arange(256)
    seed1 = np.sin(10 * t) + np.random.rand(t.shape[-1])
    seed2 = np.sin(10 * t) + np.random.rand(t.shape[-1])
    target = np.sin(10 * t) + np.random.rand(t.shape[-1])
    T = ts.TimeSeries(np.vstack([seed1, target]), sampling_rate=Fs)
    T_seed1 = ts.TimeSeries(seed1, sampling_rate=Fs)
    T_seed2 = ts.TimeSeries(np.vstack([seed1, seed2]), sampling_rate=Fs)
    T_target = ts.TimeSeries(np.vstack([seed1, target]), sampling_rate=Fs)
    for this_method in methods:
        if this_method is None or this_method['this_method']=='welch':
            C1 = nta.CoherenceAnalyzer(T, method=this_method)
            C2 = nta.SeedCoherenceAnalyzer(T_seed1, T_target,
                                           method=this_method)
            C3 = nta.SeedCoherenceAnalyzer(T_seed2, T_target,
                                           method=this_method)

            npt.assert_almost_equal(C1.coherence[0, 1], C2.coherence[1])
            npt.assert_almost_equal(C2.coherence[1], C3.coherence[0, 1])
            npt.assert_almost_equal(C1.phase[0, 1], C2.relative_phases[1])
            npt.assert_almost_equal(C1.delay[0, 1], C2.delay[1])

        else:
            npt.assert_raises(ValueError,nta.SeedCoherenceAnalyzer, T_seed1,
                              T_target, this_method)
Example #17
        def get_new_cell(self):
            """Returns new basis vectors"""
            a = np.sqrt(self.a)
            b = np.sqrt(self.b)
            c = np.sqrt(self.c)

            ad = self.atoms.cell[0] / np.linalg.norm(self.atoms.cell[0])

            Z = np.cross(self.atoms.cell[0], self.atoms.cell[1])
            Z /= np.linalg.norm(Z)
            X = ad - np.dot(ad, Z) * Z
            X /= np.linalg.norm(X)
            Y = np.cross(Z, X)

            alpha = np.arccos(self.x / (2 * b * c))
            beta = np.arccos(self.y / (2 * a * c))
            gamma = np.arccos(self.z / (2 * a * b))

            va = a * np.array([1, 0, 0])
            vb = b * np.array([np.cos(gamma), np.sin(gamma), 0])
            cx = np.cos(beta)
            cy = (np.cos(alpha) - np.cos(beta) * np.cos(gamma)) \
                / np.sin(gamma)
            cz = np.sqrt(1. - cx * cx - cy * cy)
            vc = c * np.array([cx, cy, cz])

            abc = np.vstack((va, vb, vc))
            T = np.vstack((X, Y, Z))
            return np.dot(abc, T)
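The same cell-parameter-to-vector construction as a standalone sketch (assumes numpy; a, b, c are the cell lengths and alpha, beta, gamma the angles, in the cell's own frame):

import numpy as np

a, b, c = 3.0, 4.0, 5.0
alpha, beta, gamma = np.radians([70.0, 80.0, 95.0])
va = a * np.array([1, 0, 0])
vb = b * np.array([np.cos(gamma), np.sin(gamma), 0])
cx = np.cos(beta)
cy = (np.cos(alpha) - np.cos(beta) * np.cos(gamma)) / np.sin(gamma)
cz = np.sqrt(1.0 - cx * cx - cy * cy)
vc = c * np.array([cx, cy, cz])
cell = np.vstack((va, vb, vc))
assert np.isclose(np.dot(vb, vc) / (b * c), np.cos(alpha))   # alpha is recovered between vb and vc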
Example #18
def load_hdf5_file(input_file):
    '''Load an HDF5 file into a Timestream object

    Return a 2-tuple containing a dictionary of metadata and a Timestream
    object'''

    with h5py.File(input_file, 'r') as h5_file:
        if 'time_series' in h5_file:
            dataset = h5_file['time_series']
            return dict(h5_file.attrs.items()), Timestream(
                time_s=dataset['time_s'].astype(float),
                pctime=dataset['pctime'].astype(float),
                phb=dataset['phb'].astype(int),
                record=dataset['record'].astype(int),
                demodulated=np.vstack([dataset[x].astype(float) for x in (
                    'dem_Q1_ADU',
                    'dem_U1_ADU',
                    'dem_U2_ADU',
                    'dem_Q2_ADU')]).transpose(),
                power=np.vstack([dataset[x].astype(float) for x in (
                    'pwr_Q1_ADU',
                    'pwr_U1_ADU',
                    'pwr_U2_ADU',
                    'pwr_Q2_ADU')]).transpose(),
                rfpower_db=dataset['rfpower_dB'].astype(float),
                freq_hz=dataset['freq_Hz'].astype(float),
            )
        else:
            return None, None
Example #19
def split_data(min_timbre=100, timbre_width=12, min_songs=4, data_file='mfcc'):
    ArtistMapping, ArtistIdMapping, data = generate_data(
        min_timbre=min_timbre, timbre_width=timbre_width, min_songs=min_songs)
    train = numpy.zeros((0, min_timbre * timbre_width + 1))
    validation = numpy.zeros((0, min_timbre * timbre_width + 1))
    test = numpy.zeros((0, min_timbre * timbre_width + 1))

    print 'Splitting data...'
    for artist_id in ArtistIdMapping:
        indices = ArtistIdMapping[artist_id][1]

        valid_data = data[indices[0]].reshape(1, -1)
        validation = numpy.vstack((validation, valid_data))

        test_indx = 1 + max(int((len(indices) - 1) * .3), 1)

        for i in range(1, test_indx):
            test_data = data[indices[i]].reshape(1, -1)
            test = numpy.vstack((test, test_data))

        for i in range(test_indx, len(indices)):
            train_data = data[indices[i]].reshape(1, -1)
            train = numpy.vstack((train, train_data))

    print 'Saving Data...'

    numpy.save('data/' + data_file + '_songs_{}'.format(min_songs) + '_test', test, allow_pickle=True)
    numpy.save('data/' + data_file + '_songs_{}'.format(min_songs) + '_train', train, allow_pickle=True)
    numpy.save('data/' + data_file + '_songs_{}'.format(min_songs) + '_valid', validation, allow_pickle=True)
    numpy.save('data/' + data_file + '_dict_songs_{}'.format(min_songs), ArtistMapping, allow_pickle=True)
    numpy.save('data/' + data_file + '_dictId_songs_{}'.format(min_songs), ArtistIdMapping, allow_pickle=True)
    return ArtistMapping, ArtistIdMapping, train, validation, test
Example #20
def dobetterstuff(inpath):
    data_files = [f for f in os.listdir(inpath) if f.endswith('.mel.npy')]
    random.shuffle(data_files)
    artists = set([f[:18] for f in data_files])
    artist_string_to_id = dict([(s,i) for i, s in enumerate(artists)])

    def get_split(datafiles___, splitpercent):
        # gen = filtered_stratified_split(datafiles___,
        #                                 sklearn.cross_validation.StratifiedShuffleSplit,
        #                                 [1] * len(datafiles___), n_iterations=1, test_size=splitpercent)
        gen = sklearn.cross_validation.ShuffleSplit(len(datafiles___), 1, splitpercent)
        for i_trs, i_tes in gen:
            return [datafiles___[i] for i in i_trs],  [datafiles___[i] for i in i_tes]

    training_files, test_files =  get_split(data_files, .2)
    training_files, validation_files = get_split(training_files, .2)

    print training_files
    print test_files
    print validation_files

    train_set_y = np.hstack([[artist_string_to_id[f[:18]]] * 129 for f in training_files])
    train_set_x = np.vstack([np.load(os.path.join(inpath, f)) for f in training_files])
    test_set_y = np.hstack([[artist_string_to_id[f[:18]]] * 129 for f in test_files])
    test_set_x = np.vstack([np.load(os.path.join(inpath, f)) for f in test_files])
    validation_set_y = np.hstack([[artist_string_to_id[f[:18]]] * 129 for f in validation_files])
    validation_set_x = np.vstack([np.load(os.path.join(inpath, f)) for f in validation_files])

    datasets = [(train_set_x, train_set_y), (validation_set_x, validation_set_y), (test_set_x, test_set_y)]
    return datasets
Example #21
def sequence():
    binnedFlour = numpy.zeros(binNumber)
    for iteration in range(iterations):
        print 'recording trace {0} out of {1}'.format(iteration, iterations)
        trfpga.perform_time_resolved_measurement()
        trigger.trigger('PaulBox')
        timetags = trfpga.get_result_of_measurement().asarray
        print timetags
        #saving timetags
        dv.cd(['','Experiments', experimentName, dirappend, 'timetags'],True )
        dv.new('timetags iter{0}'.format(iteration),[('Time', 'sec')],[('PMT counts','Arb','Arb')] )
        dv.add_parameter('iteration',iteration)
        ones = numpy.ones_like(timetags)
        dv.add(numpy.vstack((timetags,ones)).transpose())
        #add to binning of the entire sequence
        newbinned = numpy.histogram(timetags, binArray )[0]
        binnedFlour = binnedFlour + newbinned
        time.sleep(1)  # corresponds to the cooling time between dark times. (1 sec lasercooling)
    print 'getting result and adding to data vault'
    dv.cd(['','Experiments', experimentName, dirappend] )
    dv.new('binnedFlourescence',[('Time', 'sec')], [('PMT counts','Arb','Arb')] )
    data = numpy.vstack((binArray[0:-1], binnedFlour)).transpose()
    dv.add(data)
    dv.add_parameter('plotLive',True)
    dvParameters.saveParameters(dv, globalDict)
    dvParameters.saveParameters(dv, pboxDict)
Example #22
def study_redmapper_lrg_3d(hemi='north'):
    # create 3d grid object
    grid = grid3d(hemi=hemi)
    
    # load SDSS data
    sdss = load_sdss_data_both_catalogs(hemi)
    
    # load redmapper catalog
    rm = load_redmapper(hemi=hemi)
    
    # get XYZ positions (Mpc) of both datasets
    x_sdss, y_sdss, z_sdss = grid.xyz_from_radecz(sdss['ra'], sdss['dec'], sdss['z'], applyzcut=False)
    x_rm, y_rm, z_rm = grid.xyz_from_radecz(rm['ra'], rm['dec'], rm['z_spec'], applyzcut=False)
    pos_sdss = np.vstack([x_sdss, y_sdss, z_sdss]).T
    pos_rm = np.vstack([x_rm, y_rm, z_rm]).T

    # build a couple of KDTree's, one for SDSS, one for RM.
    from sklearn.neighbors import KDTree
    tree_sdss = KDTree(pos_sdss, leaf_size=30)
    tree_rm = KDTree(pos_rm, leaf_size=30)

    lrg_counts = tree_sdss.query_radius(pos_rm, 100., count_only=True)
    pl.clf()
    pl.hist(lrg_counts, bins=50)
    
    
    ipdb.set_trace()
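A self-contained sketch of the neighbour-counting step (assumes scikit-learn and numpy; the positions here are random stand-ins for the catalogue coordinates):

import numpy as np
from sklearn.neighbors import KDTree

rng = np.random.default_rng(0)
pos_sdss = rng.uniform(0.0, 1000.0, size=(5000, 3))   # fake LRG positions, Mpc
pos_rm = rng.uniform(0.0, 1000.0, size=(10, 3))       # fake cluster positions, Mpc
tree_sdss = KDTree(pos_sdss, leaf_size=30)
lrg_counts = tree_sdss.query_radius(pos_rm, r=100.0, count_only=True)
print(lrg_counts)   # one neighbour count per cluster position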
Example #23
def image_border(rgb, left=0, right=0, top=0, bottom=0, color=[1, 1, 1]):
    orig_shape = rgb.shape

    if left > 0:
        # note: do this every time because it changes throughout
        height, width = rgb.shape[0:2]
        pad = rgb_pad(height, left, color)
        rgb = np.hstack((pad, rgb))

    if right > 0:
        height, width = rgb.shape[0:2]
        pad = rgb_pad(height, right, color)
        rgb = np.hstack((rgb, pad))

    if top > 0:
        height, width = rgb.shape[0:2]
        pad = rgb_pad(top, width, color)
        rgb = np.vstack((pad, rgb))
        assert rgb.shape[0] == height + top

    if bottom > 0:
        height, width = rgb.shape[0:2]
        pad = rgb_pad(bottom, width, color)
        rgb = np.vstack((rgb, pad))
        assert rgb.shape[0] == height + bottom

    assert rgb.shape[0] == orig_shape[0] + top + bottom
    assert rgb.shape[1] == orig_shape[1] + left + right

    return rgb
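The helper rgb_pad is not shown above; a plausible stand-in (a solid-colour block) plus a call, purely as a sketch:

import numpy as np

def rgb_pad(height, width, color=[1, 1, 1]):
    # assumed helper: a height x width block filled with the given colour
    return np.ones((height, width, 3)) * np.asarray(color, dtype=float)

img = np.zeros((4, 5, 3))
framed = image_border(img, left=1, right=1, top=2, bottom=2)
assert framed.shape == (8, 7, 3)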
Example #24
def eventPhase(iyd,n=[1,0],eps=1e-9):
  '''
  Construct a analytic signal like version of the system
  '''
  # Work out the angle of the normal vector
  a = arctan2(n[1],n[0])
  z = (iyd[0,:]+1j*iyd[1,:])*exp(1j*a)
  # Find zero crossings of first co-ordinate
  idx0 = where(logical_and(z.real[:-1]<0,z.real[1:]>0))[0]
  # Produce a two dimensional array which we can be interpolated to 
  # estimate phase by assuming a linear trend between events
  idx2P = idx0-eps
  tme = arange(z.size)
  fv = hstack([vstack([idx0,zeros_like(idx0)]),vstack([idx2P,2*pi*ones_like(idx2P)])])
  fv = array(sorted(fv.T,key=lambda x: x[0])).T
  # Fix edge condition
  if fv[1,0] > pi:
    fv = fv[:,1:]
  # Interpolate phase
  nph = interp1d(*fv,bounds_error=False)(tme)
  # We now have nans on the ends, do a linear interpolation to get average 
  # phase velocity and add on to the ends a linear trend like this
  gdIdx = logical_not(isnan(nph))
  nph[gdIdx]=unwrap(nph[gdIdx])
  m,c = polyfit(tme[gdIdx],nph[gdIdx],1)
  if fv[0,-1]+1!= nph.size:
    nph[fv[0,-1]+1:] = nph[fv[0,-1]]+m*(arange(nph[fv[0,-1]+1:].size)+1)
  if fv[0,0]!=0:
    nph[:fv[0,0]] = nph[fv[0,0]]-m*(nph[:fv[0,0]].size-arange(nph[:fv[0,0]].size))
  return(nph)
Example #25
    def process_current_split(self):
        # Training of node on training data
        for data, label in self.input_node.request_data_for_training(False):
            self.train(data, label)

        # Compute performance metrics SSNR_AS and SSNR_vs on the training
        # data
        performance = {"ssnr_as" : self.ssnr.ssnr_as(),
                       "ssnr_vs" : self.ssnr.ssnr_vs()}

        # Collect test data (if any)
        X_test = None
        D_test = None
        for data, label in self.input_node.request_data_for_testing():
            if label == self.erp_class_label:
                D = numpy.diag(numpy.ones(data.shape[0]))
            else:
                D = numpy.zeros((data.shape[0], data.shape[0]))

            if X_test is None:
                X_test = deepcopy(data)
                D_test = D
            else:
                X_test = numpy.vstack((X_test, data))
                D_test = numpy.vstack((D_test, D))

        # If there was separate test data:
        # compute metrics that require test data
        if X_test is not None:
            performance["ssnr_vs_test"] = self.ssnr.ssnr_vs_test(X_test, D_test)

        # Add SSNR-based metrics computed in this split to result collection
        self.ssnr_collection.add_split(performance, train=False,
                                       split=self.current_split,
                                       run=self.run_number)
Example #26
	def _normals(self, hits, directs):
		"""
		Finds the normal to the parabola in a bunch of intersection points, by
		taking the derivative and rotating it. Used internally by quadric.
		
		Arguments:
		hits - the coordinates of intersections, as an n by 3 array.
		directs - directions of the corresponding rays, n by 3 array.
		"""
		hit = N.dot(N.linalg.inv(self._working_frame), N.vstack((hits.T, N.ones(hits.shape[0]))))
		dir_loc = N.dot(self._working_frame[:3,:3].T, directs.T)

		partial_x = 2.*hit[0]*self.a+self.c*hit[1]+self.d
		partial_y = 2.*hit[1]*self.b+self.c*hit[0]+self.e
		partial_z = -1*N.ones(N.shape(hits)[0])
		
		local_normal = N.vstack((partial_x, partial_y, partial_z))
		local_unit = local_normal/N.sqrt(N.sum(local_normal**2, axis=0))

		down = N.sum(dir_loc * local_unit, axis=0) > 0.
		local_unit[:,down] *= -1

		normals = N.dot(self._working_frame[:3,:3], local_unit)

		return normals
Example #27
File: gmf.py Project: me-manu/gmf
    def r_log_spiral(self,phi):
	"""
	return distance from center for angle phi of logarithmic spiral

	Parameters
	----------
	phi: scalar or np.array with polar angle values

	Returns
	-------
	r(phi) = rx * exp(b * phi) as np.array

	Notes
	-----
	see http://en.wikipedia.org/wiki/Logarithmic_spiral
	"""
	if np.isscalar(phi):
	    phi = np.array([phi])
	ones = np.ones(phi.shape[0])

	# self.rx.shape = 8
	# phi.shape = p
	# then result is given as (8,p)-dim array, each row stands for one rx

	result = np.tensordot(self.rx , np.exp((phi - 3.*pi*ones) / np.tan(pi/2. - self.idisk)),axes = 0)
	result = np.vstack((result, np.tensordot(self.rx , np.exp((phi - pi*ones) / np.tan(pi/2. - self.idisk)),axes = 0) ))
	result = np.vstack((result, np.tensordot(self.rx , np.exp((phi + pi*ones) / np.tan(pi/2. - self.idisk)),axes = 0) ))
	return np.vstack((result, np.tensordot(self.rx , np.exp((phi + 3.*pi*ones) / np.tan(pi/2. - self.idisk)),axes = 0) ))
Example #28
def generate_delta(params):
    print params
    D = params["D"]
    beta = params["beta"]
    npts = params["npts"]
    print "DOS half-bandwidth : ", D
    Gamma = params["gamma"]
    dos_prec = 0.1 

    omega_grid = np.arange(-2*D,2*D,dos_prec)
    dos_vals = dos_function(omega_grid) 
    data = np.vstack([omega_grid,dos_vals]).transpose()
    np.savetxt("dos.dat",data)

    fermi = lambda w : 1. / (1.+np.exp(beta*w))
    delta_wt = lambda tau, w : -fermi(w) * np.exp(tau*w) * dos_function(w) * Gamma
    delta_f = lambda tau : integrate.quad(lambda w: -fermi(w) * np.exp(tau*w) * dos_function(w) * Gamma, -2*D, 2*D) 

    tau_grid = np.linspace(0,beta,npts+1)
    delta_vals = np.array([delta_f(x)[0] for x in tau_grid])
    data_out = np.vstack([range(npts+1), delta_vals, delta_vals]) 
    fname = "delta_tau.dat"
    np.savetxt("delta_tau.dat", data_out.transpose())
    print "Saved", fname

    kramers_kronig_imag = lambda z : integrate.quad(lambda w: np.imag(dos_function(w) / (1j*z - w)), -2*D, 2*D)
    kramers_kronig_real = lambda z : integrate.quad(lambda w: np.real(dos_function(w) / (1j*z - w)), -2*D, 2*D)
    matsubara_grid = (2*np.arange(0, npts, 1, dtype=np.float) + 1)*np.pi/beta
    delta_iw = np.array([[x, kramers_kronig_real(x)[0], kramers_kronig_imag(x)[0]] for x in matsubara_grid])
    fname = "delta_iw.dat"
    np.savetxt(fname, delta_iw)
    print "Saved", fname
def generate_seq_to_one_hot(X, Y, vocab_size, batch_size):
    n_samples = len(X)
    seq_len = len(X[0])
    start = 0
    while 1:
        stop = start + batch_size
        chunk = X[start: stop]
        slices = []
        for i, seq_indexes in enumerate(chunk):
            x_slice = np.zeros([seq_len, vocab_size])
            x_slice[np.arange(seq_len), seq_indexes] = 1
            slices.append(x_slice)
        x_out = np.stack(slices, axis=0)
        y_out = Y[start: stop]
        start += batch_size
        if (start + batch_size) > n_samples:
            print 'reshuffling, %s + %s > %s' % (start, batch_size, n_samples)
            remaining_X = X[start: start + batch_size]
            remaining_Y = Y[start: start + batch_size]
            random_index = np.random.permutation(n_samples)
            X = np.vstack((remaining_X, X[random_index, :]))
            Y = np.vstack((remaining_Y, Y[random_index, :]))
            start = 0
            n_samples = len(X)
        yield x_out, y_out
Example #30
 def offsetPlane(plane, x, y):
     """
     Takes a numpy 2D array and returns the same plane offset by x and y,
     adding rows and columns of 0 values
     """
     height, width = plane.shape
     dataType = plane.dtype
     # shift x by cropping, creating a new array of columns and stacking
     # horizontally
     if abs(x) > 0:
         newCols = zeros((height, abs(x)), dataType)
         x1 = max(0, 0 - x)
         x2 = min(width, width - x)
         crop = plane[0:height, x1:x2]
         if x > 0:
             plane = hstack((newCols, crop))
         else:
             plane = hstack((crop, newCols))
     # shift y by cropping, creating a new array of rows and stacking
     # vertically
     if abs(y) > 0:
         newRows = zeros((abs(y), width), dataType)
         y1 = max(0, 0 - y)
         y2 = min(height, height - y)
         crop = plane[y1:y2, 0:width]
         if y > 0:
             plane = vstack((newRows, crop))
         else:
             plane = vstack((crop, newRows))
     return plane
Example #31
                 
fontP = FontProperties()
fontP.set_size('small')
                 
for i,ax_alh in enumerate(grid_alh):

    dConv = np.array([])
    dnConv = np.array([])
    
    ax_alh.set_title('{0} August, 2010'.format(int(odkeys[i].split('_')[2])), fontproperties=fontP )
    
    if resDict[odkeys[i]]['Converged']:
        
        convergedSoln = resDict[odkeys[i]]['Converged']    
        
        dConv = np.vstack([j[0:10] for j in convergedSoln])
        cLatdata_c = np.vstack([j[-2] for j in convergedSoln])
        cLondata_c = np.vstack([j[-1] for j in convergedSoln])

    if resDict[odkeys[i]]['NonConverged']:

        nconvergedSoln = resDict[odkeys[i]]['NonConverged']    
            
        dnConv = np.vstack([j[0:10] for j in nconvergedSoln])
        cLatdata_nc = np.vstack([j[-2] for j in nconvergedSoln])
        cLondata_nc = np.vstack([j[-1] for j in nconvergedSoln])    

    plt.sca(ax_alh)

    if dConv.any():
        
Example #32
    T = model.T
    cost = 0

    b_t = b

    for t in xrange(0, T - 1):
        x_t, s_t = decompose_belief(b_t, model)
        cost += model.alpha_belief * ml.trace(s_t * s_t)
        cost += model.alpha_control * ml.sum(U[:, t].T * U[:, t])
        b_t = belief_dynamics(b_t, U[:, t], None, model)

    x_T, s_T = decompose_belief(b_t, model)
    cost += model.alpha_final_belief * ml.trace(s_T * s_T)

    return cost


if __name__ == '__main__':
    import model
    model = model.Model()
    model.xDim = 4
    x = np.matrix(np.random.rand(4, 1))
    svec = np.matrix([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).T
    b = np.vstack((x, svec))
    x, s, constraints = cvxpy_decompose_belief(b, model)

    x_actual, s_actual = decompose_belief(b, model)

    IPython.embed()
Example #33
def compute_path_pinv(length=50):
  deg = 3
  x = np.arange(length*1.0)
  X = np.vstack(tuple(x**n for n in range(deg, -1, -1))).T
  pinv = np.linalg.pinv(X)
  return pinv
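Usage sketch (assumes numpy): multiplying the pseudo-inverse by sampled y-values gives the cubic polynomial coefficients, highest power first, here recovering a known polynomial exactly.

import numpy as np

pinv = compute_path_pinv(length=50)
x = np.arange(50.0)
y = 0.002 * x**3 - 0.1 * x**2 + 2.0 * x - 5.0
coeffs = np.dot(pinv, y)
print(coeffs)   # approximately [0.002, -0.1, 2.0, -5.0]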
Example #34
def fill_between_steps(ax, x, y1, y2=0, step_where='pre', **kwargs):
    ''' Fill between for a step plot histogram.
    Modified from original code produced by T. Caswell (https://github.com/tacaswell).

    Parameters
    ----------
    ax : Axes
       The axes to draw to

    x : array-like
        Array/vector of index values.

    y1 : array-like or float
        Array/vector of values to be filled under.
    y2 : array-Like or float, optional
        Array/vector or bottom values for filled area. Default is 0.

    step_where : {'pre', 'post', 'mid'}
        where the step happens, same meanings as for `step`

    **kwargs will be passed to the matplotlib fill_between() function.

    Returns
    -------
    ret : PolyCollection
       The added artist

    '''

    # Modification to account for histogram-like bin-edges
    if len(x) == len(y1) + 1 and len(y1) != 1:
        kwargs['linewidth'] = 0
        if np.iterable(y2):  # array-like bottom; replaces the removed collections.Container check
            y2_trunc = y2[0:2]
        else:
            y2_trunc = y2
        fill_between_steps(ax,
                           x[0:2],
                           y1[0:2],
                           y2=y2_trunc,
                           step_where='post',
                           **kwargs)
        kwargs['label'] = None
        return fill_between_steps(ax,
                                  x[1:],
                                  y1,
                                  y2=y2,
                                  step_where='pre',
                                  **kwargs)

    # Account for case in which there is exactly one bin
    elif len(y1) == 1:
        y1 = y1[0]
        if np.iterable(y2):
            y2 = y2[0]

    if step_where not in ['pre', 'post', 'mid']:
        raise ValueError("where must be one of {{'pre', 'post', 'mid'}} "
                         "You passed in {wh}".format(wh=step_where))

    # make sure y values are up-converted to arrays
    if np.isscalar(y1):
        y1 = np.ones_like(x) * y1

    if np.isscalar(y2):
        y2 = np.ones_like(x) * y2

    # temporary array for up-converting the values to step corners
    # 3 x 2N - 1 array

    vertices = np.vstack((x, y1, y2))

    # this logic is lifted from lines.py
    # this should probably be centralized someplace
    if step_where == 'pre':
        steps = ma.zeros((3, 2 * len(x) - 1), float)
        steps[0, 0::2], steps[0, 1::2] = vertices[0, :], vertices[0, :-1]
        steps[1:, 0::2], steps[1:, 1:-1:2] = vertices[1:, :], vertices[1:, 1:]

    elif step_where == 'post':
        steps = ma.zeros((3, 2 * len(x) - 1), float)
        steps[0, ::2], steps[0, 1:-1:2] = vertices[0, :], vertices[0, 1:]
        steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :-1]

    elif step_where == 'mid':
        steps = ma.zeros((3, 2 * len(x)), float)
        steps[0, 1:-1:2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
        steps[0, 2::2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
        steps[0, 0] = vertices[0, 0]
        steps[0, -1] = vertices[0, -1]
        steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :]
    else:
        raise RuntimeError(
            "should never hit end of if-elif block for validated input")

    # un-pack
    xx, yy1, yy2 = steps

    # now to the plotting part:
    return ax.fill_between(xx, yy1, y2=yy2, **kwargs)
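Hypothetical usage with a histogram (assumes matplotlib and the excerpt's own module-level imports, e.g. numpy.ma as ma; len(edges) == len(counts) + 1 exercises the bin-edge branch above):

import numpy as np
import matplotlib.pyplot as plt

counts, edges = np.histogram(np.random.randn(1000), bins=20)
fig, ax = plt.subplots()
fill_between_steps(ax, edges, counts, step_where='post')
plt.show()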
Example #35
    def __init__(
        self,
        Daq_sample_rate,
        edge_volt,
        pixel_number=500,
        average_number=1,
        continuous=False,
        return_image=True,
    ):
        """
        Object to run raster PMT scanning.

        Parameters
        ----------
        Daq_sample_rate : int
            Sampling rate used to generate the waveforms.
        edge_volt : float
            The bounding raster scan voltage.
        pixel_number : int, optional
            Number of pixels in the final image. The default is 500.
        average_number : int, optional
            Number of frames to average on. The default is 1.
        continuous : bool, optional
            Whether to do continuous scanning or not. The default is False.
        return_image : bool, optional
            Whether to return the processed image. The default is True.

        Returns
        -------
        None.

        """

        # ---------------------Generate the waveforms---------------------------
        self.Daq_sample_rate = Daq_sample_rate
        self.averagenum = average_number
        self.edge_volt = edge_volt
        self.pixel_number = pixel_number
        self.flag_continuous = continuous
        self.flag_return_image = return_image

        # Generate galvo samples
        self.samples_X, self.samples_Y = NIDAQ.wavegenerator.waveRecPic(
            sampleRate=self.Daq_sample_rate,
            imAngle=0,
            voltXMin=-1 * self.edge_volt,
            voltXMax=self.edge_volt,
            voltYMin=-1 * self.edge_volt,
            voltYMax=self.edge_volt,
            xPixels=self.pixel_number,
            yPixels=self.pixel_number,
            sawtooth=True,
        )
        # Calculate number of all samples to feed to daq.
        self.Totalscansamples = len(self.samples_X) * self.averagenum
        # Number of samples in each individual line of x scanning, including flybacks.
        # Divided by the pixel number, as it is repeated for each y line.
        self.total_X_sample_number = int(len(self.samples_X) / self.pixel_number)

        self.repeated_samples_X = np.tile(self.samples_X, self.averagenum)
        self.repeated_samples_Y = np.tile(self.samples_Y, self.averagenum)

        self.Galvo_samples = np.vstack(
            (self.repeated_samples_X, self.repeated_samples_Y)
        )
Example #36
 def __add__(self, pop):
     """
     描述: 种群个体合并
     用法: 假设pop1, pop2是两个种群,它们的个体数可以相等也可以不相等,此时
          pop = pop1 + pop2,即可完成对pop1和pop2两个种群个体的合并
     """
     
     if self.Encoding != pop.Encoding:
         raise RuntimeError('error in Population: Encoding disagree. (两种群染色体的编码方式必须一致。)')
     if self.conordis != pop.conordis:
         raise RuntimeError('error in Population: Conordis disagree. (两种群染色体所代表的变量的连续或离散性必须一致。)')
     if np.all(self.Field == pop.Field) == False:
         raise RuntimeError('error in Population: Field disagree. (两者的译码矩阵必须一致。)')
     return Population(self.Encoding, self.conordis, self.Field, self.sizes + pop.sizes, np.vstack([self.Chrom, pop.Chrom]), np.vstack([self.ObjV, pop.ObjV]), np.vstack([self.FitnV, pop.FitnV]), np.vstack([self.CV, pop.CV]), np.vstack([self.Phen, pop.Phen]))
Example #37
def load_polygons(map_loc, polygons = {}):
    """
    Load polygons for a given file 
    """
    with io.open(map_loc, mode='r', encoding="ISO-8859-1") as data_file:
        json_map = json.load(data_file)

    features = json_map['features']
    location_points = {}  ## location points will be stored here
    # polygons = {}         ## polygons will be stored here
    ## key name for each feature
    if 'name' in features[0]['properties']:
        locName = 'name'
    elif 'Name' in features[0]['properties']:
        locName = 'Name'
    elif 'NAME' in features[0]['properties']:
        locName = 'NAME'
    else:
        print("Name property not found in GeoJSON.")
        sys.exit(0)

    for loc in features: ## iterate through features (locations)
        if loc['geometry']:
            # print(loc['geometry'])
            poly = np.asarray(loc['geometry']['coordinates']) ## get coordinates
            ## standardised location name (remove diacritics)
            # print("Loading", loc['properties'][locName])
            location = removeDiacritics(loc['properties'][locName])
            polygons[location] = []
            location_points[location] = []
            if loc['geometry']['type'] == 'MultiPolygon': ## multiple parts detected
                for part in np.asarray(poly): ## iterate over each component polygon
                    for coords in np.asarray(part): ## iterate over coordinates
                        coords = np.array(coords)
                        xs = coords[:, 0] ## longitudes
                        ys = coords[:, 1] ## latitudes
                        # TODO: There are six countries that are both in Eastern and Western
                        # Hemisphere's across the 180th meridian. Plotting those may be a pain.
                        # We should be normalizing them based on the country's polygons and 
                        # other polygons in the spread, but for now ignore such polygons.
                        # Ignore polygons in Alaska that in the eastern hemisphere
                        if location == "Alaska":
                            if any(i > 0 for i in xs):
                                break
                        # Ignore polygons in Russia that in the western hemisphere
                        if location in ["Russia", "Russian Federation"]:
                            if any(i < 0 for i in xs):
                                # print("Skipping", len(xs), len(ys), xs)
                                break
                        ## append coordinates to location's list of coordinates
                        location_points[location].append(np.vstack(list(zip(xs, ys))))
            if loc['geometry']['type'] == 'Polygon': ## location is single part
                for coords in np.asarray(poly): ## iterate over coordinates
                    coords = np.array(coords)
                    xs = coords[:, 0] ## longitudes
                    ys = coords[:, 1] ## latitudes
                    ## append coordinates to location's list of coordinates
                    location_points[location].append(np.vstack(list(zip(xs, ys))))
            complete_location = []
            ## iterate and create a polygon for each component of a location
            for part in location_points[location]:
                complete_location.append(Polygon(part, True))
            ## assign list of polygons to a location
            polygons[location] = complete_location
    # print('%d polygons loaded:\n%s'%(len(polygons.keys()), polygons.keys()))
    # sys.exit(0)
    return polygons
Example #38
def TrainModel(idfold=0):

  from setupmodel import GetSetupKfolds, GetCallbacks, GetOptimizer, GetLoss
  from buildmodel import get_unet, thick_slices, unthick_slices, unthick

  ###
  ### set up output, logging and callbacks 
  ###

  kfolds = settings.options.kfolds

  logfileoutputdir= '%s/%03d/%03d' % (settings.options.outdir, kfolds, idfold)
  os.system ('mkdir -p ' + logfileoutputdir)
  os.system ('mkdir -p ' + logfileoutputdir + '/nii')
  os.system ('mkdir -p ' + logfileoutputdir + '/liver')
  print("Output to\t", logfileoutputdir)
  
  
   ###
   ### load data
   ###

  print('loading memory map db for large dataset')
  numpydatabase = np.load(settings._globalnpfile)
  (train_index,test_index,valid_index) = GetSetupKfolds(settings.options.dbfile, kfolds, idfold)

  print('copy data subsets into memory...')
  axialbounds = numpydatabase['axialliverbounds']
  dataidarray = numpydatabase['dataid']
  
  dbtrainindex = np.isin(dataidarray, train_index )
  dbtestindex  = np.isin(dataidarray, test_index  )
  dbvalidindex = np.isin(dataidarray, valid_index ) 
  
  subsetidx_train  = np.all( np.vstack((axialbounds , dbtrainindex)) , axis=0 )
  subsetidx_test   = np.all( np.vstack((axialbounds , dbtestindex )) , axis=0 )
  subsetidx_valid  = np.all( np.vstack((axialbounds , dbvalidindex)) , axis=0 )
  
  print(np.sum(subsetidx_train) + np.sum(subsetidx_test) + np.sum(subsetidx_valid))
  print(min(np.sum(axialbounds ),np.sum(dbtrainindex )))
  
  if np.sum(subsetidx_train) + np.sum(subsetidx_test) + np.sum(subsetidx_valid) != min(np.sum(axialbounds ),np.sum(dbtrainindex )) :
      raise("data error: slice numbers dont match")

  print('copy memory map from disk to RAM...')
  trainingsubset = numpydatabase[subsetidx_train]
  validsubset    = numpydatabase[subsetidx_valid]
  testsubset     = numpydatabase[subsetidx_test]

#  np.random.seed(seed=0)
#  np.random.shuffle(trainingsubset)
  
  ntrainslices = len(trainingsubset)
  nvalidslices = len(validsubset)

  if settings.options.D3:
      x_data  = trainingsubset['imagedata']
      y_data  = trainingsubset['truthdata']
      x_valid = validsubset['imagedata']
      y_valid = validsubset['truthdata']
      
      x_train = thick_slices(x_data, settings.options.thickness, trainingsubset['dataid'], train_index)
      y_train = thick_slices(y_data, settings.options.thickness, trainingsubset['dataid'], train_index)
      
      x_valid = thick_slices(x_valid, settings.options.thickness, validsubset['dataid'], valid_index)
      y_valid = thick_slices(y_valid, settings.options.thickness, validsubset['dataid'], valid_index)
      
      np.random.seed(seed=0)
      train_shuffle = np.random.permutation(x_train.shape[0])
      valid_shuffle = np.random.permutation(x_valid.shape[0])
      x_train = x_train[train_shuffle,...]
      y_train = y_train[train_shuffle,...]
      x_valid = x_valid[valid_shuffle,...]
      y_valid = y_valid[valid_shuffle,...]
      
  elif settings.options.D25: 
      x_data  = trainingsubset['imagedata']
      y_data  = trainingsubset['truthdata']
      x_valid = validsubset['imagedata']
      y_valid = validsubset['truthdata']
      
      x_train = thick_slices(x_data, settings.options.thickness, trainingsubset['dataid'], train_index)
      x_valid = thick_slices(x_valid, settings.options.thickness, validsubset['dataid'], valid_index)
      
      y_train = thick_slices(y_data, 1, trainingsubset['dataid'], train_index)
      y_valid = thick_slices(y_valid, 1, validsubset['dataid'], valid_index)
      
      np.random.seed(seed=0)
      train_shuffle = np.random.permutation(x_train.shape[0])
      valid_shuffle = np.random.permutation(x_valid.shape[0])
      x_train = x_train[train_shuffle,...]
      y_train = y_train[train_shuffle,...]
      x_valid = x_valid[valid_shuffle,...]
      y_valid = y_valid[valid_shuffle,...]
  
  else: 
      np.random.seed(seed=0)
      np.random.shuffle(trainingsubset)
      
      x_train=trainingsubset['imagedata']
      y_train=trainingsubset['truthdata']
      x_valid=validsubset['imagedata']
      y_valid=validsubset['truthdata']
  

#  slicesplit        = int(0.9 * totnslice)
#  TRAINING_SLICES   = slice(         0, slicesplit)
#  VALIDATION_SLICES = slice(slicesplit, totnslice )


  print("\nkfolds : ", kfolds)
  print("idfold : ", idfold)
  print("slices training   : ", ntrainslices)
  print("slices validation : ", nvalidslices)
  try:
      print("slices testing    : ", len(testsubset))
  except:
      print("slices testing    : 0")


  ###
  ### data preprocessing : applying liver mask
  ###
  y_train_typed = y_train.astype(settings.SEG_DTYPE)
  y_train_liver = preprocess.livermask(y_train_typed)
  
  x_train_typed = x_train
  x_train_typed = preprocess.window(x_train_typed, settings.options.hu_lb, settings.options.hu_ub)
  x_train_typed = preprocess.rescale(x_train_typed, settings.options.hu_lb, settings.options.hu_ub)

  y_valid_typed = y_valid.astype(settings.SEG_DTYPE)
  y_valid_liver = preprocess.livermask(y_valid_typed)
  
  x_valid_typed = x_valid
  x_valid_typed = preprocess.window(x_valid_typed, settings.options.hu_lb, settings.options.hu_ub)
  x_valid_typed = preprocess.rescale(x_valid_typed, settings.options.hu_lb, settings.options.hu_ub)

#  liver_idx = y_train_typed > 0
#  y_train_liver = np.zeros_like(y_train_typed)
#  y_train_liver[liver_idx] = 1
#
#  tumor_idx = y_train_typed > 1
#  y_train_tumor = np.zeros_like(y_train_typed)
#  y_train_tumor[tumor_idx] = 1
#
#  x_masked = x_train * y_train_liver - 100.0*(1.0 - y_train_liver)
#  x_masked = x_masked.astype(settings.IMG_DTYPE)



  ###
  ### create and run model   tf.keras.losses.mean_squared_error,
  ###
  opt                 = GetOptimizer()
  callbacks, modelloc = GetCallbacks(logfileoutputdir, "liver")
  lss, met            = GetLoss()
  model               = get_unet()
  model.compile(loss       = lss,
                metrics    = met,
                optimizer  = opt)

  print("\n\n\tlivermask training...\tModel parameters: {0:,}".format(model.count_params()))

  if settings.options.D3: 
      if settings.options.augment:
          train_datagen = ImageDataGenerator3D(
              brightness_range=[0.9,1.1],
              width_shift_range=[-0.1,0.1],
              height_shift_range=[-0.1,0.1],
              horizontal_flip=True,
              vertical_flip=True,
              zoom_range=0.1,
              fill_mode='nearest',
              preprocessing_function=preprocess.post_augment
              )
          train_maskgen = ImageDataGenerator3D()
      else:
          train_datagen = ImageDataGenerator3D()
          train_maskgen = ImageDataGenerator3D()
          
      valid_datagen = ImageDataGenerator3D()
      valid_maskgen = ImageDataGenerator3D()
  else:
      if settings.options.augment:
          train_datagen = ImageDataGenerator2D(
              brightness_range=[0.9,1.1],
              width_shift_range=[-0.1,0.1],
              height_shift_range=[-0.1,0.1],
              horizontal_flip=True,
              vertical_flip=True,
              zoom_range=0.1,
              fill_mode='nearest',
              preprocessing_function=preprocess.post_augment
              )
          train_maskgen = ImageDataGenerator2D()
      else:
          train_datagen = ImageDataGenerator2D()
          train_maskgen = ImageDataGenerator2D()
          
      valid_datagen = ImageDataGenerator2D()
      valid_maskgen = ImageDataGenerator2D()
      
 
  sd = 2  # arbitrary but fixed seed for ImageDataGenerators()
  
  if settings.options.D25:
      dataflow = train_datagen.flow(x_train_typed,
                                    batch_size=settings.options.trainingbatch,
                                    seed=sd,
                                    shuffle=True)
      maskflow = train_maskgen.flow(y_train_liver,
                                    batch_size=settings.options.trainingbatch,
                                    seed=sd,
                                    shuffle=True)
      
      validdataflow = valid_datagen.flow(x_valid_typed,
                                         batch_size=settings.options.validationbatch,
                                         seed=sd,
                                         shuffle=True)
      validmaskflow = valid_maskgen.flow(y_valid_liver,
                                         batch_size=settings.options.validationbatch,
                                         seed=sd,
                                         shuffle=True)
  else: 
      dataflow = train_datagen.flow(x_train_typed[...,np.newaxis],
                                    batch_size=settings.options.trainingbatch,
                                    seed=sd,
                                    shuffle=True)
      maskflow = train_maskgen.flow(y_train_liver[...,np.newaxis],
                                    batch_size=settings.options.trainingbatch,
                                    seed=sd,
                                    shuffle=True)
      
      validdataflow = valid_datagen.flow(x_valid_typed[...,np.newaxis],
                                         batch_size=settings.options.validationbatch,
                                         seed=sd,
                                         shuffle=True)
      validmaskflow = valid_maskgen.flow(y_valid_liver[...,np.newaxis],
                                         batch_size=settings.options.validationbatch,
                                         seed=sd,
                                         shuffle=True)
   
  train_generator = zip(dataflow, maskflow)  
  valid_generator = zip(validdataflow, validmaskflow)
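  # Because each image flow and its matching mask flow were created with the same seed and
  # shuffle=True, both generators emit batches in the same order, so zipping them yields
  # aligned (image, mask) batch pairs for fit_generator below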
      
  history_liver = model.fit_generator(
                        train_generator,
                        steps_per_epoch= ntrainslices // settings.options.trainingbatch,
                        validation_steps = nvalidslices // settings.options.validationbatch,
                        epochs=settings.options.numepochs,
                        validation_data=valid_generator,
                        callbacks=callbacks,
                        shuffle=True)



  ###
  ### make predictions on validation set
  ###
  print("\n\n\tapplying models...")
  
  if settings.options.D25:
      y_pred_float = model.predict( x_valid_typed )[...,0] #[...,settings.options.thickness] )
  else: 
      y_pred_float = model.predict( x_valid_typed[...,np.newaxis] )[...,0]
      
  y_pred_seg   = (y_pred_float >= settings.options.segthreshold).astype(settings.SEG_DTYPE)    

  if settings.options.D3:
      x_valid       = unthick(x_valid, settings.options.thickness, validsubset['dataid'], valid_index)
      y_valid       = unthick(y_valid, settings.options.thickness, validsubset['dataid'], valid_index)
      
      y_valid_liver = unthick(y_valid_liver, settings.options.thickness, validsubset['dataid'], valid_index)
      y_pred_float  = unthick(y_pred_float, settings.options.thickness, validsubset['dataid'], valid_index)
      y_pred_seg    = unthick(y_pred_seg, settings.options.thickness, validsubset['dataid'], valid_index)

  print("\tsaving to file...")
  
  trueinnii     = nib.Nifti1Image(x_valid,       None)
  truesegnii    = nib.Nifti1Image(y_valid,       None)
#  windownii     = nib.Nifti1Image(x_valid_typed, None)
  truelivernii  = nib.Nifti1Image(y_valid_liver, None)
  predsegnii    = nib.Nifti1Image(y_pred_seg, None )
  predfloatnii  = nib.Nifti1Image(y_pred_float, None)
  
  trueinnii.to_filename(    logfileoutputdir+'/nii/trueimg.nii.gz')
  truesegnii.to_filename(   logfileoutputdir+'/nii/truseg.nii.gz')
#  windownii.to_filename(    logfileoutputdir+'/nii/windowedimg.nii.gz')
  truelivernii.to_filename( logfileoutputdir+'/nii/trueliver.nii.gz')
  predsegnii.to_filename(   logfileoutputdir+'/nii/predtumorseg.nii.gz')
  predfloatnii.to_filename( logfileoutputdir+'/nii/predtumorfloat.nii.gz')

  print("\tdone saving.")
  return modelloc
 hdfpath = DATAPATH + 'pca/' + RESP + '.hdf' 
 newpath = DATAPATH + WRITE_DIR + '/' + RESP + '.hdf'
 
 with h5py.File(hdfpath,'r') as read_file:
     with h5py.File(newpath, 'w') as write_file:
         # train set
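         # each event's response is sliced out sequentially; when a slice would run past the
         # 7200-sample end of resp_trn, the part up to 7200 is stacked with the first
         # SLIDE_LENGTH samples, i.e. the training response is treated as wrapping around
         # (the test-set branch below does the same against a 600-sample length)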
         l = SLIDE_LENGTH
         for i in trange(12, desc='EVSet', leave=False):
             for j in trange(30, desc='EVNo'):
                 TR_EVSet = str(i + 1).zfill(2)
                 TR_EVNo = str(j + 1).zfill(2)
                 
                 r = l + train_cm_length[i * 30 + j]
                 if ( r > 7200) :
                     write_file.create_dataset(TR_EVSet + '_' + TR_EVNo, 
                                               data=np.vstack((read_file['resp_trn'][l:7200], read_file['resp_trn'][0: SLIDE_LENGTH])))
                 else :
                     write_file.create_dataset(TR_EVSet + '_' + TR_EVNo, data=read_file['resp_trn'][l:r])
                 l = r
         # test set 1
         l = SLIDE_LENGTH
         for j in trange(30, desc='EVNo'):
             TR_EVSet = '13'
             TR_EVNo = str(j + 1).zfill(2)
             
             r = l + test_cm_length1[j]
             if ( r > 600) :
                 write_file.create_dataset(TR_EVSet + '_' + TR_EVNo, 
                                           data=np.vstack((read_file['resp_val'][l:600], read_file['resp_val'][0: SLIDE_LENGTH])))
             else :
                 write_file.create_dataset(TR_EVSet + '_' + TR_EVNo, data=read_file['resp_val'][l:r])
Example #40
0
g = np.array([[1,2],[3,4]])
h = np.array([[5,6],[7,8]])

#using Horizontal stack
i = np.hstack((g,h))
print(i)




g = np.array([[1,2],[3,4]])
h = np.array([[5,6],[7,8]])

#using Vertical stack
i = np.vstack((g,h))
print(i)




g = np.array([range(9)])
print(g)





Example #41
0
                               cv2.THRESH_OTSU + cv2.THRESH_BINARY)
        th4[th4 == 255] = 1

        th5 = np.array(th4, dtype=np.uint8)
        th5[th5 == 1] = 255
        # image = invert(th4)
        skeleton = skeletonize(th4)
        skeleton = np.array(skeleton, dtype=np.uint8)
        skeleton[skeleton == 1] = 255

        row0 = np.hstack((crab, result))
        # row0 = np.hstack((crab_ch, result))
        row1 = np.hstack((opening, blur))
        row2 = np.hstack((th5, skeleton))

        res1 = np.vstack((row1, row2))
        res = np.vstack((row0, res1))

        # contours = measure.find_contours(red, 0.8)
        # new = filters.sobel(crab_red)
        # new = np.array(new, dtype=np.uint8)
        # new[new == 1] = 255

        # Sobel filter
        # new_x = cv2.Sobel(crab_red, cv2.CV_32F, 1, 0)
        # new_y = cv2.Sobel(crab_red, cv2.CV_32F, 0, 1)
        # new_xcvt = cv2.convertScaleAbs(new_x)
        # new_ycvt = cv2.convertScaleAbs(new_y)
        # new = cv2.addWeighted(new_xcvt, 0.5, new_ycvt, 0.5, 0)

        # Adjust exposure with Gamma and Logarithmic correction
Example #42
0
def test_sample_weight_invariance(n_samples=50):
    random_state = check_random_state(0)

    # binary output
    random_state = check_random_state(0)
    y_true = random_state.randint(0, 2, size=(n_samples, ))
    y_pred = random_state.randint(0, 2, size=(n_samples, ))
    y_score = random_state.random_sample(size=(n_samples,))
    for name in ALL_METRICS:
        if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
                name in METRIC_UNDEFINED_MULTICLASS):
            continue
        metric = ALL_METRICS[name]
        if name in THRESHOLDED_METRICS:
            yield check_sample_weight_invariance, name, metric, y_true, y_score
        else:
            yield check_sample_weight_invariance, name, metric, y_true, y_pred

    # multiclass
    random_state = check_random_state(0)
    y_true = random_state.randint(0, 5, size=(n_samples, ))
    y_pred = random_state.randint(0, 5, size=(n_samples, ))
    y_score = random_state.random_sample(size=(n_samples, 5))
    for name in ALL_METRICS:
        if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
                name in METRIC_UNDEFINED_MULTICLASS):
            continue
        metric = ALL_METRICS[name]
        if name in THRESHOLDED_METRICS:
            yield check_sample_weight_invariance, name, metric, y_true, y_score
        else:
            yield check_sample_weight_invariance, name, metric, y_true, y_pred

    # multilabel sequence
    y_true = 2 * [(1, 2, ), (1, ), (0, ), (0, 1), (1, 2)]
    y_pred = 2 * [(0, 2, ), (2, ), (0, ), (2, ), (1,)]
    y_score = random_state.randn(10, 3)

    for name in MULTILABELS_METRICS:
        if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
            continue
        metric = ALL_METRICS[name]

        if name in THRESHOLDED_METRICS:
            yield (check_sample_weight_invariance, name, metric, y_true,
                   y_score)
        else:
            yield (check_sample_weight_invariance, name, metric, y_true,
                   y_pred)

    # multilabel indicator
    _, ya = make_multilabel_classification(
        n_features=1, n_classes=20,
        random_state=0, n_samples=100,
        return_indicator=True, allow_unlabeled=False)
    _, yb = make_multilabel_classification(
        n_features=1, n_classes=20,
        random_state=1, n_samples=100,
        return_indicator=True, allow_unlabeled=False)
    y_true = np.vstack([ya, yb])
    y_pred = np.vstack([ya, ya])
    y_score = random_state.randint(1, 4, size=y_true.shape)

    for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
                 MULTIOUTPUT_METRICS):
        if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
            continue

        metric = ALL_METRICS[name]
        if name in THRESHOLDED_METRICS:
            yield (check_sample_weight_invariance, name, metric, y_true,
                   y_score)
        else:
            yield (check_sample_weight_invariance, name, metric, y_true,
                   y_pred)
Example #43
0
def SVM_One_vs_One(distances, semanticLabels, targetTrainingIndice,
                   targetTestingIndice):
    # Construct base kernels
    baseKernels = []
    for i in range(len(distances)):
        distance = distances[i]
        distance = distance**2
        trainingDistances = sliceArray(distance, targetTrainingIndice)

        # Define kernel parameters
        gramma0 = 1.0 / np.mean(trainingDistances)
        kernel_params = [gramma0 * (2**index) for index in range(-3, 2, 1)]

        # Construct base kernels & pre-learned classifier
        baseKernel = constructBaseKernels(["rbf", "lap", "isd", "id"],
                                          kernel_params, distance)
        baseKernels += baseKernel

    # Build a classifier for each pair
    labelSet = ["birthday", "parade", "picnic", "show", "sports", "wedding"]
    pairs = util.paris(labelSet)

    # Update base kernels, value is duplicated within the same domain
    for baseKernel in baseKernels:
        baseKernel[np.ix_([i for i in range(195)],
                          [i for i in range(195)])] = 2 * baseKernel[np.ix_(
                              [i for i in range(195)], [i
                                                        for i in range(195)])]
        baseKernel[np.ix_(
            [i for i in range(195, 1101, 1)],
            [i for i in range(195, 1101, 1)
             ])] = 2 * baseKernel[np.ix_([i for i in range(195, 1101, 1)],
                                         [i for i in range(195, 1101, 1)])]

    stackOfPredictions = []
    for pair in pairs:

        positiveClass = pair[0]
        negativeClass = pair[1]

        thisTrainIndices = []
        binaryLabels = []

        for i in targetTrainingIndice:
            if semanticLabels[i] in pair:
                thisTrainIndices.append(i)

                if semanticLabels[i] == positiveClass:
                    binaryLabels.append(1)
                elif semanticLabels[i] == negativeClass:
                    binaryLabels.append(-1)

        finalTestScores = []
        for m in range(len(baseKernels)):
            baseKernel = baseKernels[m]
            Ktrain = sliceArray(baseKernel, thisTrainIndices)
            Ktest = baseKernel[np.ix_(targetTestingIndice, thisTrainIndices)]

            clf = SVC(kernel="precomputed")
            clf.fit(Ktrain, binaryLabels)

            dv = clf.decision_function(Ktest)
            finalTestScores.append(dv.reshape((1, len(targetTestingIndice))))

        finalTestScores = np.vstack(finalTestScores)

        tempFinalTestScores = 1.0 / (1 + math.e**(-finalTestScores))
        finalTestScores = np.mean(tempFinalTestScores, axis=0)

        thisPredictLabels = []
        for score in finalTestScores:
            if score < 0.5:
                thisPredictLabels.append(negativeClass)
            else:
                thisPredictLabels.append(positiveClass)
        stackOfPredictions.append(thisPredictLabels)

    stackOfPredictions = np.array(stackOfPredictions)

    finalLabels = []
    shape = stackOfPredictions.shape
    for i in range(shape[1]):
        temp = []
        for j in range(shape[0]):
            temp.append(stackOfPredictions[j][i])
        dict = {}
        for item in temp:
            if item in dict.keys():
                dict[item] = dict[item] + 1
            else:
                dict[item] = 1

        keys = dict.keys()
        curLabel = keys[0]
        curVal = dict[curLabel]

        for l in keys[1:]:
            if curVal < dict[l]:
                curLabel = l
                curVal = dict[curLabel]
        finalLabels.append(curLabel)

    return finalLabels
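# Hedged aside, not the original constructBaseKernels: one plausible reading of the kernel
# construction above is that each squared-distance matrix is turned into a family of
# precomputed RBF Gram matrices over the gamma grid and fed to SVC(kernel="precomputed").
# A minimal self-contained sketch of that pattern (all names below are illustrative):
import numpy as np
from sklearn.svm import SVC

rng = np.random.RandomState(0)
X = rng.rand(20, 5)
labels = np.array([0, 1] * 10)
D = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)   # squared Euclidean distances

gamma0 = 1.0 / np.mean(D)
for gamma in [gamma0 * 2 ** k for k in range(-3, 2)]:
    K = np.exp(-gamma * D)                            # precomputed RBF Gram matrix
    clf = SVC(kernel="precomputed").fit(K, labels)
    print(gamma, clf.decision_function(K)[:3])        # rows of K act as the test samples here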
Example #44
0
def voronoi_vertex(domain, Xhat, L=None):
    r"""Construct the bounded Voronoi vertices on a domain.

    Note:
    Pro17 claims that these bounded Voronoi vertices can be computed using WP89.
    This approach may offer some speedup, but I trust Q-hull more than my own implementation.
    """
    if len(domain) == 1:
        return voronoi_vertex_1d(domain, Xhat)

    assert np.all(domain.isinside(Xhat))

    if L is None:
        L = np.eye(len(domain))
        Linv = np.eye(len(domain))
    else:
        U, s, VT = np.linalg.svd(L)
        Linv = VT.T @ np.diag(1. / s) @ U.T

    # Half spaces from the domain
    A, b = domain.A_aug, domain.b_aug

    if len(Xhat) == 1:
        halfspaces = np.hstack([A, -b.reshape(-1, 1)])
        hs = HalfspaceIntersection(halfspaces, Xhat[0])
        return hs.intersections

    Yhat = (L @ Xhat.T).T
    LV = []
    for k, xhat in enumerate(Xhat):
        Ak = Yhat - Yhat[k]
        #Ak = np.vstack([L @ (Xhat[j] - xhat) for j in range(len(Xhat)) if j != k])
        center = 0.5 * (Yhat[k] + Yhat)
        #center = np.vstack([ 0.5*L @ (xhat + Xhat[j]) for j in range(len(Xhat)) if j != k])
        bk = np.sum(Ak * center, axis=1)

        # If two Xhat are the same, we can end up with a situation
        # where the constraint row becomes all zeros, so we filter these out
        I = np.argwhere(np.sum(Ak**2, axis=1) > 0).flatten()
        Ak, bk = Ak[I], bk[I]

        halfspaces = np.hstack(
            [np.vstack([A @ Linv, Ak]), -np.hstack([b, bk]).reshape(-1, 1)])

        try:
            hs = HalfspaceIntersection(halfspaces, xhat)
        except QhullError:
            # The point xhat isn't strictly inside the constraints, so we try with a point that is
            # Here we use the code from scipy documentation
            norm_vector = np.reshape(
                np.linalg.norm(halfspaces[:, :-1], axis=1),
                (halfspaces.shape[0], 1))
            cc = np.zeros((halfspaces.shape[1], ))
            cc[-1] = -1
            AA = np.hstack((halfspaces[:, :-1], norm_vector))
            bb = -halfspaces[:, -1:]

            # Solve the linear program with CVXOPT as scipy's linprog reports ill-conditioning
            sol = solvers.lp(matrix(cc), matrix(AA), matrix(bb))
            xhat2 = np.array(sol['x'][:-1]).flatten()

            try:
                hs = HalfspaceIntersection(halfspaces, xhat2)
            except QhullError as e:
                raise e

        # TODO: Do we want to keep indexing information, or only return the vertices
        LV.append(np.copy(hs.intersections))

    LV = np.vstack(LV)
    V = (Linv @ LV.T).T
    I = unique_points(V)
    V = V[I]

    I = domain.isinside(V)
    V = V[I]
    return V
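# Illustrative aside, not part of the original example: scipy's HalfspaceIntersection takes
# rows of the form [A, -b] for constraints A x <= b (hence the np.hstack([A, -b.reshape(-1, 1)])
# calls above) plus a strictly interior point, and exposes the polytope vertices via
# .intersections. A minimal self-contained check on the unit square:
import numpy as np
from scipy.spatial import HalfspaceIntersection

square_halfspaces = np.array([[-1.0,  0.0,  0.0],   # -x <= 0
                              [ 0.0, -1.0,  0.0],   # -y <= 0
                              [ 1.0,  0.0, -1.0],   #  x <= 1
                              [ 0.0,  1.0, -1.0]])  #  y <= 1
hs = HalfspaceIntersection(square_halfspaces, np.array([0.5, 0.5]))
print(hs.intersections)  # the four corners of [0, 1]^2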
Example #45
0
ROrd = 1
#We use NTs in wrapT, so we make it a global variable. Gives the index
#of fit variables of different types
global NTs
NTs = ([BOrd, BOrd + ROrd])
pT = np.zeros(BOrd + ROrd)
#Have a run index after which temperature rises
tendCut = 3570
tstartCut = 170
#Find that index in the data arrays
place = np.where(runsTemp == tendCut)[0]
print(runsTemp)
#Keep only data before the cutoff index
pT[BOrd] = np.mean(tempsTemp[tstartCut:place[0]])
#Stack the runs and B data so that it can be passed as one variable to the fit function
RandB = np.vstack((runsTemp[tstartCut:place[0]], BsTemp[tstartCut:place[0]])).T
#Call the fit function; this syntax is terrible but works. The lambda essentially
#defines a new function that we can pass as our fit function
popt, pcov = curve_fit(lambda RandB, *pT: wrapT(RandB, *pT), RandB, tempsTemp[tstartCut:place[0]], p0=pT)
#if you want to see fit results for temperature, uncomment here
#    print(popt)
#    print(popt/np.sqrt(np.diag(pcov
    
#In order to get the true temperature, we need to subtract off the magnetic 
#field part. So define a parameter array where the run-dependent fit parts are 0
#and the magnetic field dependent parts are the results of the fit
pSub = np.zeros(len(pT))
pSub[:BOrd] = popt[:BOrd]

for h in range(0, len(fname)):
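# Illustrative aside, not part of the original script: curve_fit only accepts a single
# independent-variable argument, which is why the runs and B-field data are vstack'ed into
# one array above and unpacked inside the fit function. A minimal self-contained version of
# that pattern (the model and data below are made up for the sketch):
import numpy as np
from scipy.optimize import curve_fit

def toy_model(run_and_b, a, c):
    run, b = run_and_b.T              # unpack the two stacked columns
    return a * run + c * b

runs = np.arange(100, dtype=float)
bfield = np.sin(runs / 10.0)
temps = 0.5 * runs + 2.0 * bfield + np.random.normal(0.0, 0.1, runs.size)

stacked = np.vstack((runs, bfield)).T
popt, pcov = curve_fit(toy_model, stacked, temps, p0=[1.0, 1.0])
print(popt)                           # roughly [0.5, 2.0]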
Example #46
0
def runSVM_T_One_vs_All(distances, labels, all_trainingIndices,
                        targetTestingIndice):

    # Construct base kernels
    baseKernels = []
    for i in range(len(distances)):
        distance = distances[i]
        distance = distance**2
        trainingDistances = sliceArray(distance, all_trainingIndices)

        # Define kernel parameters
        gramma0 = 1.0 / np.mean(trainingDistances)
        kernel_params = [gramma0 * (2**index) for index in range(-3, 2, 1)]

        # Construct base kernels & pre-learned classifier
        baseKernel = constructBaseKernels(["rbf", "lap", "isd", "id"],
                                          kernel_params, distance)
        baseKernels += baseKernel

    # Update base kernels, value is duplicated within the same domain
    for baseKernel in baseKernels:
        baseKernel[np.ix_([i for i in range(195)],
                          [i for i in range(195)])] = 2 * baseKernel[np.ix_(
                              [i for i in range(195)], [i
                                                        for i in range(195)])]
        baseKernel[np.ix_(
            [i for i in range(195, 1101, 1)],
            [i for i in range(195, 1101, 1)
             ])] = 2 * baseKernel[np.ix_([i for i in range(195, 1101, 1)],
                                         [i for i in range(195, 1101, 1)])]

    scores = []
    for classNum in range(labels.shape[1]):
        thisClassLabels = labels[::, classNum]
        TrainingLabels = [
            thisClassLabels[index] for index in all_trainingIndices
        ]
        testingLabels = [
            thisClassLabels[index] for index in targetTestingIndice
        ]

        # pre-learned classifiers' score
        finalTestScores = np.zeros((len(targetTestingIndice))).reshape(
            (1, len(targetTestingIndice)))

        for m in range(len(baseKernels)):
            baseKernel = baseKernels[m]

            Ktrain = sliceArray(baseKernel, all_trainingIndices)
            Ktest = baseKernel[np.ix_(targetTestingIndice,
                                      all_trainingIndices)]

            clf = SVC(kernel="precomputed")
            clf.fit(Ktrain, TrainingLabels)

            dv = clf.decision_function(Ktest)
            finalTestScores = np.vstack(
                (finalTestScores, dv.reshape((1, len(targetTestingIndice)))))

        # Fuse final scores together
        finalTestScores = finalTestScores[1:]
        tempFinalTestScores = 1.0 / (1 + math.e**(-finalTestScores))
        finalTestScores = np.mean(tempFinalTestScores, axis=0)

        scores.append(finalTestScores)

    # Find the label with the largest score
    scores = np.vstack(scores)
    ranks = np.argmax(scores, axis=0)

    labelSet = ["birthday", "parade", "picnic", "show", "sports", "wedding"]
    predictLabels = [labelSet[i] for i in ranks]

    return predictLabels
Example #47
0
File: stats.py Project: 17ai/pymc3
def loo(trace, model=None, pointwise=False, progressbar=False):
    """Calculates leave-one-out (LOO) cross-validation for out of sample predictive
    model fit, following Vehtari et al. (2015). Cross-validation is computed using
    Pareto-smoothed importance sampling (PSIS).

    Parameters
    ----------
    trace : result of MCMC run
    model : PyMC Model
        Optional model. Default None, taken from context.
    pointwise: bool
        if True the pointwise predictive accuracy will be returned.
        Default False
    progressbar: bool
        Whether or not to display a progress bar in the command line. The
        bar shows the percentage of completion, the evaluation speed, and
        the estimated time to completion

    Returns
    -------
    namedtuple with the following elements:
    loo: approximated Leave-one-out cross-validation
    loo_se: standard error of loo
    p_loo: effective number of parameters
    loo_i: an array of the pointwise predictive accuracy, only if pointwise is True
    """
    model = modelcontext(model)

    log_py = _log_post_trace(trace, model, progressbar=progressbar)
    if log_py.size == 0:
        raise ValueError('The model does not contain observed values.')

    # Importance ratios
    r = np.exp(-log_py)
    r_sorted = np.sort(r, axis=0)

    # Extract largest 20% of importance ratios and fit generalized Pareto to each
    # (returns tuple with shape, location, scale)
    q80 = int(len(log_py) * 0.8)
    pareto_fit = np.apply_along_axis(
        lambda x: pareto.fit(x, floc=0), 0, r_sorted[q80:])

    if np.any(pareto_fit[0] > 0.7):
        warnings.warn("""Estimated shape parameter of Pareto distribution is
        greater than 0.7 for one or more samples.
        You should consider using a more robust model, this is
        because importance sampling is less likely to work well if the marginal
        posterior and LOO posterior are very different. This is more likely to
        happen with a non-robust model and highly influential observations.""")

    elif np.any(pareto_fit[0] > 0.5):
        warnings.warn("""Estimated shape parameter of Pareto distribution is
        greater than 0.5 for one or more samples. This may indicate
        that the variance of the Pareto smoothed importance sampling estimate
        is very large.""")

    # Calculate expected values of the order statistics of the fitted Pareto
    S = len(r_sorted)
    M = S - q80
    z = (np.arange(M) + 0.5) / M
    expvals = map(lambda x: pareto.ppf(z, x[0], scale=x[2]), pareto_fit.T)

    # Replace importance ratios with order statistics of fitted Pareto
    r_sorted[q80:] = np.vstack(expvals).T
    # Unsort ratios (within columns) before using them as weights
    r_new = np.array([r[np.argsort(i)]
                      for r, i in zip(r_sorted.T, np.argsort(r.T, axis=1))]).T

    # Truncate weights to guarantee finite variance
    w = np.minimum(r_new, r_new.mean(axis=0) * S**0.75)

    loo_lppd_i = - 2. * logsumexp(log_py, axis=0, b=w / np.sum(w, axis=0))

    loo_lppd_se = np.sqrt(len(loo_lppd_i) * np.var(loo_lppd_i))

    loo_lppd = np.sum(loo_lppd_i)

    lppd = np.sum(logsumexp(log_py, axis=0, b=1. / log_py.shape[0]))

    p_loo = lppd + (0.5 * loo_lppd)

    if pointwise:
        LOO_r = namedtuple('LOO_r', 'LOO, LOO_se, p_LOO, LOO_i')
        return LOO_r(loo_lppd, loo_lppd_se, p_loo, loo_lppd_i)
    else:
        LOO_r = namedtuple('LOO_r', 'LOO, LOO_se, p_LOO')
        return LOO_r(loo_lppd, loo_lppd_se, p_loo)
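# Hedged usage sketch, not from the original module: the toy model and names below are
# illustrative only. With a PyMC3 trace in hand, loo() can be called with the model passed
# explicitly (or from inside the model context):
import numpy as np
import pymc3 as pm

y_obs = np.random.normal(1.0, 1.0, size=50)
with pm.Model() as toy_model:
    mu = pm.Normal('mu', mu=0.0, sd=10.0)
    pm.Normal('y', mu=mu, sd=1.0, observed=y_obs)
    trace = pm.sample(1000, tune=1000)

result = loo(trace, model=toy_model)            # namedtuple: LOO, LOO_se, p_LOO
print(result.LOO, result.LOO_se, result.p_LOO)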
Example #48
0
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Multi-output decision tree regression. Sometimes there is more than one output, and the
# output targets can be related to each other; see the notes in the accompanying Word document.
if __name__ == "__main__":
    # 300 samples, each with x drawn from [-4, 4)
    N = 300
    x = np.random.rand(N) * 8 - 4  # [-4,4)
    x.sort()
    y1 = np.sin(x) + 3 + np.random.randn(N) * 0.1
    y2 = np.cos(0.3 * x) + np.random.randn(N) * 0.01
    # y1 = np.sin(x) + np.random.randn(N) * 0.05
    # y2 = np.cos(x) + np.random.randn(N) * 0.1
    # y1 is one row and y2 is one row; vstack gives y as 2 stacked rows
    y = np.vstack((y1, y2))
    # transposing gives y as 2 columns
    y = np.vstack((y1, y2)).T
    x = x.reshape(-1, 1)  # after reshaping: N samples, each 1-dimensional

    deep = 3
    # regression tree of depth 3, with mean squared error as the split criterion
    reg = DecisionTreeRegressor(criterion='mse', max_depth=deep)
    # x is a single column, y has 2 columns: fit a multi-output regression tree
    dt = reg.fit(x, y)

    # after fitting, make predictions
    x_test = np.linspace(-4, 4, num=1000).reshape(-1, 1)
    print(x_test)
    y_hat = dt.predict(x_test)
    print(y_hat)
Example #49
0
def main():

    # setup argument parsing
    parser = argparse.ArgumentParser(description='Model Evaluation')
    parser.add_argument('-e',
                        '--experiment',
                        type=str,
                        required=True,
                        help='name of experiment')

    # optional arguments
    parser.add_argument('-m',
                        '--model',
                        type=str,
                        default='heuristic',
                        help='model for evaluation: heuristic, model name')
    parser.add_argument('-n',
                        '--episodes',
                        type=int,
                        default=10,
                        help='number of episodes for evaluation')
    parser.add_argument('--env',
                        type=str,
                        default='Shepherd-v0',
                        help='environment for testing model')
    parser.add_argument('--nocuda',
                        action='store_true',
                        default=False,
                        help='flag to switch to cpu')
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help='seed value for reproducibility')
    parser.add_argument('--results',
                        type=str,
                        default='../results/imitation/',
                        help='path to results folder')
    parser.add_argument('--store',
                        action='store_true',
                        default=False,
                        help='flag to store experience')
    parser.add_argument('--norender',
                        dest='render',
                        action='store_false',
                        default=True,
                        help='flag for rendering sim')

    # parse arguments
    args = parser.parse_args()

    # initialize torch environment
    seed = args.seed
    is_cuda = not args.nocuda

    torch.manual_seed(seed)
    np.random.seed(seed)

    if is_cuda:
        torch.cuda.manual_seed(seed)
        torch.backends.cudnn.deterministic = True

    # initialize variables
    model = args.model
    exp = args.experiment
    n_episodes = args.episodes
    store_dataset = args.store
    result_path = args.results
    experiment = args.experiment

    # create folder to save generated dataset
    if store_dataset:
        data_path = '../data'
        if not os.path.isdir(data_path):
            os.mkdir(data_path)

        exp_path = '{}/{}'.format(data_path, exp)
        if not os.path.isdir(exp_path):
            os.mkdir(exp_path)

    # create new environment
    env = gym.make(args.env)
    if args.render:
        env.render()

    # state space dimensionality
    n_state = env.observation_space.shape[0]

    print('Experiment: ', experiment)
    print('### Initialization Done ###')

    # load the il model
    if model != 'heuristic':
        # define network and optimizer
        policy = Policy(drop_rate=0.0)
        if is_cuda:
            policy.cuda()

        policy.load_state_dict(torch.load(result_path + model + '/model.pt'))
        policy.eval()

        print('### Network Created ###')

    # loop over episodes
    dataset = []
    success_trials = 0

    for n in range(n_episodes):
        done = False
        state = env.reset()
        trial_data = np.zeros((0, 2 * n_state + 5))

        # run until episode terminates
        (state, _, done, info) = env.step(0)
        while not done:
            if model != 'heuristic':
                state_var = torch.from_numpy(state[None, :].astype(np.float32))

                if is_cuda:
                    state_var = state_var.cuda()
                state_var = Variable(state_var)

                # get output action
                output, int_goal, mode = policy(state_var)
                pred = F.softmax(output, dim=1).data.max(1, keepdim=True)[1]
                action = pred.cpu().numpy()[0, 0]

                # get intermediate goal and predicted mode
                int_goal = int_goal.detach().cpu().numpy()[0]
                int_goal = int_goal.astype(np.float64)

                mode = F.softmax(mode, dim=1).data.max(1, keepdim=True)[1]
                dog_mode = mode.cpu().numpy()[0, 0]

            else:
                action, int_goal, dog_mode = dog_heuristic_model(
                    state, info, False)

            # perform action and get state
            new_state, reward, done, info = env.step(action)

            # check for success
            if done and info['s']:
                print('Success!')
                success_trials += 1.0

            # save the sample dataset
            sample = np.hstack(
                (state, int_goal, np.array([dog_mode, action,
                                            reward]), new_state))
            trial_data = np.vstack((trial_data, sample[None, :]))

            # update state variable
            state = new_state

            # render simulation
            if args.render:
                env.render(mode='detailed', subgoal=int_goal)

        dataset.append(trial_data)
    # shutdown env
    env.close()

    # save the results
    if store_dataset:
        for n in range(n_episodes):
            np.savetxt('{}/{}_{}'.format(exp_path, model, n + 1),
                       dataset[n],
                       fmt='%.3f',
                       delimiter=',')

    print('### Testing Completed ###')
    print(f'Success Rate: {success_trials/n_episodes}')
Example #50
0
    prog += H(2) # number=36
    prog += H(3)  # number=8
    prog += H(0)  # number=9
    prog += Y(2) # number=10
    prog += Y(2) # number=11
    # circuit end

    return prog

def summrise_results(bitstrings) -> dict:
    d = {}
    for l in bitstrings:
        if d.get(l) is None:
            d[l] = 1
        else:
            d[l] = d[l] + 1

    return d

if __name__ == '__main__':
    prog = make_circuit()
    qvm = get_qc('4q-qvm')

    results = qvm.run_and_measure(prog,1024)
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil2545.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()

Example #51
0
    def __init__(self):
        print('FinnedUUVControllerNode: initializing node')

        self._ready = False

        # Test if any of the needed parameters are missing
        param_labels = ['n_fins', 'gain_roll', 'gain_pitch', 'gain_yaw',
                        'thruster_model', 'fin_topic_prefix',
                        'fin_topic_suffix', 'thruster_topic',
                        'axis_thruster', 'axis_roll', 'axis_pitch', 'axis_yaw']

        for label in param_labels:
            if not rospy.has_param('~%s' % label):
                raise rospy.ROSException('Parameter missing, label=%s' % label)

        # Number of fins
        self._n_fins = rospy.get_param('~n_fins')

        # Thruster joy axis gain
        self._thruster_joy_gain = 1
        if rospy.has_param('~thruster_joy_gain'):
            self._thruster_joy_gain = rospy.get_param('~thruster_joy_gain')

        # Read the vector for contribution of each fin on the change on
        # orientation
        gain_roll = rospy.get_param('~gain_roll')
        gain_pitch = rospy.get_param('~gain_pitch')
        gain_yaw = rospy.get_param('~gain_yaw')

        if len(gain_roll) != self._n_fins or len(gain_pitch) != self._n_fins \
            or len(gain_yaw) != self._n_fins:
            raise rospy.ROSException('Input gain vectors must have length '
                                     'equal to the number of fins')

        # Create the command angle to fin angle mapping
        self._rpy_to_fins = numpy.vstack((gain_roll, gain_pitch, gain_yaw)).T

        # Read the joystick mapping
        self._joy_axis = dict(axis_thruster=rospy.get_param('~axis_thruster'),
                              axis_roll=rospy.get_param('~axis_roll'),
                              axis_pitch=rospy.get_param('~axis_pitch'),
                              axis_yaw=rospy.get_param('~axis_yaw'))

        # Subscribe to the fin angle topics
        self._pub_cmd = list()
        self._fin_topic_prefix = rospy.get_param('~fin_topic_prefix')
        self._fin_topic_suffix = rospy.get_param('~fin_topic_suffix')
        for i in range(self._n_fins):
            topic = self._fin_topic_prefix + str(i) + self._fin_topic_suffix
            self._pub_cmd.append(
              rospy.Publisher(topic, FloatStamped, queue_size=10))

        # Create the thruster model object
        try:
            self._thruster_topic = rospy.get_param('~thruster_topic')
            self._thruster_params = rospy.get_param('~thruster_model')
            if 'max_thrust' not in self._thruster_params:
                raise rospy.ROSException('No limit to thruster output was given')
            self._thruster_model = Thruster.create_thruster(
                        self._thruster_params['name'], 0,
                        self._thruster_topic, None, None,
                        **self._thruster_params['params'])
        except:
            raise rospy.ROSException('Thruster model could not be initialized')

        # Subscribe to the joystick topic
        self.sub_joy = rospy.Subscriber('joy', numpy_msg(Joy),
                                        self.joy_callback)

        self._ready = True
Example #52
0
def answer_one():
    from sklearn.linear_model import LinearRegression
    from sklearn.preprocessing import PolynomialFeatures

    import numpy as np
    import pandas as pd
    from sklearn.model_selection import train_test_split

    np.random.seed(0)
    n = 15
    x = np.linspace(0, 10, n) + np.random.randn(n) / 5
    y = np.sin(x) + x / 6 + np.random.randn(n) / 10

    X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=0)
    X_train = X_train.reshape((11, 1))
    X_predict_input = np.linspace(0, 10, 100).reshape(100, 1)

    #POLY 1
    poly1 = PolynomialFeatures(degree=1)
    X_train_poly1 = poly1.fit_transform(X_train)

    linreg = LinearRegression().fit(X_train_poly1, y_train)

    X_predict_input_poly1 = poly1.fit_transform(X_predict_input)

    y_predict_output_poly1 = linreg.predict(X_predict_input_poly1)
    y_predict_output_poly1 = y_predict_output_poly1.flatten()

    #POLY 3
    poly3 = PolynomialFeatures(degree=3)
    X_train_poly3 = poly3.fit_transform(X_train)

    linreg = LinearRegression().fit(X_train_poly3, y_train)

    X_predict_input_poly3 = poly3.fit_transform(X_predict_input)

    y_predict_output_poly3 = linreg.predict(X_predict_input_poly3)
    y_predict_output_poly3 = y_predict_output_poly3.flatten()

    #POLY 6
    poly6 = PolynomialFeatures(degree=6)
    X_train_poly6 = poly6.fit_transform(X_train)

    linreg = LinearRegression().fit(X_train_poly6, y_train)

    X_predict_input_poly6 = poly6.fit_transform(X_predict_input)

    y_predict_output_poly6 = linreg.predict(X_predict_input_poly6)
    y_predict_output_poly6 = y_predict_output_poly6.flatten()

    #POLY 9
    poly9 = PolynomialFeatures(degree=9)
    X_train_poly9 = poly9.fit_transform(X_train)

    linreg = LinearRegression().fit(X_train_poly9, y_train)

    X_predict_input_poly9 = poly9.fit_transform(X_predict_input)

    y_predict_output_poly9 = linreg.predict(X_predict_input_poly9)
    y_predict_output_poly9 = y_predict_output_poly9.flatten()

    Answer = np.vstack([
        y_predict_output_poly1, y_predict_output_poly3, y_predict_output_poly6,
        y_predict_output_poly9
    ])

    return Answer
Example #53
0
    def __init__(self,
                 root,
                 train=True,
                 transform=None,
                 target_transform=None,
                 download=False,
                 index=None,
                 num_instance_per_class=0):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train  # training set or test set

        # if download:
        #     self.download()

        # if not self._check_integrity():
        #     raise RuntimeError('Dataset not found or corrupted.' +
        #                        ' You can use download=True to download it')

        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list

        self.data = []
        self.targets = []

        # now load the picked numpy arrays
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                if 'labels' in entry:
                    self.targets.extend(entry['labels'])
                else:
                    self.targets.extend(entry['fine_labels'])

        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC
        #self.data = self.data/255.
        #pdb.set_trace()
        self.targets = np.asarray(self.targets)
        #index_sort = np.argsort(self.targets)
        # Sort label and corresponding data from 0-9
        #self.data = self.data[index_sort]
        #self.targets=np.asarray(sorted(self.targets))

        self.targets = target_transform[self.targets]

        if num_instance_per_class == 0:
            self.data, self.targets = self.RandomPercentage(
                self.data, self.targets, index)
        else:
            self.data, self.targets = self.RandomExempalers(
                self.data, self.targets, index, num_instance_per_class)

        self._load_meta()
Example #54
0
zpca = pca.fit_transform(swr_modth)
pc = zpca[:, 0:2]
eigen = pca.components_

phi = np.mod(np.arctan2(zpca[:, 1], zpca[:, 0]), 2 * np.pi)

# times = np.arange(0, 1005, 5) - 500 # BAD

###############################################################################################################
# jPCA
###############################################################################################################
from scipy.sparse import lil_matrix

X = pca.components_.transpose()
dX = np.hstack(
    [np.vstack(derivative(times, X[:, i])) for i in range(X.shape[1])])
#build the H mapping for a given n
# work by lines but H is made for column based
n = X.shape[1]
H = buildHMap(n)
# X tilde
Xtilde = np.zeros((X.shape[0] * X.shape[1], X.shape[1] * X.shape[1]))
for i, j in zip((np.arange(0, n**2, n)),
                np.arange(0, n * X.shape[0], X.shape[0])):
    Xtilde[j:j + X.shape[0], i:i + X.shape[1]] = X
# put dx in columns
dXv = np.vstack(dX.transpose().reshape(X.shape[0] * X.shape[1]))
# multiply Xtilde by H
XtH = np.dot(Xtilde, H)
# solve XtH k = dXv
k, residuals, rank, s = np.linalg.lstsq(XtH, dXv)
Example #55
0
    #============================
    # Prepare data
    #============================
    # Import data
    data = load_wine()

    X = data.data

    # One-hot encoding
    y = np.zeros((data.target.shape[0], 3))
    y[:, 0] = np.where(data.target == 0, 1, 0)
    y[:, 1] = np.where(data.target == 1, 1, 0)
    y[:, 2] = np.where(data.target == 2, 1, 0)

    # Create train and test sets
    X_train = np.vstack((X[::3, :], X[1::3, :]))
    y_train = np.vstack((y[::3, :], y[1::3, :]))
    X_test = X[2::3, :]
    y_test = y[2::3, :]

    # Normalize X_train and X_test
    X_train, xmin, xmax = normalize(X_train)
    X_test, _, _ = normalize(X_test, xmin, xmax)

    #============================
    # SLP
    #============================
    w = slp_train(X_train, y_train, DS_name='Iris')  # Train
    y_hat = slp_predict(X_test, w)  # Predict
    print('SLP Accuracy:', np.mean(y_hat == y_test))  # Accuracy
Example #56
0
        val_rectified_np_scores = softmax(val_rectified_np_scores)

        # for l in range(0,(batch_number-1)*P): #rectify old classes
        for l in range(val_rectified_np_scores.shape[0]):
            val_rectified_np_scores[l] /= (nb_train_imgs_per_class[l] /
                                           total_nb_train_imgs)

        _, rectified_predicted_class = th.max(
            th.from_numpy(val_rectified_np_scores), 0)

        full_labels.append(val_image_class)
        if full_np_rectified_scores is None:
            full_np_scores = val_np_scores
            full_np_rectified_scores = val_rectified_np_scores
        else:
            full_np_scores = np.vstack((full_np_scores, val_np_scores))
            full_np_rectified_scores = np.vstack(
                (full_np_rectified_scores, val_rectified_np_scores))

        examples_counter += 1

        if examples_counter == batch_size:
            full_labels = th.from_numpy(np.array(full_labels, dtype=int))
            full_np_scores = th.from_numpy(full_np_scores)
            full_np_rectified_scores = th.from_numpy(full_np_rectified_scores)
            #compute accuracy
            prec1, prec5 = utils.accuracy(full_np_scores,
                                          full_labels,
                                          topk=(1, min(5, P * batch_number)))
            prec1_rectified, prec5_rectified = utils.accuracy(
                full_np_rectified_scores,
Example #57
0
def triangular_stack(A,B):
    q=B.shape[1]-A.shape[1]
    assert q>=0
    return np.vstack((np.hstack((A,np.zeros((A.shape[0],q)))),B))
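# Quick illustrative check, not part of the original snippet: triangular_stack right-pads A
# with zero columns until its width matches B, then stacks the two blocks vertically.
import numpy as np

A = np.ones((2, 2))
B = np.arange(12).reshape(3, 4)
print(triangular_stack(A, B).shape)   # (5, 4): A padded to 4 columns, stacked on top of B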
Example #58
0
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
from sklearn.datasets import load_wine
from sklearn import metrics

#Load the wine dataset into wine
wine = load_wine()

#Split the data for training
dados1 = wine.data[0:30]
dados2 = wine.data[59:94]
dados3 = wine.data[130:154]
dados_treinamento = np.vstack([dados1, dados2, dados3])
X = dados_treinamento

dados_target1 = wine.target[0:30]
dados_target2 = wine.target[59:94]
dados_target3 = wine.target[130:154]
dados_target = np.hstack([dados_target1, dados_target2, dados_target3])
y = dados_target

#Implement the KNN algorithm
neigh = KNeighborsClassifier(n_neighbors=5,weights="uniform")
neigh.fit(X, y)

#Split the data for testing
dados4 = wine.data[30:59]
dados5 = wine.data[94:130]
dados6 = wine.data[154:178]
dados_teste = np.vstack([dados4, dados5, dados6])
Example #59
0
    def _error_zonotopes(self,T=0):
        Sigma={}
        Psi={}
        if T==0:
            T=max(self.A.keys())
        # Construction of Xi:
        self.Xi,self.Xi_reduced,self.F_reduced={},{},{}
        self.Xi["x",0]=self.C[0]
        self.Xi["w",0]=np.zeros((self.o,self.n))
        self.Xi["v",0]=np.eye(self.o)
        for t in range(T):
#            print "t=",t
            Gw=spa.block_diag(*[self.W[t].G for tau in range(0,t+1)])
            Gv=spa.block_diag(*[self.V[t].G for tau in range(0,t+1)])
            Wbar=np.vstack([self.W[tau].x for tau in range(0,t+1)])
            Vbar=np.vstack([self.V[tau].x for tau in range(0,t+1)])
            L1=np.linalg.multi_dot([self.C[t+1],self.P["x",t+1],self.X0.G])
            L2=np.linalg.multi_dot([self.C[t+1],self.P["w",t+1],Gw])
            L3=np.zeros((self.o,Gv.shape[1]))
            L4=self.V[t+1].G
            F1=np.dot(self.Q["x",t],self.X0.G)
            F2=np.dot(self.Q["w",t],Gw)
            F3=np.dot(self.Q["v",t],Gv)
            F4=np.zeros(((t+1)*self.o,self.V[t+1].G.shape[1]))
            Sigma["e",t]=np.hstack((L1,L2,L3,L4))
            Psi["e",t]=np.hstack((F1,F2,F3,F4))
#            print Sigma["e",t].shape
#            print Psi["e",t].shape
            K_1=np.linalg.multi_dot([self.D[t],self.P["x",t],self.X0.G])
            K_2=np.dot(np.hstack((np.dot(self.D[t],self.P["w",t]),np.zeros((self.z,self.n)))),Gw)
            K_3=np.zeros((self.z,Gv.shape[1]))
            P_1=np.dot(self.Q["x",t],self.X0.G)
            P_2=np.dot(self.Q["w",t],Gw)
            P_3=-np.dot(self.Q["v",t],Gv)
            Sigma["f",t]=np.hstack((K_1,K_2,K_3))
            Psi["f",t]=np.hstack((P_1,P_2,P_3))
#            print "f",Sigma["f",t].shape
#            print "f",Psi["f",t].shape   
            self.M[t]=np.dot(Sigma["e",t],np.linalg.pinv(Psi["e",t]))
            self.R[t]=np.dot(Sigma["f",t],np.linalg.pinv(Psi["f",t]))
            self.N[t]=np.dot(self.C[t+1],self.P["u",t+1])-np.dot(self.M[t],self.Q["u",t])
            self.S[t]=np.hstack(( np.dot(self.D[t],self.P["u",t]),np.zeros((self.z,self.m)) ))-np.dot(self.R[t],self.Q["u",t])
#            print self.M[t].shape
#            print self.N[t].shape
#            print self.R[t].shape
#            print self.S[t].shape
            ebar=np.linalg.multi_dot([self.C[t+1],self.P["x",t+1],self.X0.x])\
                    -np.linalg.multi_dot([self.M[t],self.Q["x",t],self.X0.x])\
                    +np.linalg.multi_dot([self.C[t+1],self.P["w",t+1],Wbar])\
                    -np.linalg.multi_dot([self.M[t],self.Q["w",t],Wbar])\
                    +np.linalg.multi_dot([self.M[t],self.Q["v",t],Vbar])\
                    +self.V[t+1].x
            eG=Sigma["e",t]-np.dot(self.M[t],Psi["e",t])
            self.E[t]=zonotope(ebar,eG)               
            fbar=np.linalg.multi_dot([self.D[t],self.P["x",t],self.X0.x])\
                    -np.linalg.multi_dot([self.R[t],self.Q["x",t],self.X0.x])\
                    +np.dot(np.hstack((np.dot(self.D[t],self.P["w",t]),np.zeros((self.z,self.n)))),Wbar)\
                    -np.linalg.multi_dot([self.R[t],self.Q["w",t],Wbar])\
                    -np.linalg.multi_dot([self.R[t],self.Q["v",t],Vbar])\
                    +self.d[t]
            fG=Sigma["f",t]-np.dot(self.R[t],Psi["f",t])
            self.F[t]=zonotope(fbar,fG)
            F_G_reduced=Girard(fG,fG.shape[0])
            self.F_reduced[t]=zonotope(fbar,F_G_reduced)
        for t in range(T):
            self.Xi["x",t+1]=triangular_stack(self.Xi["x",t],np.dot(self.C[t+1],self.P["x",t+1])-np.dot(self.M[t],self.Q["x",t]))
            self.Xi["w",t+1]=triangular_stack(self.Xi["w",t],np.hstack((np.dot(self.C[t+1],self.P["w",t+1])-np.dot(self.M[t],self.Q["w",t]),\
              np.zeros((self.o,self.n)) )) )
            self.Xi["v",t+1]=triangular_stack(self.Xi["v",t],np.hstack((np.dot(self.M[t],self.Q["v",t]),np.eye(self.o))))
        for t in range(T+1):
            Gw=spa.block_diag(*[self.W[t].G for tau in range(0,t+1)])
            Gv=spa.block_diag(*[self.V[t].G for tau in range(0,t+1)])
            Wbar=np.vstack([self.W[tau].x for tau in range(0,t+1)])
            Vbar=np.vstack([self.V[tau].x for tau in range(0,t+1)])
#            print "t=",t,"Xi_shapes",Xi["x",t].shape,Xi["w",t].shape,Xi["v",t].shape
#            print "bar",Wbar.shape,Vbar.shape
            Xi_bar=np.dot(self.Xi["x",t],self.X0.x)+np.dot(self.Xi["w",t],Wbar)+np.dot(self.Xi["v",t],Vbar)
            Xi_G=np.hstack((np.dot(self.Xi["x",t],self.X0.G),np.dot(self.Xi["w",t],Gw),np.dot(self.Xi["v",t],Gv)))
            self.Xi[t]=zonotope(Xi_bar,Xi_G)
            Xi_G_reduced=Girard(Xi_G,Xi_G.shape[0])
            self.Xi_reduced[t]=zonotope(Xi_bar,Xi_G_reduced)
Example #60
0
def compute_rpn_proposals(conv_cls_fs, conv_loc_fs, conv_cls_ss, conv_loc_ss,
                          multi_cls, multi_reg, cfg, image_info):
    '''
    :argument
        cfg: configs
        conv_cls: FloatTensor, [batch, num_anchors * num_classes, h, w], conv output of classification
        conv_loc: FloatTensor, [batch, num_anchors * 4, h, w], conv output of localization
        image_info: FloatTensor, [batch, 3], image size
    :returns
        proposals: Variable, [N, 7], 2-dim: batch_ix, x1, y1, x2, y2, score, label
    '''
    batch_size, num_anchors_num_classes, featmap_h, featmap_w = conv_cls_fs.shape
    # [K*A, 4]
    anchors_overplane = anchor_helper.get_anchors_over_plane(
        featmap_h, featmap_w, cfg['anchor_ratios'], cfg['anchor_scales'],
        cfg['anchor_stride'])
    B = batch_size
    A = num_anchors_num_classes // cfg['num_classes']
    assert (A * cfg['num_classes'] == num_anchors_num_classes)
    K = featmap_h * featmap_w
    cls_view_fs = conv_cls_fs.permute(0, 2, 3, 1).contiguous().view(
        B, K * A, cfg['num_classes']).cpu().numpy()
    loc_view_fs = conv_loc_fs.permute(0, 2, 3,
                                      1).contiguous().view(B, K * A,
                                                           4).cpu().numpy()
    cls_view_ss = conv_cls_ss.permute(0, 2, 3, 1).contiguous().view(
        B, K * A, cfg['num_classes']).cpu().numpy()
    loc_view_ss = conv_loc_ss.permute(0, 2, 3,
                                      1).contiguous().view(B, K * A,
                                                           4).cpu().numpy()

    if cfg['cls_loss_type'] == 'softmax_focal_loss':
        cls_view_fs = cls_view_fs[:, :, 1:]
        cls_view_ss = cls_view_ss[:, :, 1:]
    nmsed_bboxes = []
    pre_nms_top_n = cfg['top_n_per_level']
    thresh = cfg['score_thresh'] if K >= 120 else 0.0
    for b_ix in range(B):
        loc_delta_fs = loc_view_fs[b_ix, :, :]
        if multi_reg:
            anchors_overplane = bbox_helper.compute_loc_bboxes(
                anchors_overplane, loc_delta_fs)

        ka_ix_fs, cls_ix_fs = np.where(cls_view_fs[b_ix] > 0.01)
        ka_ix_ss, cls_ix_ss = np.where(cls_view_ss[b_ix] > thresh)
        if multi_cls:
            ka_ix = np.intersect1d(ka_ix_fs, ka_ix_ss)
        else:
            ka_ix = ka_ix_ss
        cls_ix = np.zeros_like(ka_ix)

        if ka_ix.size == 0:
            continue

        scores = cls_view_ss[b_ix, ka_ix, cls_ix]
        loc_delta_ss = loc_view_ss[b_ix, ka_ix, :]
        loc_anchors = anchors_overplane[ka_ix, :]

        if True or pre_nms_top_n <= 0 or pre_nms_top_n > scores.shape[0]:
            order = scores.argsort()[::-1][:pre_nms_top_n]
        else:
            inds = np.argpartition(-scores, pre_nms_top_n)[:pre_nms_top_n]
            order = np.argsort(-scores[inds])
            order = inds[order]

        scores = scores[order]
        cls_ix = cls_ix[order]
        cls_ix = cls_ix + 1
        loc_delta = loc_delta_ss[order]
        loc_anchors = loc_anchors[order]

        boxes = bbox_helper.compute_loc_bboxes(loc_anchors, loc_delta)

        batch_ix = np.full(boxes.shape[0], b_ix)
        post_bboxes = np.hstack([
            batch_ix[:, np.newaxis], boxes, scores[:, np.newaxis],
            cls_ix[:, np.newaxis]
        ])
        nmsed_bboxes.append(post_bboxes)

    if len(nmsed_bboxes) > 0:
        nmsed_bboxes = np.vstack(nmsed_bboxes)
    else:
        nmsed_bboxes = np.array([])
    return nmsed_bboxes
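# Hedged aside, not the actual bbox_helper implementation: compute_loc_bboxes is assumed here
# to apply the standard anchor-delta decoding, roughly
#     cx, cy = x1 + 0.5 * w, y1 + 0.5 * h      # anchor centers
#     cx_new, cy_new = cx + dx * w, cy + dy * h
#     w_new, h_new   = w * exp(dw), h * exp(dh)
# followed by converting (cx_new, cy_new, w_new, h_new) back to corner coordinates.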