def transform(self, X, y=None):
        """Transform data by adding two virtual features

        Parameters
        ----------
        X: numpy ndarray, {n_samples, n_features}
            New data, where n_samples is the number of samples and n_features
            is the number of features.
        y: None
            Unused

        Returns
        -------
        X_transformed: array-like, shape (n_samples, n_features + 2)
            The transformed feature set
        """
        X = check_array(X)
        n_features = X.shape[1]

        X_transformed = np.copy(X)

        non_zero = np.apply_along_axis(lambda row: np.count_nonzero(row),
                                        axis=1, arr=X_transformed)
        zero_col = np.apply_along_axis(lambda row: (n_features - np.count_nonzero(row)),
                                        axis=1, arr=X_transformed)

        X_transformed = np.insert(X_transformed, n_features, non_zero, axis=1)
        X_transformed = np.insert(X_transformed, n_features + 1, zero_col, axis=1)

        return X_transformed
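For illustration, a minimal usage sketch of the transform above (the ZeroCount class name is an assumption, and check_array from sklearn is presumed imported as in the original module):

import numpy as np

X = np.array([[0, 1, 2],
              [0, 0, 3]])
X_t = ZeroCount().transform(X)
# The two appended columns hold the per-row non-zero and zero counts:
# X_t == [[0, 1, 2, 2, 1],
#         [0, 0, 3, 1, 2]]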
def runLabeling(file_path, gps_filename, output_name, frames_to_skip, final_frame, lp, rp, pickle_loc):
    video_reader = WarpedVideoReader(file_path)
    #video_reader.setSubsample(True)
    video_reader.setPerspectives(pickle_loc)
    gps_reader = GPSReader(gps_filename)
    gps_dat = gps_reader.getNumericData()

    cam = getCameraParams()
    cam_to_use = cam[int(output_name[-1]) - 1]

    lp = pixelTo3d(lp, cam_to_use)
    rp = pixelTo3d(rp, cam_to_use)
    tr = GPSTransforms(gps_dat, cam_to_use)
    pitch = -cam_to_use['rot_x']
    height = 1.106
    R_camera_pitch = euler_matrix(cam_to_use['rot_x'], cam_to_use['rot_y'], cam_to_use['rot_z'], 'sxyz')[0:3, 0:3]
    Tc = np.eye(4)
    Tc[0:3, 0:3] = R_camera_pitch.transpose()
    Tc[0:3, 3] = [-0.2, -height, -0.5]
    lpts = np.zeros((lp.shape[0], 4))
    rpts = np.zeros((rp.shape[0], 4))
    for t in range(min(tr.shape[0], lp.shape[0])):
        lpts[t, :] = np.dot(tr[t, :, :], np.linalg.solve(Tc, np.array([lp[t, 0], lp[t, 1], lp[t, 2], 1])))
        rpts[t, :] = np.dot(tr[t, :, :], np.linalg.solve(Tc, np.array([rp[t, 0], rp[t, 1], rp[t, 2], 1])))

    ldist = np.apply_along_axis(np.linalg.norm, 1, np.concatenate((np.array([[0, 0, 0, 0]]), lpts[1:] - lpts[0:-1])))
    rdist = np.apply_along_axis(np.linalg.norm, 1, np.concatenate((np.array([[0, 0, 0, 0]]), rpts[1:] - rpts[0:-1])))
    start_frame = frames_to_skip
    runBatch(video_reader, gps_dat, cam_to_use, output_name, start_frame, final_frame, lpts, rpts, ldist, rdist, tr)

    print "Done with %s" % output_name
Example #3
    def cont_r(self, percent=0.9, N=None):
        """Return the contribution of each row."""

        if not hasattr(self, 'F'):
            self.fs_r(N=self.rank)  # generate F
        return np.apply_along_axis(lambda _: _/self.L[:N], 1,
                np.apply_along_axis(lambda _: _*self.r, 0, self.F[:, :N]**2))
Example #4
def handle_message(message):
	cols = list(message['data'].keys())
	
	x = message['data']

	df_cos = df_sub[cols].append(x, ignore_index = True)


	X = df_cos.values

	user_array = X[-1]
	hood_matrix = X[:-1]

	max_array = hood_matrix.max(axis = 1)
	min_array = hood_matrix.min(axis = 1)

	def translate(user_value, col_min, col_max):
		NewRange = col_max - col_min
		return (((user_value - (-1)) * NewRange) / 2) + col_min	

	user_array = [translate(x,y,z) for x,y,z in zip(user_array,min_array, max_array)]

	if len(cols) == 1:
		cs_array = np.apply_along_axis(lambda x: abs(x[0] - user_array[0]), 1, hood_matrix)
	else:
		cs_array = np.apply_along_axis(lambda x: euclidean(x, user_array), 1, hood_matrix)

	print(cs_array)
	max_val, min_val = max(cs_array), min(cs_array)
	color_values = np.linspace(min_val, max_val, 10)

	map_data = dict(zip(ids, cs_array))
	emit('new clusters',  map_data, color_values.tolist())
Example #5
    def cont_c(self, percent=0.9, N=None):  # bug? check axis number 0 vs 1 here
        """Return the contribution of each column."""

        if not hasattr(self, 'G'):
            self.fs_c(N=self.rank)  # generate G
        return np.apply_along_axis(lambda _: _/self.L[:N], 1,
                np.apply_along_axis(lambda _: _*self.c, 0, self.G[:, :N]**2))
Example #6
def build_seq(sub_num, stims, sub_A_sd, sub_B_sd):
    # shuffle stimulus list
    stims = stims.reindex(np.random.permutation(stims.index))
    
    # inter-stimulus interval is randomly selected from [1,2,3,4]
    # the first ISI is removed (so sequence begins with a stim presentation)
    ISI = np.delete(np.repeat([1,2,3,4], len(stims.index)//4, axis=0), 0)
    np.random.shuffle(ISI)
    
    # create matrix of stimulus predictors and add ISIs
    X = np.diag(stims['effect'])
    X = np.apply_along_axis(func1d=insert_ISI, axis=0, arr=X, ISI=ISI)
    
    # reorder the columns so they are in the same order (0-39) for everyone
    X = X[:,[list(stims['stim']).index([i]) for i in range(len(stims.index))]]
    
    # now convolve all predictors with double gamma HRF
    X = np.apply_along_axis(func1d=np.convolve, axis=0, arr=X, v=spm_hrf(1))
    
    # build and return this subject's dataframe
    df = pd.DataFrame(X)
    df['time'] = range(len(df.index))
    df['sub_num'] = sub_num
    # df['sub_intercept'] = np.asscalar(np.random.normal(size=1))
    df['sub_A'] = np.asscalar(np.random.normal(size=1, scale=sub_A_sd))
    df['sub_B'] = np.asscalar(np.random.normal(size=1, scale=sub_B_sd))
    return df
Example #7
 def predict_log_proba(self, XB=None, XN=None ):
     if XB is not None and XN is not None:
         return np.array([self.predicao_log_prob(XB[i], XN[i]) for i in range(XB.shape[0])])
     elif XB is not None:
         return np.apply_along_axis(self.pred_log_prob_Bernoulli, 1,XB )
     else:
         return np.apply_along_axis(self.pred_log_prob_Normal, 1, XN)
Example #8
def proc_nb(freq, inten, args):
    ''' Process narrow band (single sweep) according to input specifications.
        Includes: box-car smooth in each sweep;
                 linear correction of baseline in each sweep;

    Arguments:
    freq  -- frequency array, 1D/2D np.array
    inten -- intensity array, 1D/2D np.array
    args  -- input arguments, argparse Object

    Returns:
    freq_b  -- processed frequency array, 1D/2D np.array
    inten_p/b -- processed intensity array, 1D/2D np.array
    '''

    if args.box:    # do box-car smooth
        box_win = (args.box[0])
        if len(inten.shape)==1:
            freq_b = box_car(freq, box_win)
            inten_b = box_car(inten, box_win)
        else:
            freq_b = np.apply_along_axis(box_car, 0, freq, box_win)
            inten_b = np.apply_along_axis(box_car, 0, inten, box_win)
    else:
        freq_b = freq
        inten_b = inten

    if args.nobase:     # if no baseline removal is specified
        return freq_b, inten_b
    else:
        # Apply linear correction on each sweep
        inten_p = np.apply_along_axis(db_poly, 0, inten_b, 1)
        if args.spline:
            inten_p = np.apply_along_axis(db_spline, 0, inten_b)
        return freq_b, inten_p
Example #9
def build_seq_block(sub_num, stims, sub_A_sd, sub_B_sd, block_size):
    # block stimulus list and shuffle within each block
    q = len(stims.index)
    stims = [stims.iloc[:q//2,], stims.iloc[q//2:,]]
    stims = [x.reindex(np.random.permutation(x.index)) for x in stims]
    shuffle(stims)
    stims = [[x.iloc[k:(k+block_size),] for k in range(0, q//2, block_size)] for x in stims]
    stims = pd.concat([val for pair in zip(stims[0], stims[1]) for val in pair])

    # inter-stimulus interval is randomly selected from [1,2,3,4]
    # the first ISI is removed (so sequence begins with a stim presentation)
    ISI = np.delete(np.repeat(2, len(stims.index), axis=0), 0)

    # create matrix of stimulus predictors and add ISIs
    X = np.diag(stims['effect'])
    X = np.apply_along_axis(func1d=insert_ISI, axis=0, arr=X, ISI=ISI)

    # reorder the columns so they are in the same order (0-39) for everyone
    X = X[:,[list(stims['stim']).index([i]) for i in range(len(stims.index))]]

    # now convolve all predictors with double gamma HRF
    X = np.apply_along_axis(func1d=np.convolve, axis=0, arr=X, v=spm_hrf(1))

    # build and return this subject's dataframe
    df = pd.DataFrame(X)
    df['time'] = range(len(df.index))
    df['sub_num'] = sub_num
    # df['sub_intercept'] = np.asscalar(np.random.normal(size=1))
    df['sub_A'] = np.asscalar(np.random.normal(size=1, scale=sub_A_sd))
    df['sub_B'] = np.asscalar(np.random.normal(size=1, scale=sub_B_sd))
    return df
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
    fx, fy = flow[y,x].T
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    h, w, _ = flow.shape
    left_eye = np.apply_along_axis(np.linalg.norm, 0, flow[:,:w//2]).mean()
    right_eye = np.apply_along_axis(np.linalg.norm, 0, flow[:,w//2:]).mean()
    print(left_eye - right_eye)

    # Calculate using angle information
    left_eye_mean_velocity = flow[:,:w//2,:].sum(axis=0)
    right_eye_mean_velocity = flow[:,w//2:,:].sum(axis=0)

    left_eye_mean_velocity_norm = np.linalg.norm(left_eye_mean_velocity)
    right_eye_mean_velocity_norm = np.linalg.norm(right_eye_mean_velocity)

    eyes_cosine = left_eye_mean_velocity.dot(right_eye_mean_velocity.T)/(left_eye_mean_velocity_norm*right_eye_mean_velocity_norm)
    print(eyes_cosine)

    return vis
Example #11
	def getMostInformativeInstances(self,classifier,unlabelledSet,vec):
		sorted_collated=[]
		if self.__uncertaintyMeasure=="randomSampling":
			# random sampling 
			if len(unlabelledSet)>=250:
				return random.sample(zip(unlabelledSet,unlabelledSet),self.__numberEachIteration)
			else:
				return zip(unlabelledSet,unlabelledSet)
		if self.__learnerType=="LogisticRegression":
			unlabelledSet_fitted = vec.transform(unlabelledSet)
			probs = classifier.predict_proba(unlabelledSet_fitted) 
			uncertainties = np.apply_along_axis( self.determineUncertainty, axis=1, arr=probs )

			collated = zip(unlabelledSet,uncertainties)
			if self.__uncertaintyMeasure=="entropy" or self.__uncertaintyMeasure=="leastConfident":
				sorted_collated = sorted(collated, key=lambda tup: tup[1], reverse=True)
			elif self.__uncertaintyMeasure=="smallestMargin":
				sorted_collated = sorted(collated, key=lambda tup: tup[1])
			return sorted_collated[:self.__numberEachIteration]
		elif self.__uncertaintyMeasure=="hyperplane":
			unlabelledSet_fitted = vec.transform(unlabelledSet)
			distances = classifier.decision_function(unlabelledSet_fitted) 
			uncertainties = np.apply_along_axis( self.determineUncertainty, axis=1, arr=distances )
			collated = zip(unlabelledSet,uncertainties)
			sorted_collated = sorted(collated, key=lambda tup: tup[1])
			return sorted_collated[:self.__numberEachIteration]
Example #12
File: RF.py Project: r-b-g-b/Lab
def calc_latency_by_stim(rast, stimparams):
	
	stim_psth, _ = Spikes.calc_psth_by_stim(rast, stimparams, bins = np.arange(0, 0.334, 0.001))
	stim_psth_smoo = np.apply_along_axis(myconv, 2, stim_psth)
	stim_peak_times = np.apply_along_axis(np.argmax, 2, stim_psth_smoo)
	
	return stim_peak_times
Example #13
    def close_gripper(self, lr, step_viewer=1, max_vel=.02, close_dist_thresh=0.004, grab_dist_thresh=0.005):
        print('CLOSING GRIPPER')
        # generate gripper finger trajectory
        joint_ind = self.robot.GetJoint("%s_gripper_l_finger_joint" % lr).GetDOFIndex()
        start_val = self.robot.GetDOFValues([joint_ind])[0]
        print('start_val: ', start_val)
        # execute gripper finger trajectory
        dyn_bt_objs = [bt_obj for sim_obj in self.dyn_sim_objs for bt_obj in sim_obj.get_bullet_objects()]
        next_val = start_val
        while next_val:
            flr2finger_pts_grid = self._get_finger_pts_grid(lr)
            ray_froms, ray_tos = flr2finger_pts_grid['l'], flr2finger_pts_grid['r']

            # stop closing if any ray hits a dynamic object within a distance of close_dist_thresh from both sides
            next_vel = max_vel
            for bt_obj in dyn_bt_objs:
                from_to_ray_collisions = self.bt_env.RayTest(ray_froms, ray_tos, bt_obj)
                to_from_ray_collisions = self.bt_env.RayTest(ray_tos, ray_froms, bt_obj)
                rays_dists = np.inf * np.ones((len(ray_froms), 2))
                for rc in from_to_ray_collisions:
                    ray_id = np.argmin(np.apply_along_axis(np.linalg.norm, 1, ray_froms - rc.rayFrom))
                    rays_dists[ray_id, 0] = np.linalg.norm(rc.pt - rc.rayFrom)
                for rc in to_from_ray_collisions:
                    ray_id = np.argmin(np.apply_along_axis(np.linalg.norm, 1, ray_tos - rc.rayFrom))
                    rays_dists[ray_id, 1] = np.linalg.norm(rc.pt - rc.rayFrom)
                colliding_rays_inds = np.logical_and(rays_dists[:, 0] != np.inf, rays_dists[:, 1] != np.inf)
                if np.any(colliding_rays_inds):
                    rays_dists = rays_dists[colliding_rays_inds, :]
                    if np.any(np.logical_and(rays_dists[:, 0] < close_dist_thresh,
                                             rays_dists[:, 1] < close_dist_thresh)):
                        next_vel = 0
                    else:
                        next_vel = np.minimum(next_vel, np.min(rays_dists.sum(axis=1)))
            if next_vel == 0:
                break
            next_val = np.maximum(next_val - next_vel, 0)

            self.robot.SetDOFValues([next_val], [joint_ind])
            self.step()
            if self.viewer and step_viewer:
                self.viewer.Step()
        handles = []
        # add constraints at the points where a ray hits a dynamic link within a distance of grab_dist_thresh
        for bt_obj in dyn_bt_objs:
            from_to_ray_collisions = self.bt_env.RayTest(ray_froms, ray_tos, bt_obj)
            to_from_ray_collisions = self.bt_env.RayTest(ray_tos, ray_froms, bt_obj)
            
            for i in range(ray_froms.shape[0]):
                self.viewer.Step()
            ray_collisions = [rc for rcs in [from_to_ray_collisions, to_from_ray_collisions] for rc in rcs]

            for rc in ray_collisions:
                if rc.link == bt_obj.GetKinBody().GetLink('rope_59'):
                    self.viewer.Step()
                if np.linalg.norm(rc.pt - rc.rayFrom) < grab_dist_thresh:
                    link_tf = rc.link.GetTransform()
                    link_tf[:3, 3] = rc.pt
                    self._add_constraints(lr, rc.link, link_tf)
        if self.viewer and step_viewer:
            self.viewer.Step()
Example #14
def get_taxa_coords(tax_counts, sample_coords):
    """Returns the PCoA coords of each taxon based on the coords of the samples."""
    # normalize taxa counts along each row/sample (i.e. to get relative abundance)
    tax_counts = apply_along_axis(lambda x: x / float(sum(x)), 0, tax_counts)
    # normalize taxa counts along each column/taxa (i.e. to make PCoA score contributions sum to 1)
    tax_ratios = apply_along_axis(lambda x: x / float(sum(x)), 1, tax_counts)
    return dot(tax_ratios, sample_coords)
 def test_reproject(self):
     s = self.optgp.sample(10, fluxes=False).as_matrix()
     proj = numpy.apply_along_axis(self.optgp._reproject, 1, s)
     assert all(self.optgp.validate(proj) == "v")
     s = numpy.random.rand(10, self.optgp.warmup.shape[1])
     proj = numpy.apply_along_axis(self.optgp._reproject, 1, s)
     assert all(self.optgp.validate(proj) == "v")
Example #16
    def test_rank(self):
        tm._skip_if_no_scipy()
        from scipy.stats import rankdata

        self.frame['A'][::2] = np.nan
        self.frame['B'][::3] = np.nan
        self.frame['C'][::4] = np.nan
        self.frame['D'][::5] = np.nan

        ranks0 = self.frame.rank()
        ranks1 = self.frame.rank(1)
        mask = np.isnan(self.frame.values)

        fvals = self.frame.fillna(np.inf).values

        exp0 = np.apply_along_axis(rankdata, 0, fvals)
        exp0[mask] = np.nan

        exp1 = np.apply_along_axis(rankdata, 1, fvals)
        exp1[mask] = np.nan

        tm.assert_almost_equal(ranks0.values, exp0)
        tm.assert_almost_equal(ranks1.values, exp1)

        # integers
        df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))

        result = df.rank()
        exp = df.astype(float).rank()
        tm.assert_frame_equal(result, exp)

        result = df.rank(1)
        exp = df.astype(float).rank(1)
        tm.assert_frame_equal(result, exp)
Example #17
 def warn_other_module():
     # Apply along axis is implemented in python; stacklevel=2 means
     # we end up inside its module, not ours.
     def warn(arr):
         warnings.warn("Some warning", stacklevel=2)
         return arr
     np.apply_along_axis(warn, 0, [0])
Example #18
    def val(self,vec=None,dim=None):
        # vec is the np.array containing the variables of the function to evaluate;
        # for any variable over which the function is not integrated, it holds
        # the constant value that variable should take,
        # and for the integrated variable it holds the lower bound of integration.
        # /!\ Only one dimension at a time is integrated /!\
        J=self.J
        # On the first call of the function, compute the density
        if vec is None:
            dim=self.dim_int[0]
            vec=self.borne_inf
            Prod=1./(J**len(self.dim_int))
            for i in self.dim_int:
                Prod=Prod*(self.borne_sup[i]-self.borne_inf[i])
            
        Un=np.ones(J+1,float)
        Un[0]=0.5
        Un[J]=0.5
        
        x=np.repeat(np.array([vec]),[J+1],axis=0)
        x[:,dim]= vec[dim]+np.arange(J+1)*1.*(self.borne_sup[dim]-vec[dim])/J 
        # At a leaf level, evaluate and sum the volume of each leaf
        if dim==self.dim_int[len(self.dim_int)-1]:
            F=np.apply_along_axis( self.obj.val , axis=1, arr=x )
        else:
            next_dim= self.dim_int[np.argwhere(dim==self.dim_int)[0][0]+1]
            F=np.apply_along_axis( self.val , axis=1, arr=x, dim=next_dim )

        # On the first call, multiply the total volume by the density "Prod"; otherwise simply return the volume of the branch
        if dim==self.dim_int[0]:
            vol=Un.dot(F)
            return Prod*vol;
        else:
            return Un.dot(F);
Example #19
def evaluate_classifier(classifier, instances, labels):
    """ Return a confusion matrix using the given classifier and data set."""

    # TASK 2.8.1
    # extract positive instances, their labels
    positives = instances[labels == 1]
    pos_labels = labels[labels == 1]

    # TASK 2.8.2
    # find the predictions of classifier on positives
    # and count the no. of correct predictions therein
    pos_predictions = np.apply_along_axis(classifier,
                                          axis=1,
                                          arr=positives)
    pos_correct = sum(pos_predictions)

    # TASK 2.8.3
    # extract negative instances, their labels
    negatives = instances[labels == 0]
    neg_labels = labels[labels == 0]

    # TASK 2.8.4
    # find the predictions of classifier on negatives
    # and count the no. of correct predictions therein
    neg_predictions = np.apply_along_axis(classifier,
                                          axis=1,
                                          arr=negatives)
    neg_correct = sum(neg_predictions == 0)

    confusion_matrix = np.array([[pos_correct, pos_labels.size - pos_correct],
                                 [neg_labels.size - neg_correct, neg_correct]],
                                dtype='float')

    return confusion_matrix
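As a quick sanity check, a hedged toy run of evaluate_classifier (the classifier, instances, and labels below are made up):

clf = lambda row: 1 if row[0] > 0 else 0   # hypothetical classifier
instances = np.array([[ 1.0, 0.2],
                      [-1.0, 0.3],
                      [ 2.0, -1.0],
                      [-2.0, 1.0]])
labels = np.array([1, 0, 1, 0])
print(evaluate_classifier(clf, instances, labels))
# [[2. 0.]
#  [0. 2.]]  -> two true positives, two true negatives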
Example #20
def _spearmanr2(a, b, axis=0):
    """
    Compute all pairwise spearman rank moment correlations between rows
    or columns of a and b

    Parameters
    ----------
    a : (N, M) numpy.ndarray
        The input cases a.
    b : (J, K) numpy.ndarray
        The input cases b. In case of axis == 0: J must equal N;
        otherwise if axis == 1 then K must equal M.
    axis : int
        If 0 the correlations are computed between a's and b's columns.
        Otherwise if 1 the correlations are computed between their rows.

    Returns
    -------
    cor : (M, K) or (N, J) nd.array
        If axis == 0 then an (M, K) matrix of correlations between a's and b's
        columns, else an (N, J) matrix of correlations between their rows.

    See Also
    --------
    scipy.stats.spearmanr
    """
    a, b = np.atleast_2d(a, b)
    assert a.shape[axis] == b.shape[axis]
    ar = np.apply_along_axis(stats.rankdata, axis, a)
    br = np.apply_along_axis(stats.rankdata, axis, b)

    return _corrcoef2(ar, br, axis=axis)
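A hedged usage sketch (assuming the _corrcoef2 helper used above is available in the same module): with axis=0, every column of a is rank-correlated with every column of b, so a single entry can be checked against scipy.stats.spearmanr.

a = np.random.rand(20, 3)
b = np.random.rand(20, 4)
cor = _spearmanr2(a, b, axis=0)                      # shape (3, 4)
ref = stats.spearmanr(a[:, 0], b[:, 0]).correlation
assert np.isclose(cor[0, 0], ref)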
Example #21
def load_model(model_path):
  with open(model_path,'rb') as f:
    is_train_labeled_data = struct.unpack('<1q',f.read(8))[0]  # unpack returns a tuple; take the flag itself

    (dim, num_words) = struct.unpack('<2q',f.read(2*8))
    word_embeddings = np.fromfile(f, np.float32, count=dim*(num_words+2))
    word_embeddings = np.reshape(word_embeddings, (num_words+2, dim))
    
    (dim, num_documents) = struct.unpack('<2q',f.read(2*8))
    document_embeddings = np.fromfile(f, np.float32, count=dim*num_documents)
    document_embeddings = np.reshape(document_embeddings, (num_documents, dim))

    if is_train_labeled_data:
      (ldim, rdim) = struct.unpack('<2q',f.read(2*8))
      transform_matrix = np.fromfile(f, np.float32, count=ldim*rdim)
      transform_matrix = np.reshape(transform_matrix, (rdim, ldim))
      
      (dim, num_labels) = struct.unpack('<2q',f.read(2*8))
      label_embeddings = np.fromfile(f, np.float32, count=dim*num_labels)
      label_embeddings = np.reshape(label_embeddings, (num_labels, dim))
    
  word_emb_norms = np.apply_along_axis(np.linalg.norm, 1, word_embeddings)
  doc_emb_norms = np.apply_along_axis(np.linalg.norm, 1, document_embeddings)

  model = dict()
  model['word_emb'] = word_embeddings/word_emb_norms.reshape(-1,1)
  model['doc_emb']= document_embeddings/doc_emb_norms.reshape(-1,1)
  if is_train_labeled_data:
    # label embeddings only exist when the model was trained on labeled data
    label_emb_norms = np.apply_along_axis(np.linalg.norm, 1, label_embeddings)
    model['label_emb']= label_embeddings/label_emb_norms.reshape(-1,1)
    model['trans_mat']= transform_matrix

  return model
Example #22
def preprocess(y, fs, flength, fshift, fnum):
    u"""Preprocess the audio signal (framing, windowing, FFT).
    y: audio signal
    fs: sampling frequency
    flength: frame length
    fshift: frame shift
    fnum: number of frames
    """
    X = np.zeros([fnum, flength])    # holds the framed data
    binSize = int(math.floor(fs / X.shape[1]))
    freqBin = np.arange(0, fs, binSize)

    start = 0       # start point of the slice
    end = flength - 1   # end point of the slice
    for t in range(fnum):   # frame the signal
        X[t, 0:(flength - 1)] = y[start:end]
        start += fshift
        end += fshift

    """ ham function """
    W = np.apply_along_axis(ham, 1, X)          # apply the window

    S = np.apply_along_axis(np.fft.fft, 1, W)   # apply the FFT
    spe = np.absolute(S)        # amplitude spectrum
    angdft = np.angle(S)        # phase angle
    specSum = np.apply_along_axis(np.sum, 1, spe)
    specRate = spe / specSum.reshape((fnum, 1)) * 100   # amplitude ratio

    return [S, spe, specRate, angdft, freqBin]
Example #23
    def center_and_norm_table(self,table,col_mean=None, col_norm=None,
                    row_mean=None, row_norm=None, table_norm=None):
        """
        Using the norming parameters set in the constructor preprocess each
        subtable.

        Parameters
        ----------
        table       : any subtable
        col_mean    : An optional row vector of column means
        col_norm    : An optional row vector of column norms
        row_mean    : an optional column vector of row means
        row_norm    : an optional column vector of row norms
        table_norm  : optional value to divide entire table by for normalization
        """
        if table.shape[0] == 0:
            return (table,)
        t = table.samples
        # first columns
        if self._col_center:
            if col_mean is not None:
                pass
            else:
                col_mean = np.mean(t, 0)
            t = t - col_mean
        if self._col_norm:
            if col_norm is not None:
                pass
            elif self._col_norm=='l2':
                col_norm = np.apply_along_axis(np.linalg.norm,0,t)
            elif self._col_norm=='std':
                col_norm = np.apply_along_axis(np.std,0,t)
            t = t / col_norm
        # then rows
        if self._row_center:
            if row_mean is not None:
                pass
            else:
                row_mean = np.mean(t.T, 0)
            t = (t.T - row_mean).T
        if self._row_norm:
            if row_norm is not None:
                pass
            elif self._row_norm=='l2':
                row_norm = np.apply_along_axis(np.linalg.norm,0,t.T)
            elif self._row_norm=='std':
                row_norm = np.apply_along_axis(np.std,0,t.T)
            t = (t.T / row_norm).T

        # whole table norm
        if self._table_norm:
            if table_norm is not None:
                pass
            elif self._table_norm == 'l2':
                table_norm = np.linalg.norm(t)
            elif self._table_norm == 'std':
                table_norm = np.std(t)
            t = t / table_norm
        table.samples = t
        return table, col_mean, col_norm, row_mean, row_norm, table_norm
Example #24
def observe_cloud(pts, radius, upsample=0, upsample_rad=1):
    """
    If upsample > 0, the number of points along the rope's backbone is resampled to be upsample points
    If upsample_rad > 1, the number of points perpendicular to the backbone points is resampled to be upsample_rad points, around the rope's cross-section
    The total number of points is then: (upsample if upsample > 0 else len(self.rope.GetControlPoints())) * upsample_rad
    """
    if upsample > 0:
        lengths = np.r_[0, np.apply_along_axis(np.linalg.norm, 1, np.diff(pts, axis=0))]
        summed_lengths = np.cumsum(lengths)
        assert len(lengths) == len(pts)
        pts = math_utils.interp2d(np.linspace(0, summed_lengths[-1], upsample), summed_lengths, pts)
    if upsample_rad > 1:
        # add points perpendicular to the points in pts around the rope's cross-section
        vs = np.diff(pts, axis=0) # vectors between the current and next points
        vs /= np.apply_along_axis(np.linalg.norm, 1, vs)[:,None]
        perp_vs = np.c_[-vs[:,1], vs[:,0], np.zeros(vs.shape[0])] # perpendicular vectors between the current and next points in the xy-plane
        perp_vs /= np.apply_along_axis(np.linalg.norm, 1, perp_vs)[:,None]
        vs = np.r_[vs, vs[-1,:][None,:]] # define the vector of the last point to be the same as the second to last one
        perp_vs = np.r_[perp_vs, perp_vs[-1,:][None,:]] # define the perpendicular vector of the last point to be the same as the second to last one
        perp_pts = []
        from openravepy import matrixFromAxisAngle
        for theta in np.linspace(0, 2*np.pi, upsample_rad, endpoint=False): # uniformly around the cross-section circumference
            for (center, rot_axis, perp_v) in zip(pts, vs, perp_vs):
                rot = matrixFromAxisAngle(rot_axis, theta)[:3,:3]
                perp_pts.append(center + rot.T.dot(radius * perp_v))
        pts = np.array(perp_pts)
    return pts
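A small hedged usage sketch (it assumes openravepy and the math_utils helper imported by the original module are available): a 3-point backbone is densified to 10 backbone points with 4 points around each cross-section.

pts = np.array([[0., 0., 0.],
                [0., 1., 0.],
                [0., 2., 0.]])
cloud = observe_cloud(pts, radius=0.005, upsample=10, upsample_rad=4)
print(cloud.shape)   # (40, 3): 10 backbone points x 4 cross-section points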
Example #25
    def plot(self, filedir=None, file_format='pdf'):
        if filedir is None:
            filedir = self.workdir
        import matplotlib.pyplot as plt
        plt.switch_backend('agg')

        plt.figure(figsize=(8, 6))
        plt.subplots_adjust(left=0.1, bottom=0.08, right=0.95, top=0.95, wspace=None, hspace=None)
        forces = np.array(self.output['forces'])
        maxforce = [np.max(np.apply_along_axis(np.linalg.norm, 1, x)) for x in forces]
        avgforce = [np.mean(np.apply_along_axis(np.linalg.norm, 1, x)) for x in forces]

        if np.max(maxforce) > 0.0 and np.max(avgforce) > 0.0:
            plt.semilogy(maxforce, 'b.-', label='Max force')
            plt.semilogy(avgforce, 'r.-', label='Mean force')
        else:
            plt.plot(maxforce, 'b.-', label='Max force')
            plt.plot(avgforce, 'r.-', label='Mean force')
        plt.xlabel('Ion movement iteration')
        plt.ylabel('Max Force')
        plt.savefig(filedir + os.sep + 'forces.' + file_format)
        plt.clf()

        plt.figure(figsize=(8, 6))
        plt.subplots_adjust(left=0.1, bottom=0.08, right=0.95, top=0.95, wspace=None, hspace=None)
        stress = np.array(self.output['stress'])
        diag_stress = [np.trace(np.abs(x)) for x in stress]
        offdiag_stress = [np.sum(np.abs(np.triu(x, 1).flatten())) for x in stress]
        plt.semilogy(diag_stress, 'b.-', label='diagonal')
        plt.semilogy(offdiag_stress, 'r.-', label='off-diagonal')
        plt.legend()
        plt.xlabel('Ion movement iteration')
        plt.ylabel(r'$\sum |stress|$ (diag, off-diag)')
        plt.savefig(filedir + os.sep + 'stress.' + file_format)
Example #26
def standardize_design(G, mean_var=None):
    if mean_var is None:
        mean_var = (0., 1./G.shape[1])
        np.apply_along_axis(lambda x: _change_sample_stats(x, (0., 1.)), 0, G)
    else:
        G -= mean_var[0]
        G /= np.sqrt(mean_var[1])
Example #27
    def LS_intersection(self):
        """
        self.line_array represents a system of 2d line equations. Each row represents a different
        observation of a line in map frame on which the pinger lies. Row structure: [x1, y1, x2, y2]
        Calculates the point in the plane with the least cumulative distance to every line
        in self.line_array. For more information, see:
        https://en.wikipedia.org/wiki/Line-line_intersection#In_two_dimensions_2
        """

        def line_segment_norm(line_end_pts):
            assert len(line_end_pts) == 4
            return npl.norm(line_end_pts[2:] - line_end_pts[:2])

        begin_pts = self.line_array[:, :2]
        diffs = self.line_array[:, 2:4] - begin_pts
        norms = np.apply_along_axis(line_segment_norm, 1, self.line_array).reshape(diffs.shape[0], 1)
        rot_left_90 = np.array([[0, -1], [1, 0]])
        perp_unit_vecs = np.apply_along_axis(lambda unit_diffs: rot_left_90.dot(unit_diffs), 1, diffs / norms)
        A_sum = np.zeros((2, 2))
        Ap_sum = np.zeros((2, 1))

        for x, y in zip(begin_pts, perp_unit_vecs):
            begin = x.reshape(2, 1)
            perp_vec = y.reshape(2, 1)
            A = perp_vec.dot(perp_vec.T)
            Ap = A.dot(begin)
            A_sum += A
            Ap_sum += Ap

        res = npl.inv(A_sum).dot(Ap_sum)
        self.pinger_position = Point(res[0], res[1], 0)
        return self.pinger_position
Example #28
def testLBP (format, formatMask, path, output) :
    dataset = pd.read_csv(path)
    idxCls = dataset['idx']
   # cnts = dataset['Cnt']
    fnList = dataset['path']
  #  out = open(output, 'w')
    lbps = list(map(lambda x: local_binary_pattern(cv2.bitwise_and(imread(format.format(x)),imread(formatMask.format(x))), lbpP, lbpR, lbpMethod), fnList))
    histograms = list(map(lambda x:  np.histogram(x, bins=range(int(np.max(lbps)) + 1))[0], lbps))
    distances = prw.pairwise_distances(histograms, metric='l1')
    np.fill_diagonal(distances, math.inf)
    guessedClasses = np.apply_along_axis(lambda x: np.argmin(x), 1, distances)
    scores = np.apply_along_axis(lambda x: np.min(x), 1, distances)
    correct = list(map(lambda i: idxCls[guessedClasses[i]] == idxCls[i], range(0, len(idxCls))))
   # out.write(str(np.average(correct)))
  #  fpr, tpr, thresholds = roc_curve(correct, scores, pos_label=1)
  #  pyplot.plot(tpr, fpr)
   # pyplot.show()
    with open(output + 'lbp_distances.csv', 'w', newline='') as fp:
        a = csv.writer(fp, delimiter=',')
        a.writerows(distances)

    with open(output + 'lbp_guessedClasses.csv', 'w', newline='') as fp:
        a = csv.writer(fp, delimiter=',')
        a.writerow(guessedClasses)

    with open(output + 'lbp_correct.csv', 'w', newline='') as fp:
        a = csv.writer(fp, delimiter=',')
        a.writerow(correct)

    with open(output + 'lbp_real.csv', 'w', newline='') as fp:
        a = csv.writer(fp, delimiter=',')
        a.writerow(idxCls)
def multiStepMC(z, price_evolution, anti = False
                , tracker = lambda S_ts : S_ts):
    '''
    multi-step-mc:
    ***NOTE: THE NUMBER OF STEPS IS DETERMINED BY THE DIMENSION OF Z (which is a np.array)***

    assumes equally spaced time steps


    price_evolution: a function that takes a 1d slice of Z and
                     returns a 1d array (+1 in size, to include s0) of the evolution
                     of the underlying based on those Zs

    tracker: a function (taking an array of the evolution of the underlying)
            that keeps track of features of the evolution of the
            stock price, such as the max/min or whether a boundary was hit
    '''

    if anti:
        z = -z

    ## generate the evolution of underlyings for all pathes
    evolutions_ = np.apply_along_axis(price_evolution, 1, z)

    return evolutions_[:,-1], np.apply_along_axis(tracker, 1, evolutions_)
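For context, a hedged sketch of one way this could be called, with a geometric-Brownian-motion price_evolution and a running-maximum tracker (all parameter values below are made up):

s0, r, sigma, dt = 100.0, 0.01, 0.2, 1.0 / 252

def gbm_evolution(z_slice):
    # one path of log-normal returns driven by the standard normals in z_slice
    log_ret = (r - 0.5 * sigma ** 2) * dt + sigma * np.sqrt(dt) * z_slice
    return s0 * np.exp(np.concatenate(([0.0], np.cumsum(log_ret))))

z = np.random.standard_normal((10000, 252))          # 10000 paths, 252 steps
terminal, path_max = multiStepMC(z, gbm_evolution, tracker=lambda s: s.max())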
def get_divergence_diversity_sliding(aft, block_length, VERBOSE=0):
    '''Get local divergence and diversity in a sliding window'''
    cons_ind = Patient.get_initial_consensus_noinsertions(aft, return_ind=True)
    ind_N = cons_ind == 5
    cons_ind[ind_N] = 0
    aft_nonanc = 1.0 - aft[:, cons_ind, np.arange(aft.shape[2])]
    aft_nonanc[:, ind_N] = 0

    aft_var = (aft * (1 - aft)).sum(axis=1)

    struct = np.ones(block_length)

    dg = np.ma.array(np.apply_along_axis(lambda x: np.convolve(x, struct, mode='valid'),
                                         axis=1, arr=aft_nonanc), hard_mask=True)
    ds = np.ma.array(np.apply_along_axis(lambda x: np.convolve(x, struct, mode='valid'),
                                         axis=1, arr=aft_var), hard_mask=True)

    # NOTE: normalization happens based on actual coverage
    norm = np.apply_along_axis(lambda x: np.convolve(x, struct, mode='valid'),
                               axis=1, arr=(-aft[:, 0].mask))

    dg.mask = norm < block_length
    dg /= norm

    ds.mask = norm < block_length
    ds /= norm

    x = np.arange(dg.shape[1]) + (block_length - 1) / 2.0

    return (x, dg, ds)
Example #31
    def forward(self, X, *args, **kwargs):
        self.zin = self.input_unit.forward(X)

        self.zout = np.apply_along_axis(self._pool, 1,
                                        self._add_padding(self.zin))
        return self.zout
 def slice_second_matrix(vector, matrix):
     return np.apply_along_axis(apply, axis=axis, arr=matrix, m_1=vector)
Example #33
for j in range(0, Xtrain.shape[1]):
    u = np.unique(Xtrain[:, j])
    varvals[nx:nx + u.size] = u
    nx = nx + u.size
    varlist.append(u)

varvals = varvals[~np.isnan(varvals)]


def foo(col):
    u = np.unique(col)
    nunq = u.shape
    return nunq


invals = np.apply_along_axis(foo, 0, Xtrain)
invals = invals[0]

# used later to find coefPaths
pathdataOH = np.repeat(newPaths[idxKeep], invals)
# used later to find the original location of the path from non one hot encode
oldpath = np.repeat(idxOP[idxKeep], invals)

randomize_idx = np.arange(len(y))
np.random.shuffle(randomize_idx)
tiledata = Xtrain[randomize_idx, :]
y = y[randomize_idx]
print("random y: ", y)

nnz = np.count_nonzero(tiledata, axis=0)
    da.data = dat
    da_interp = da.resample(time='1D').interpolate('linear')

    # spatial smoothing...
    # spatially smooth the 2-D daily slices of data using a mean generic filter. (without any aggregation)
    footprint_type = 'queens'
    footprint_lu = {'rooks':np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]), 
                    'queens':np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])}

    footprint = footprint_lu[ footprint_type ]
    spatial_smoothed = np.array(spatial_smooth( da_interp.values, footprint=footprint, ncpus=ncpus ))

    # if window_len == 1:
    #     window_len = 'paper_weights'
    # hanning smooth
    hanning_smoothed = np.apply_along_axis( smooth3, arr=spatial_smoothed, axis=0 )
    hanning_smoothed = np.apply_along_axis( smooth3, arr=hanning_smoothed, axis=0 )
    hanning_smoothed = np.apply_along_axis( smooth3, arr=hanning_smoothed, axis=0 )
    #     # hanning_smoothed[(da_interp.values > 1) | (da_interp.values < 0)] = da_interp.values[(da_interp.values > 1) | (da_interp.values < 0)]
    # else:
    #     fsmooth2 = partial( smooth2, window_len=window_len, window='hanning' )
    #     hanning_smoothed = np.apply_along_axis( fsmooth2, arr=spatial_smoothed, axis=0 )

    # make sure no values < 0, set to 0
    hanning_smoothed[ np.where(hanning_smoothed < 0) ] = 0

    # make sure no values > 1, set to 1
    hanning_smoothed[ np.where(hanning_smoothed > 1) ] = 1

    # write this out as a GeoTiff
    out_fn = os.path.join( base_path,'GTiff','alaska_singlefile','nsidc_0051_sic_nasateam_{}-{}_Alaska_hann_smoothed.tif'.format(str(begin.year),str(end.year)) )
Example #35
 def random_sample(self,
                   inputs,
                   n,
                   topk=None,
                   topp=None,
                   states=None,
                   temperature=1,
                   min_ends=1):
     """随机采样n个结果
     说明:非None的topk表示每一步只从概率最高的topk个中采样;而非None的topp
          表示每一步只从概率最高的且概率之和刚好达到topp的若干个token中采样。
     返回:n个解码序列组成的list。
     """
     inputs = [np.array([i]) for i in inputs]
     output_ids = self.first_output_ids
     results = []
     for step in range(self.maxlen):
         probas, states = self.predict(inputs, output_ids, states,
                                       temperature, 'probas')  # compute current probabilities
         probas /= probas.sum(axis=1, keepdims=True)  # make sure they are normalized
         if step == 0:  # after the first step, repeat the results n times
             probas = np.repeat(probas, n, axis=0)
             inputs = [np.repeat(i, n, axis=0) for i in inputs]
             output_ids = np.repeat(output_ids, n, axis=0)
         if topk is not None:
             k_indices = probas.argpartition(-topk,
                                             axis=1)[:, -topk:]  # keep only the topk
             probas = np.take_along_axis(probas, k_indices,
                                         axis=1)  # topk probabilities
             probas /= probas.sum(axis=1, keepdims=True)  # renormalize
         if topp is not None:
             p_indices = probas.argsort(axis=1)[:, ::-1]  # sort from high to low
             probas = np.take_along_axis(probas, p_indices, axis=1)  # sorted probabilities
             cumsum_probas = np.cumsum(probas, axis=1)  # cumulative probabilities
             flag = np.roll(cumsum_probas >= topp, 1, axis=1)  # flag the part beyond topp
             flag[:, 0] = False  # together with np.roll above, shifts the flags by one position
             probas[flag] = 0  # zero out everything past topp
             probas /= probas.sum(axis=1, keepdims=True)  # renormalize
         sample_func = lambda p: np.random.choice(len(p), p=p)  # sampling function following the probabilities
         sample_ids = np.apply_along_axis(sample_func, 1, probas)  # perform the sampling
         sample_ids = sample_ids.reshape((-1, 1))  # align the shape
         if topp is not None:
             sample_ids = np.take_along_axis(p_indices, sample_ids,
                                             axis=1)  # map back to the original ids
         if topk is not None:
             sample_ids = np.take_along_axis(k_indices, sample_ids,
                                             axis=1)  # map back to the original ids
         output_ids = np.concatenate([output_ids, sample_ids], 1)  # update the outputs
         end_counts = (output_ids == self.end_id).sum(1)  # count the end tokens seen so far
         if output_ids.shape[1] >= self.minlen:  # minimum length check
             flag = (end_counts == min_ends)  # flag the completed sequences
             if flag.any():  # if any are completed
                 for ids in output_ids[flag]:  # store the completed sequences
                     results.append(ids)
                 flag = (flag == False)  # flag the unfinished sequences
                 inputs = [i[flag] for i in inputs]  # keep only the unfinished inputs
                 output_ids = output_ids[flag]  # keep only the unfinished candidates
                 end_counts = end_counts[flag]  # keep only the unfinished end counts
                 if len(output_ids) == 0:
                     break
     # if any unfinished sequences remain, put them straight into the results
     for ids in output_ids:
         results.append(ids)
     # return the results
     return results
def is_c_reduced(vecs, c):
    """Check if a basis is c-reduced."""
    vecs = gs(vecs)
    r = np.apply_along_axis(lambda x: np.linalg.norm(x)**2, 1, vecs)
    return np.all((r[: -1] / r[1:]) < c)
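A minimal sanity check, assuming gs() performs Gram-Schmidt orthogonalization of the rows as the call above implies:

basis = np.eye(3)
print(is_c_reduced(basis, c=2.0))   # True: every ratio of consecutive squared GS norms is 1 < 2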
Example #37
def _fit_xdawn(epochs_data, y, n_components, reg=None, signal_cov=None,
               events=None, tmin=0., sfreq=1., method_params=None, info=None):
    """Fit filters and coefs using Xdawn Algorithm.

    Xdawn is a spatial filtering method designed to improve the signal
    to signal + noise ratio (SSNR) of the event related responses. Xdawn was
    originally designed for P300 evoked potential by enhancing the target
    response with respect to the non-target response. This implementation is a
    generalization to any type of event related response.

    Parameters
    ----------
    epochs_data : array, shape (n_epochs, n_channels, n_times)
        The epochs data.
    y : array, shape (n_epochs)
        The epochs class.
    n_components : int (default 2)
        The number of components to decompose the signals.
    reg : float | str | None (default None)
        If not None (same as ``'empirical'``, default), allow
        regularization for covariance estimation.
        If float, shrinkage is used (0 <= shrinkage <= 1).
        For str options, ``reg`` will be passed as ``method`` to
        :func:`mne.compute_covariance`.
    signal_cov : None | Covariance | array, shape (n_channels, n_channels)
        The signal covariance used for whitening of the data.
        if None, the covariance is estimated from the epochs signal.
    events : array, shape (n_epochs, 3)
        The epochs events, used to correct for epochs overlap.
    tmin : float
        Epochs starting time. Only used if events is passed to correct for
        epochs overlap.
    sfreq : float
        Sampling frequency.  Only used if events is passed to correct for
        epochs overlap.

    Returns
    -------
    filters : array, shape (n_channels, n_channels)
        The Xdawn components used to decompose the data for each event type.
    patterns : array, shape (n_channels, n_channels)
        The Xdawn patterns used to restore the signals for each event type.
    evokeds : array, shape (n_class, n_components, n_times)
        The independent evoked responses per condition.
    """
    n_epochs, n_channels, n_times = epochs_data.shape

    classes = np.unique(y)

    # XXX Eventually this could be made to deal with rank deficiency properly
    # by exposing this "rank" parameter, but this will require refactoring
    # the linalg.eigh call to operate in the lower-dimension
    # subspace, then project back out.

    # Retrieve or compute whitening covariance
    if signal_cov is None:
        signal_cov = _regularized_covariance(
            np.hstack(epochs_data), reg, method_params, info, rank='full')
    elif isinstance(signal_cov, Covariance):
        signal_cov = signal_cov.data
    if not isinstance(signal_cov, np.ndarray) or (
            not np.array_equal(signal_cov.shape,
                               np.tile(epochs_data.shape[1], 2))):
        raise ValueError('signal_cov must be None, a covariance instance, '
                         'or an array of shape (n_chans, n_chans)')

    # Get prototype events
    if events is not None:
        evokeds, toeplitzs = _least_square_evoked(
            epochs_data, events, tmin, sfreq)
    else:
        evokeds, toeplitzs = list(), list()
        for c in classes:
            # Prototyped response for each class
            evokeds.append(np.mean(epochs_data[y == c, :, :], axis=0))
            toeplitzs.append(1.)

    filters = list()
    patterns = list()
    for evo, toeplitz in zip(evokeds, toeplitzs):
        # Estimate covariance matrix of the prototype response
        evo = np.dot(evo, toeplitz)
        evo_cov = _regularized_covariance(evo, reg, method_params, info,
                                          rank='full')

        # Fit spatial filters
        try:
            evals, evecs = linalg.eigh(evo_cov, signal_cov)
        except np.linalg.LinAlgError as exp:
            raise ValueError('Could not compute eigenvalues, ensure '
                             'proper regularization (%s)' % (exp,))
        evecs = evecs[:, np.argsort(evals)[::-1]]  # sort eigenvectors
        evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
        _patterns = np.linalg.pinv(evecs.T)
        filters.append(evecs[:, :n_components].T)
        patterns.append(_patterns[:, :n_components].T)

    filters = np.concatenate(filters, axis=0)
    patterns = np.concatenate(patterns, axis=0)
    evokeds = np.array(evokeds)
    return filters, patterns, evokeds
Example #38
df = df[["Outstate", "F.Undergrad"]]

x = np.random.rand(
    len(df)
)  # This code generates a random number between 0 and 1 for each row of df

mask = np.random.rand(
    len(df)
) < 0.75  # This gives a random mask covering roughly 75% of the data set, which will be used as the training set
""" Creating training and test data """
tr_dt = df[mask]
te_dt = df[~mask]
""" Labels """
tr_labels = target[mask]
te_labels = target[~mask]


def edist(x, y):
    return np.sqrt(np.sum((x - y)**2))


x = tr_dt.values
x1 = df[df.index == "Chestnut Hill College"].values
print(x1)
k = 3
dists = np.apply_along_axis(lambda x: edist(x, x1), 1, x)
topk = np.argsort(dists)[:k]
print(target[topk])

print(target[df.index == "Chestnut Hill College"])
def interpolatePN(y, empty_fill_val=0):
    np.apply_along_axis(interpolateRow, 0, y)
    return y
Example #40
    def test_normalizations_randomized(self, seed_value,
                                       normalizer_name_and_func,
                                       add_nulls_to_factor):

        name, func = normalizer_name_and_func

        shape = (7, 7)

        # All Trues.
        nomask = self.ones_mask(shape=shape)
        # Falses on main diagonal.
        eyemask = self.eye_mask(shape=shape)
        # Falses on other diagonal.
        eyemask90 = rot90(eyemask)
        # Falses on both diagonals.
        xmask = eyemask & eyemask90

        # Block of random data.
        factor_data = self.randn_data(seed=seed_value, shape=shape)
        if add_nulls_to_factor:
            factor_data = where(eyemask, factor_data, nan)

        # Cycles of 0, 1, 2, 0, 1, 2, ...
        classifier_data = (
            (self.arange_data(shape=shape, dtype=int64_dtype) + seed_value) %
            3)
        # With -1s on main diagonal.
        classifier_data_eyenulls = where(eyemask, classifier_data, -1)
        # With -1s on opposite diagonal.
        classifier_data_eyenulls90 = where(eyemask90, classifier_data, -1)
        # With -1s on both diagonals.
        classifier_data_xnulls = where(xmask, classifier_data, -1)

        f = self.f
        c = C()
        c_with_nulls = OtherC()
        m = Mask()
        method = getattr(f, name)
        terms = {
            'vanilla': method(),
            'masked': method(mask=m),
            'grouped': method(groupby=c),
            'grouped_with_nulls': method(groupby=c_with_nulls),
            'both': method(mask=m, groupby=c),
            'both_with_nulls': method(mask=m, groupby=c_with_nulls),
        }

        expected = {
            'vanilla':
            apply_along_axis(
                func,
                1,
                factor_data,
            ),
            'masked':
            where(
                eyemask,
                grouped_apply(factor_data, eyemask, func),
                nan,
            ),
            'grouped':
            grouped_apply(
                factor_data,
                classifier_data,
                func,
            ),
            # If the classifier has nulls, we should get NaNs in the
            # corresponding locations in the output.
            'grouped_with_nulls':
            where(
                eyemask90,
                grouped_apply(factor_data, classifier_data_eyenulls90, func),
                nan,
            ),
            # Passing a mask with a classifier should behave as though the
            # classifier had nulls where the mask was False.
            'both':
            where(
                eyemask,
                grouped_apply(
                    factor_data,
                    classifier_data_eyenulls,
                    func,
                ),
                nan,
            ),
            'both_with_nulls':
            where(
                xmask,
                grouped_apply(
                    factor_data,
                    classifier_data_xnulls,
                    func,
                ),
                nan,
            )
        }

        self.check_terms(
            terms=terms,
            expected=expected,
            initial_workspace={
                f: factor_data,
                c: classifier_data,
                c_with_nulls: classifier_data_eyenulls90,
                Mask(): eyemask,
            },
            mask=self.build_mask(nomask),
        )
Example #41
 def vectorized_pos(a, score, axis):
     return np.apply_along_axis(stats.percentileofscore, axis, a, score)
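A toy call of the helper above (stats is scipy.stats as in the snippet; the data are made up): it returns the percentile rank of the score within each row.

a = np.array([[10, 20, 30, 40],
              [30, 40, 50, 60]])
print(vectorized_pos(a, 25, 1))   # [50.  0.]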
import pandas as pd

my_array = np.array([[10, 2, 13], [21, 22, 23], [31, 32, 33], [10, 57, 20],
                     [20, 20, 20], [101, 91, 10]])


def my_function(x):
    position = np.argmax(x)
    return position


# Using <em>axis=0</em> we can apply that function to all columns:

# In[27]:

print(np.apply_along_axis(my_function, axis=0, arr=my_array))

# Using <em>axis=1</em> we can apply that function to all rows:

# In[28]:

print(np.apply_along_axis(my_function, axis=1, arr=my_array))

# ## Pandas
#
# Pandas has a similar method, the <em>apply</em> method for applying a user function by either row or column. The Pandas method for determining the position of the highest value is <em>idxmax</em>.
#
# We will convert our NumPy array to a Pandas dataframe, define our function, and then apply it to all columns. Notice that because we are working in Pandas the returned value is a Pandas series (equivalent to a DataFrame, but with only one axis) with an index value.

# In[29]:
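A sketch of the Pandas step described above, reusing my_array and my_function from the earlier cells:

df = pd.DataFrame(my_array)
print(df.apply(my_function, axis=0))   # position of the max value in each column
print(df.idxmax(axis=0))               # Pandas' built-in equivalent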
Example #43
##### get data - digits (MNIST)
##  NB first 784 cols are features. last col is labels
## train data is NxD, N = 60,000 D = 784
mnist = sc.loadmat('../../data/hw3_mnist_dist/train.mat')['trainX']
N = mnist.shape[0]
D = mnist.shape[1] - 1
mnist_test = sc.loadmat('../../data/hw3_mnist_dist/test.mat')['testX']

## contrast-normalize the pixel values.
## Divide each image (row) by its L2 norm
## Training Data
labs = mnist[:, -1, np.newaxis]
classes = np.unique(labs)
mnist = np.delete(mnist, -1, axis=1)

L2norms = np.apply_along_axis(lin.norm, 1, mnist)
L2norms.shape = (L2norms.shape[0], 1)
mnist = mnist / L2norms
## check that each row now has L2 norm of one
np.mean(
    np.isclose(np.apply_along_axis(lin.norm, 1, mnist),
               np.ones(mnist.shape[0])))
mnist = np.concatenate((mnist, labs), axis=1)
mnist[:, -1] = mnist[:, -1].astype(int)

## Testing Data
L2norms = np.apply_along_axis(lin.norm, 1, mnist_test)
L2norms.shape = (L2norms.shape[0], 1)
mnist_test = mnist_test / L2norms
## check that each row now has L2 norm of one
np.mean(
Example #44
def compute_dvars(in_file,
                  in_mask,
                  remove_zerovariance=False,
                  intensity_normalization=1000):
    """
    Compute the :abbr:`DVARS (D referring to temporal
    derivative of timecourses, VARS referring to RMS variance over voxels)`
    [Power2012]_.

    Particularly, the *standardized* :abbr:`DVARS (D referring to temporal
    derivative of timecourses, VARS referring to RMS variance over voxels)`
    [Nichols2013]_ are computed.

    .. [Nichols2013] Nichols T, `Notes on creating a standardized version of
         DVARS <http://www2.warwick.ac.uk/fac/sci/statistics/staff/academic-\
research/nichols/scripts/fsl/standardizeddvars.pdf>`_, 2013.

    .. note:: Implementation details

      Uses the implementation of the `Yule-Walker equations
      from nitime
      <http://nipy.org/nitime/api/generated/nitime.algorithms.autoregressive.html\
#nitime.algorithms.autoregressive.AR_est_YW>`_
      for the :abbr:`AR (auto-regressive)` filtering of the fMRI signal.

    :param numpy.ndarray func: functional data, after head-motion-correction.
    :param numpy.ndarray mask: a 3D mask of the brain
    :param bool output_all: write out all dvars
    :param str out_file: a path to which the standardized dvars should be saved.
    :return: the standardized DVARS

    """
    import numpy as np
    import nibabel as nb
    from nitime.algorithms import AR_est_YW
    import warnings

    func = nb.load(in_file, mmap=NUMPY_MMAP).get_data().astype(np.float32)
    mask = nb.load(in_mask, mmap=NUMPY_MMAP).get_data().astype(np.uint8)

    if len(func.shape) != 4:
        raise RuntimeError("Input fMRI dataset should be 4-dimensional")

    idx = np.where(mask > 0)
    mfunc = func[idx[0], idx[1], idx[2], :]

    if intensity_normalization != 0:
        mfunc = (mfunc / np.median(mfunc)) * intensity_normalization

    # Robust standard deviation (we are using "lower" interpolation
    # because this is what FSL is doing)
    func_sd = (np.percentile(mfunc, 75, axis=1, interpolation="lower") -
               np.percentile(mfunc, 25, axis=1, interpolation="lower")) / 1.349

    if remove_zerovariance:
        mfunc = mfunc[func_sd != 0, :]
        func_sd = func_sd[func_sd != 0]

    # Compute (non-robust) estimate of lag-1 autocorrelation
    ar1 = np.apply_along_axis(
        AR_est_YW, 1,
        regress_poly(0, mfunc, remove_mean=True)[0].astype(np.float32), 1)[:,
                                                                           0]

    # Compute (predicted) standard deviation of temporal difference time series
    diff_sdhat = np.squeeze(np.sqrt(((1 - ar1) * 2).tolist())) * func_sd
    diff_sd_mean = diff_sdhat.mean()

    # Compute temporal difference time series
    func_diff = np.diff(mfunc, axis=1)

    # DVARS (no standardization)
    dvars_nstd = np.sqrt(np.square(func_diff).mean(axis=0))

    # standardization
    dvars_stdz = dvars_nstd / diff_sd_mean

    with warnings.catch_warnings():  # catch, e.g., divide by zero errors
        warnings.filterwarnings('error')

        # voxelwise standardization
        diff_vx_stdz = np.square(
            func_diff / np.array([diff_sdhat] * func_diff.shape[-1]).T)
        dvars_vx_stdz = np.sqrt(diff_vx_stdz.mean(axis=0))

    return (dvars_stdz, dvars_nstd, dvars_vx_stdz)
def to_gray_scale(img):
    img_array = np.asarray(img)
    luminosity = lambda x: 0.21 * x[0] + 0.72 * x[1] + 0.07 * x[2]
    return np.apply_along_axis(func1d=luminosity, axis=2, arr=img_array)
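A quick hedged check on a made-up 2x2 RGB array (any array-like image works, since np.asarray is applied first):

rgb = np.array([[[255, 0, 0], [0, 255, 0]],
                [[0, 0, 255], [255, 255, 255]]], dtype=float)
print(to_gray_scale(rgb))
# [[ 53.55 183.6 ]
#  [ 17.85 255.  ]]   -> 0.21*R + 0.72*G + 0.07*B per pixel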
Example #46
    with open(dataFile, 'rt') as csvfile:
        datas = csv.reader(csvfile, delimiter = ',')
        for row in datas:
            
            if row is None or len(row) == 0:
                continue
            data.append(row)
    return data

def normalization(sample):
    """one sample pass in"""
    sample = sample + 100
    # 2^20 = 1048576
    return np.log2(sample * 1048576/np.sum(sample))
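
# A quick sanity check of normalization() on a made-up 3-value expression row:
# each entry becomes log2((x + 100) * 2**20 / sum(x + 100)).
print(normalization(np.array([0.0, 100.0, 300.0])))   # approx [17.19 18.19 19.19]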

log = open('log.txt', 'w')

DataList = __loadData('gtex_data.csv')
LabelList = __loadData('label.csv')

testTraining = np.array(DataList).astype(float)
testTraining = np.apply_along_axis(normalization, 1, testTraining )

testlabeling = np.array(LabelList)

model = GaussianNB() 
model.fit(testTraining,testlabeling)

with open('gtex_TrainingNormalizedResult.pkl', 'wb') as tr:
    pickle.dump(model, tr, pickle.HIGHEST_PROTOCOL)
Example #47
        predictor.fit(X_train_strap,
                      y_train_strap,
                      train_meta,
                      max_depth=argsmax_depth)

        # Predict
        y_prob = predictor.predict(X_test, prob=True)
        y_pred = predictor.predict(X_test)

        indices.append(choice)
        preds.append(y_pred.astype(object))
        probs.append(y_prob.astype(object))

    indices = np.column_stack(indices)

    combined = np.apply_along_axis(np.argmax, 1, np.sum(np.array(probs),
                                                        axis=0))
    predict = []
    for i in combined:
        prediction = test_meta[-1][1][i]
        predict.append(prediction)

    predict = np.array(predict)

# Run boosted-trees
if method == 'boost':

    # Initialize instance weights
    w = np.ones(X_train.shape[0]) / X_train.shape[0]

    weights = []
    treeweights = []
Example #48
def main():
    train = False
    modelName = None
    if len(sys.argv) != 3:
        print("Usage: finalLSTM.py modelName [train,test]")
        exit()
    else:
        modelName = sys.argv[1]

        if (sys.argv[2] == "train"):
            train = True

    # filename = trainStates["Yucatan"]
    # filename = trainStates["Guerrero"]
    filename = trainStates["Veracruz"]

    # filename = trainStates["QuintanaRoo"]
    # filename = trainStates["Chiapas"]
    # filename = trainStates["Rio"]

    x, y = getXY(filename)

    val_x, val_y = getXY(testStates["Bahia"])

    model = loadOrCreateModel(modelName, x)

    history = None
    if (train):
        history = model.fit(x,
                            y,
                            epochs=10,
                            batch_size=x.shape[0],
                            validation_data=(val_x, val_y),
                            verbose=1,
                            shuffle=False)
        saveModel(model, modelName)
    else:
        global populations

        # testFile = trainStates["Veracruz"]
        # testFile = trainStates["Yucatan"]
        # testFile = trainStates["Guerrero"]
        testFile = trainStates["QuintanaRoo"]

        # testFile = testStates["NuevoLeon"]
        # testFile = testStates["MatoGrosso"]
        # testFile = testStates["Bahia"]
        # testFile = testStates["Chiapas"]

        scale = populations[testFile]
        x, y = getXY(testFile)
        predictions = model.predict(x)
        y = y.reshape((len(y), 1))

        inv_yPred = np.apply_along_axis(lambda x: x * scale / 100000, 1,
                                        predictions)
        inv_y = np.apply_along_axis(lambda x: x * scale / 100000, 1, y)
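        # (Note, not in the original script: since the scaling is purely
        # elementwise, predictions * scale / 100000 and y * scale / 100000
        # give the same results without apply_along_axis.)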

        rmse = sqrt(mean_squared_error(inv_y, inv_yPred))
        print('Test RMSE: %.3f' % rmse)
        print("Total", sum(inv_y))
        print("len", len(inv_y))
        pyplot.title("Cases {} RMSE: {:.2f}".format(formatFilename(testFile),
                                                    rmse))
        pyplot.ylabel("Cases")
        pyplot.xlabel("Week #")
        pyplot.plot(inv_y, label="Cases")
        pyplot.plot(inv_yPred, label="Predictions")
        pyplot.legend()
        pyplot.show()
Example #49
def _fill_trainval_infos(lyftdata,
                         use_flat_vehicle_coordinates,
                         use_second_format_direction,
                         calc_num_points,
                         is_test=False):
    train_infos, val_infos = [], []
    train_scene_tokens = [
        scene['token'] for scene in lyftdata.scene
        if scene['name'] in splits.train
    ]
    for sample in prog_bar(sorted(lyftdata.sample, key=lambda s: s['timestamp'])):
        lidar_token = sample['data']['LIDAR_TOP']
        cam_front_token = sample['data']['CAM_FRONT']

        sd_record = lyftdata.get('sample_data', lidar_token)
        cs_record = lyftdata.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
        pose_record = lyftdata.get('ego_pose', sd_record['ego_pose_token'])

        lidar_path, boxes_lidar, _ = lyftdata.get_sample_data(lidar_token)
        cam_path, _, cam_intrinsic = lyftdata.get_sample_data(cam_front_token)

        info = {
            'lidar_path': lidar_path,
            'cam_front_path': cam_path,
            'token': sample['token'],
            'lidar2ego_translation': cs_record['translation'],
            'lidar2ego_rotation': cs_record['rotation'],
            'ego2global_translation': pose_record['translation'],
            'ego2global_rotation': pose_record['rotation'],
            'timestamp': sample['timestamp'],
        }

        if not is_test:
            locs = np.array([b.center for b in boxes_lidar]).reshape(-1, 3)
            dims = np.array([b.wlh for b in boxes_lidar]).reshape(-1, 3)
            rots = np.array([b.orientation.yaw_pitch_roll[0] for b in boxes_lidar]).reshape(-1, 1)
            gt_boxes = np.concatenate([locs, dims, rots], axis=1)
            gt_names = np.array([b.name for b in boxes_lidar])

            if calc_num_points:
                try:
                    pointcloud = LidarPointCloud.from_file(lidar_path)
                except Exception as e:
                    print('ERROR:', e, lidar_path)
                    continue

                indices = box_np_ops.points_in_rbbox(pointcloud.points.T[:, :3], gt_boxes)
                num_points_in_gt = indices.sum(0)
                info['num_lidar_pts'] = num_points_in_gt.astype(np.int32)

            if use_flat_vehicle_coordinates:
                _, boxes_flat_vehicle, _ = lyftdata.get_sample_data(
                    lidar_token,
                    use_flat_vehicle_coordinates=True
                )
                locs = np.array([b.center for b in boxes_flat_vehicle]).reshape(-1, 3)
                dims = np.array([b.wlh for b in boxes_flat_vehicle]).reshape(-1, 3)
                rots = np.array([b.orientation.yaw_pitch_roll[0] for b in boxes_flat_vehicle]).reshape(-1, 1)
                gt_boxes = np.concatenate([locs, dims, rots], axis=1)
                gt_names = np.array([b.name for b in boxes_flat_vehicle])

            if use_second_format_direction:
                gt_boxes[:, 6] = np.apply_along_axis(
                    lambda r: -ds_utils.wrap_to_pi(r + np.pi / 2),
                    axis=1,
                    arr=gt_boxes[:, 6:])

            annotations = [
                lyftdata.get('sample_annotation', token)
                for token in sample['anns']
            ]
            assert len(gt_boxes) == len(annotations), f"{len(gt_boxes)} != {len(annotations)}"

            info['gt_boxes'] = gt_boxes
            info['gt_names'] = gt_names

        if sample['scene_token'] in train_scene_tokens:
            train_infos.append(info)
        else:
            val_infos.append(info)

    return train_infos, val_infos
Example #50
                  dtype=np.float64,
                  delimiter=',',
                  skip_header=1)


def lnglatWeights(row, geo_multiplier, park_multiplier):
    return [
        row[0], row[1], row[2] * park_multiplier, row[3] * geo_multiplier,
        row[4] * geo_multiplier
    ]


geo_rate = 100000000.
park_rate = 0.6

x = np.apply_along_axis(lnglatWeights, 1, x, geo_rate, park_rate)
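# (Note, not in the original snippet: the trailing geo_rate and park_rate
# arguments are forwarded by np.apply_along_axis to lnglatWeights as its
# geo_multiplier and park_multiplier parameters.)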
print(x)
print(y)

knc = neighbors.KNeighborsClassifier(algorithm='auto')

knc.fit(x, y)


class PredictionServer(prediction_pb2_grpc.PredictionServiceServicer):
    def FindNearestHouseIndices(self, request, context):
        print(request)
        results = knc.kneighbors([[
            request.NumberOfBedrooms, request.NumberOfBathrooms,
            request.NumberOfParkings * park_rate, request.Latitude * geo_rate,
            request.Longitude * geo_rate
Example #51
    def _normalize_in_log_space(self, hist):
        """ Normalize in each dimension in log space """
        return np.apply_along_axis(lambda x: x - self.logSumExp(x), 1, hist)
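
# The logSumExp helper used above is not included in this excerpt; a common,
# numerically stable stand-in (an assumption, not the original code) is:
import numpy as np

def log_sum_exp(x):
    # Subtract the max before exponentiating to avoid overflow.
    m = np.max(x)
    return m + np.log(np.sum(np.exp(x - m)))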
Example #52
Create a numpy matrix where each row corresponds to a document
and each column to a word. Each value should be the number of
times that word appeared in that document.

'''

for i in range(len(all_articles)):
    count=Counter(porter_stem[i])
    for k in count:
        j=rev_lookup[k]
        count_matrix[i][j]+=count[k]


# For each article, count its words; for each word, look up its column index;
# keep track of the row number on each pass through the loop.

# Apply a Laplace smoothing constant of 1
doc_freq=Counter()

for article in porter_stem:
    count = Counter(article)
    for k in count:
        doc_freq[k]+=1

#normalize the matrix

norm=np.apply_along_axis(lambda x: np.sqrt(np.sum(x**2)),1, count_matrix).reshape(len(all_articles),1)

norm_matrix=np.divide(count_matrix,norm)
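
# For comparison (not in the original script): an equivalent, more direct way
# to compute the row L2 norms used above, assuming count_matrix is a 2-D
# numpy array.
norm_alt = np.linalg.norm(count_matrix, axis=1, keepdims=True)
assert np.allclose(norm_alt, norm)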
Example #53
def param_est(dict_mkr_coords, asis_breadth=None):
    v_markers(dict_mkr_coords, asis_breadth=asis_breadth)

    i_vect = (dict_mkr_coords['RASI'] - dict_mkr_coords['LASI'])
    i_vect = i_vect / np.linalg.norm(i_vect, axis=1)[:, None]
    k_vect = np.cross(dict_mkr_coords['RASI'] - dict_mkr_coords['SACR'],
                      dict_mkr_coords['LASI'] - dict_mkr_coords['SACR'],
                      axis=1)
    k_vect = k_vect / np.linalg.norm(k_vect, axis=1)[:, None]
    j_vect = np.cross(k_vect, i_vect)

    med = partial(medfilt, kernel_size=51)

    def sm_med(a):
        return med(np.convolve(a, hanning(11), 'same'))

    la_speed = np.linalg.norm(np.apply_along_axis(
        sm_med, 0, np.gradient(dict_mkr_coords['LHEE'], axis=0)),
                              axis=1)
    ra_speed = np.linalg.norm(np.apply_along_axis(
        sm_med, 0, np.gradient(dict_mkr_coords['RHEE'], axis=0)),
                              axis=1)

    d_speed = ra_speed - la_speed

    direc = j_vect
    direc[:, 2] = 0
    dist = dict_mkr_coords['RHEE'] - dict_mkr_coords['LHEE']
    ap_dist = np.empty(dist.shape[0])
    sacr_speed = np.apply_along_axis(
        med, 0, np.gradient(dict_mkr_coords['SACR'], axis=0))
    for i in range(dist.shape[0]):
        ap_dist[i] = np.dot(dist[i, :], direc[i, :])
        sacr_speed[i] = np.dot(sacr_speed[i, :], direc[i, :])

    height_dist = dict_mkr_coords['RHEE'][:, 2] - dict_mkr_coords['LHEE'][:, 2]

    step_class = np.ones(dist.shape[0])
    step_class[d_speed > 0.001] = 0
    step_class[d_speed < -0.001] = 2
    mode_w = 51
    step_window = rolling_window(step_class, mode_w)

    def mode_filter(a, classes=(0, 1, 2)):
        scores = []
        if classes is None:
            classes = np.unique(a)
        for c in classes:
            scores.append(np.sum(a == c))
        return classes[int(np.argmax(scores))]

    step_class = np.apply_along_axis(mode_filter, 1, step_window)
    step_class = np.append(step_class,
                           np.ones(int((mode_w - 1) / 2)) * step_class[-1])
    step_class = np.append(
        np.ones(int((mode_w - 1) / 2)) * step_class[0], step_class)

    step_durations = []
    step_starts = []
    rs = 1  # running sum
    cc = step_class[0]  # current class
    step_starts.append(0)
    for i in range(1, dist.shape[0]):
        if step_class[i] == cc:
            rs += 1
        else:
            step_starts.append(i)
            step_durations.append(rs)
            rs = 1
            cc = step_class[i]
    step_durations.append(rs)

    def get_span(a, dist):
        t0 = step_starts[int(np.sum(a > np.array(step_starts)) - 1)]
        tend = t0 + step_durations[int(np.sum(a > np.array(step_starts)) - 1)]
        if int(np.sum(a > np.array(step_starts)) - 1) == 0:
            return max(np.max(dist[t0:tend]), 0) - min(np.min(dist[t0:tend]),
                                                       0)
        if int(np.sum(a > np.array(step_starts)) - 1) == len(step_starts) - 1:
            return max(np.max(dist[t0:tend]), 0) - min(np.min(dist[t0:tend]),
                                                       0)
        return np.max(dist[t0:tend]) - np.min(dist[t0:tend])

    v_get_stride_length = np.vectorize(partial(get_span, dist=ap_dist))
    v_get_step_height = np.vectorize(partial(get_span, dist=height_dist))
    stride_lengths = np.zeros(dist.shape[0])
    step_heights = np.zeros(dist.shape[0])

    for i, step_start in enumerate(step_starts):
        if step_class[step_start] == 1:
            continue
        stride_lengths[step_start:step_start + step_durations[i]] = \
            v_get_stride_length(np.arange(step_start, step_start + step_durations[i]))
        stride_lengths[step_start:step_start + step_durations[i]] = \
            mode_filter(stride_lengths[step_start:step_start + step_durations[i]], classes=None)
        step_heights[step_start:step_start + step_durations[i]] = \
            v_get_step_height(np.arange(step_start, step_start + step_durations[i]))
        step_heights[step_start:step_start + step_durations[i]] = \
            mode_filter(step_heights[step_start:step_start + step_durations[i]], classes=None)

    def get_speed(a):
        return stride_lengths[a] / step_durations[int(
            np.sum(a >= np.array(step_starts)) - 1)]

    step_speed = np.vectorize(get_speed)(np.arange(stride_lengths.shape[0]))

    dict_out = {
        'step_class': step_class,
        'step_heights': step_heights,
        'stride_lengths': stride_lengths,
        'la_speed': la_speed,
        'ra_speed': ra_speed,
        'sacr_speed': sacr_speed,
        'step_speed': step_speed
    }
    return dict_out
Example #54
    def set_hist(self, new_hist):
        """ Updates cluster density after BuildTree """
        self._loghist = self._logprior
        self._hist = np.apply_along_axis(np.exp, 1, self._loghist)
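        # (Note, not in the original code: because np.exp is elementwise,
        # np.apply_along_axis(np.exp, 1, self._loghist) is equivalent to
        # np.exp(self._loghist).)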
        #"lamb": lamb,
        #"alpha": alpha,
        "min_child_weight": min_child_weight,
        #"colsample_bylevel": colsample_bylevel,
        "silent": 1,
        "seed": s
    }

    kf = kfold.split(train, train_label)

    for i, (train_fold, validate) in enumerate(kf):
        X_train, X_validate, label_train, label_validate = \
            train[train_fold, :], train[validate, :], train_label[train_fold], train_label[validate]

        dtrain = xgb.DMatrix(X_train, label_train)
        dvalid = xgb.DMatrix(X_validate, label_validate)
        watchlist = [(dtrain, 'train'), (dvalid, 'valid')]
        bst = xgb.train(params,
                        dtrain,
                        num_boost_round,
                        evals=watchlist,
                        verbose_eval=10,
                        early_stopping_rounds=50)
        cv_train[validate, :] += bst.predict(xgb.DMatrix(X_validate))
        tmp_result = list(
            np.apply_along_axis(get_top7, 1, cv_train[validate, :]))
        print(mapk([[x] for x in label_validate], tmp_result))

result = list(np.apply_along_axis(get_top7, 1, cv_train))
print(mapk([[x] for x in train_label], result))
Example #56
    def is_winning(self) -> bool:
        return np.any([
            np.apply_along_axis(np.all, axis=0, arr=self.marked).any(),
            np.apply_along_axis(np.all, axis=1, arr=self.marked).any()
        ])
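
# Hedged illustration (not part of the original class): the same row/column
# completeness check applied directly to a toy 3x3 boolean "marked" board.
import numpy as np

marked = np.array([[True, True, True],
                   [False, True, False],
                   [False, False, True]])
print(np.apply_along_axis(np.all, axis=1, arr=marked).any())  # True: row 0 is fully marked
print(np.apply_along_axis(np.all, axis=0, arr=marked).any())  # False: no column is fully marked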
Example #57
    def apply_function(self, function):
        for mob in self.family_members_with_points():
            mob.points = np.apply_along_axis(function, 1, mob.points)
        return self
    axarr[1].plot(f0_tr[this_sample], linewidth=3)
    axarr[2].plot(phonemes_tr[:, this_sample], linewidth=3)
    axarr[2].set_adjustable('box-forced')
    axarr[0].set_adjustable('box-forced')
    axarr[1].set_adjustable('box-forced')
    pyplot.savefig(save_dir + "samples/new/data" + str(this_sample) + ".png")
    pyplot.close()

    mgc_sp = sp_tr[this_sample]
    mgc_sp_test = numpy.hstack([mgc_sp, mgc_sp[:, ::-1][:, 1:-1]])
    mgc_sp_test = mgc_sp_test.astype('float64').copy(order='C')
    mgc_reconstruct = numpy.apply_along_axis(SPTK.mgcep,
                                             1,
                                             mgc_sp_test,
                                             order,
                                             alpha,
                                             gamma,
                                             eps=0.0012,
                                             etype=1,
                                             itype=2)
    x_synth = mgcf02wav(mgc_reconstruct, f0_tr[this_sample])
    x_synth = .95 * x_synth / max(abs(x_synth)) * 2**15
    wavfile.write(
        save_dir + "samples/new/data" + num_sample + str(this_sample) + ".wav",
        16000, x_synth.astype('int16'))

main_loop = load(save_dir + "pkl/best_" + experiment_name + ".pkl")

lookup, generator = main_loop.model.get_top_bricks()

from theano import tensor, function
Example #59
                                             alpha=0,
                                             param_lambda=1,
                                             n_fold=35,
                                             seed_value=seed)

            clf1 = LogisticRegression(solver='lbfgs',
                                      max_iter=1000,
                                      multi_class='multinomial',
                                      verbose=1,
                                      n_jobs=20)
            my_clf = gene_clf.my_classifier(number_class=num_class,
                                            number_fold=35,
                                            number_seed=seed)
            meta_feat1 = my_clf.predict(clf1, X_categ, y, X_categ_test, 'base')
            meta_feat1_1 = np.reshape(
                np.apply_along_axis(np.argmax, 1, meta_feat1), (-1, 1))

            X_meta = np.concatenate([X_numeric, meta_feat1_1[:n_train, :]],
                                    axis=1)
            X_meta_test = np.concatenate(
                [X_numeric_test, meta_feat1_1[n_train:, :]], axis=1)

            y_pred = my_xgb.predict(X_meta, y, X_meta_test, 'meta')
            y_pred_sum = y_pred_sum + y_pred

y_pred = y_pred_sum / seed
# save pred
submission_pred = pd.DataFrame(
    data={
        'id': ids,
        'predict_0': y_pred[:, 0],
Example #60
    def get_center_of_mass(self):
        return np.apply_along_axis(np.mean, 0, self.get_all_points())
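        # (Note, not in the original code: assuming get_all_points() returns
        # an (N, dim) numpy array, this is equivalent to
        # self.get_all_points().mean(axis=0).)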