Example #1
    def run(self, method=None):
        if method == HM_MDA:
            from .midpointDisplacement import MDA
            heightObject = MDA(self.size, self.roughness)
        elif method == HM_DSA:
            from .diamondSquare import DSA
            heightObject = DSA(self.size)
        elif method == HM_SPH:
            from .sphere import Sphere
            heightObject = Sphere(self.size, self.roughness)
        elif method == HM_PERLIN:
            from .perlinNoise import Perlin
            heightObject = Perlin(self.size)
        else:
            raise ValueError("No method for generating heightmap found!")

        heightObject.run()
        self.heightmap = utilities.normalize(heightObject.heightmap)

        if self.islands:
            gradient = utilities.radialGradient(self.size, True, True)
            self.heightmap = self.heightmap * gradient
            self.heightmap = utilities.normalize(self.heightmap)

        del heightObject
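All of these examples call a project-specific `normalize` from a local utilities module whose source is not shown on this page. For the heightmap example above, a plausible min-max sketch that rescales an array into [0, 1] (an assumption, not the project's actual code) would be:

import numpy as np

def normalize(arr):
    # hypothetical min-max rescaling to [0, 1]; the real utilities.normalize
    # used by these examples is not shown on this page
    arr = np.asarray(arr, dtype=float)
    lo, hi = arr.min(), arr.max()
    if hi == lo:
        return np.zeros_like(arr)
    return (arr - lo) / (hi - lo)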
Example #3
 def step(self, particle, obs):
     self.t += self.hsw.step_len
     particle_ip1 = []
     for ptc in particle:
         p = self.step_particle(ptc, obs)
         particle_ip1.append(p)
     normalize(particle_ip1, 0)
     re_particle_ip1 = resample(particle_ip1, self.N)
     ave_state = sum([p.weight * p.state for p in re_particle_ip1])
     max_mode = [p.mode for p in re_particle_ip1]
     num_counter = Counter(max_mode)
     max_mode = num_counter.most_common(1)[0][0]
     self.tracjectory.append(re_particle_ip1)
     self.state.append(ave_state)
     self.mode.append(max_mode)
Example #4
    def to_wav(self, filename, mono=False, norm=False, type=float):
        '''
        Save all the signals to wav files
        '''
        from scipy.io import wavfile

        if mono is True:
            signal = self.signals[self.M // 2]
        else:
            signal = self.signals.T  # each column is a channel

        if type is float:
            bits = None
        elif type is np.int8:
            bits = 8
        elif type is np.int16:
            bits = 16
        elif type is np.int32:
            bits = 32
        elif type is np.int64:
            bits = 64
        else:
            raise NameError('No such type.')

        if norm is True:
            from utilities import normalize
            signal = normalize(signal, bits=bits)

        signal = np.array(signal, dtype=type)

        wavfile.write(filename, self.Fs, signal)
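A hypothetical call, assuming `mics` is the microphone-array object that carries `signals`, `M`, and `Fs` (the name is illustrative, not from this page):

import numpy as np
mics.to_wav('out.wav', mono=True, norm=True, type=np.int16)  # mics: hypothetical array object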
Example #5
 def learn(self, Xtrain, ytrain):
     numsamples = Xtrain.shape[0]
     Xless = Xtrain[:, self.params['features']]
     train_data = super(LassoRegression, self).addBias(Xless)
     initial = np.random.rand(train_data.shape[1], 1)
     self.weights = utils.normalize(initial, 'l2', axis=0)
     self.lRegression(train_data, ytrain, numsamples)
Example #6
	def get_data(self):
		for key in self.agents.keys():
			routines = [x.routine for x in self.agents[key]]
			convergences = [x.convergence for x in self.agents[key]]
			convergence_proportion = np.mean(convergences)
			
			means = utils.normalize(np.mean(routines, 0))
			
			sds = np.std(routines, 0)
			
			try:
				change_convergence_proportion = convergence_proportion - \
									self.data['convergence_proportion_{}'.format(key)][-1] 
				change_means = utils.calculate_tvd(np.array(self.data['means_{}'.format(key)][-1]).ravel(),\
											np.array(means).ravel()) 
				change_sds = sds - self.data['sd_{}'.format(key)][-1]
			except (KeyError, IndexError):
				change_convergence_proportion = convergence_proportion
				change_means = utils.calculate_tvd(np.array([0,0,0,0]),\
											np.array(means).ravel()) 
				change_sds = sds
			self.data['means_{}'.format(key)] += [np.array(means).ravel()]
			self.data['means_change_{}'.format(key)] += [change_means]
			self.data['sd_{}'.format(key)] += [np.array(sds).ravel()]
			self.data['sd_change_{}'.format(key)] += [np.array(change_sds).ravel()]
			self.data['convergence_proportion_{}'.format(key)] += [convergence_proportion]
			self.data['convergence_proportion_change_{}'.format(key)] += [change_convergence_proportion]
		
		self.data['time'] += [self.time + 1]
Example #7
def tokenize_map(s):
    words = normalize(s).split()
    triplet_count = Counter()
    for word in words:
        triplet_count += Counter(
            [word] if len(word) <= 3 else adj_triples(word))
    return triplet_count
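`adj_triples` is not defined on this page; presumably it enumerates a word's adjacent character triplets, which is consistent with the `len(word) <= 3` guard above. A hypothetical sketch:

def adj_triples(word):
    # hypothetical helper: all adjacent 3-character substrings of the word
    return [word[i:i + 3] for i in range(len(word) - 2)]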
Example #8
def clean_labels(cand, pref_label):

    alt_label = []

    # Exclude some unwanted candidates
    unwanted = ['/', '|']
    for s in unwanted:
        for c in cand[:]:
            if c and c.find(s) > -1:
                cand.remove(c)

    # Exclude identical alt labels and alt labels identical to the pref label
    for l in cand:
        l_norm = utilities.normalize(remove_spec(l))
        if l_norm and l_norm != pref_label:
            if l_norm not in alt_label:
                alt_label.append(l_norm)

    # Exclude alt labels that contain the same words as the pref label
    for l in alt_label[:]:
        if len(set(l.split()) & set(pref_label.split())) == len(l.split()):
            if len(pref_label.split()) == len(l.split()):
                alt_label.remove(l)

    return alt_label
Example #9
	def variation(self, inertial_constant=1, eq_tolerance=0.05, change_tolerance=0.25):
		sources_of_influence = np.vstack([self.external_influence,
		                                 np.reshape(np.concatenate(self.environment.group_decisions, axis=0), (4, 4))])
		relative_impacts = self.relative_influences
		compiled_influence = np.transpose(np.array(sources_of_influence)).dot(relative_impacts)
		compiled_influence = utils.normalize(compiled_influence)
		self.change_opinion(compiled_influence, inertial_constant, eq_tolerance, change_tolerance)
Example #10
 def __init__(self,
              pos=np.array([0.0, 0.0]),
              ori=np.array([1.0, 0.0]),
              color='black'):
     self._pos = np.array(pos)
     self._ori = uts.normalize(ori)
     self._color = color
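Unlike the array-rescaling examples earlier, `uts.normalize` here clearly returns a unit vector. A minimal sketch under that assumption:

import numpy as np

def normalize(v):
    # hypothetical: scale a 2D orientation vector to unit length
    v = np.asarray(v, dtype=float)
    n = np.linalg.norm(v)
    return v / n if n > 0 else v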
Example #11
def normalize(text):
    text = utilities.normalize(text).encode("utf8")
    text = re.sub(r"\.{2,}", " ", text)  # replace runs of two or more periods with a space
    word_list = text.lower().split()
    clean_list = []
    for word in word_list:
        word = word.strip("?.,_;\":!'-*()")
        clean_list.append(word)
    return clean_list
Example #13
def __get_generator(flair_names, seg_names, batch_size, debug, is_test,
                    to_count):
    while True:
        batched_overall = []
        batched_seg = []
        for flair, seg in zip(flair_names, seg_names):
            flair_img = get_scan(os.path.join(BRATS_PARENT, flair))
            t1ce = flair.replace("flair", "t1ce")
            t1ce_img = get_scan(os.path.join(BRATS_PARENT, t1ce))
            flair_img = crop_resample_img(normalize(flair_img))
            t1ce_img = crop_resample_img(normalize(t1ce_img))

            seg_img = get_scan(os.path.join(BRATS_PARENT, seg))
            seg_img = crop_resample_img(seg_img)
            seg_img[seg_img >= 1] = 1

            if debug:
                show_an_image_slice(flair_img, "flair original")
                show_an_image_slice(t1ce_img, "t1ce original")
                show_an_image_slice(seg_img, "segmentation original")

            s = flair_img.shape
            flair_img = numpy.reshape(flair_img, (1, s[0], s[1], s[2], 1))
            t1ce_img = numpy.reshape(t1ce_img, (1, s[0], s[1], s[2], 1))
            overall = numpy.concatenate((flair_img, t1ce_img), axis=-1)
            seg_img = numpy.reshape(seg_img, (1, s[0], s[1], s[2], 1))

            batched_overall.append(overall)
            batched_seg.append(seg_img)

            if len(batched_overall) == batch_size:
                yield numpy.concatenate(batched_overall,
                                        axis=0), numpy.concatenate(batched_seg,
                                                                   axis=0)
                batched_overall = []
                batched_seg = []

        if len(batched_overall) != 0:
            yield numpy.concatenate(batched_overall,
                                    axis=0), numpy.concatenate(batched_seg,
                                                               axis=0)

        if is_test or to_count:
            break
Example #14
def frequency(columns, probability=False):
    """ /!\ Warning: Take only column(s) and not DataFrame /!\
        Frequency encoding:
            Pandas series to frequency/probability distribution.
        
        If there are several series, the outputs will have the same format.
        Example:
          C1: ['b', 'a', 'a', 'b', 'b']
          C2: ['b', 'b', 'b', 'c', 'b']
          
          f1: ['a': 2, 'b': 3, 'c': 0]
          f2: ['a': 0, 'b': 4, 'c': 1]
          
          Output: [[2, 3, 0], [0, 4, 1]] (with probability = False)
          
        :param probability: True for probabilities, False for frequencies.
        :return: Frequency/probability distribution.
        :rtype: list
    """ # TODO error if several columns have the same header

    # If there is only one column, just return frequencies
    if not isinstance(columns[0], (list, np.ndarray, pd.Series)):
        return columns.value_counts(normalize=probability).values

    frequencies = []

    # Compute frequencies for each column
    for column in columns:
        f = dict()
        for e in column:
            if e in f:
                f[e] += 1
            else:
                f[e] = 1
        frequencies.append(f)

    # Add keys from other columns in every dictionaries with a frequency of 0
    # We want the same format
    for i, f in enumerate(frequencies):
        for k in f.keys():
            for other_f in frequencies[:i] + frequencies[i + 1:]:
                if k not in other_f:
                    other_f[k] = 0

    # Convert each dict to a frequency/probability distribution, iterating
    # keys in sorted order so every list follows the same key order
    res = []
    for f in frequencies:
        l = [f[k] for k in sorted(f)]
        if probability:
            # normalize between 0 and 1 with a sum of 1
            l = normalize(l)
        res.append(l)

    return res
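A quick check against the docstring's example, using pandas Series as the columns (keys are iterated in sorted order, so the rows line up):

import pandas as pd

c1 = pd.Series(['b', 'a', 'a', 'b', 'b'])
c2 = pd.Series(['b', 'b', 'b', 'c', 'b'])
print(frequency([c1, c2]))  # [[2, 3, 0], [0, 4, 1]]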
Example #15
 def __init__(self,
              pos=np.array([0.0, 0.0]),
              ori=np.array([1.0, 0.0]),
              fp=fps.EggFootprint(),
              color='black',
              landmarks=[]):
     self._pos = np.array(pos)
     self._ori = uts.normalize(ori)
     self._fp = fp
     self._color = color
Example #16
    def getWeff(self, freq):
        # get Weff at a given frequency; how the models are obtained under the hood does not matter
        for i,func in enumerate(self.funcs):
            if min(func.x) <= freq <= max(func.x):
                temp = func(freq,self.bins)

                U = u.normalize(temp,simple=True) #remove baseline?
        
                tot = np.sum(np.power(U[1:]-U[:-1],2))
                return 1e6*self.P/np.sqrt(len(self.bins)*tot) # in us
Example #17
    def step(self, obs):
        self.t += self.hsw.step_len
        mode_i0 = self.mode0 if not self.state else self.mode[len(
            self.state) - 1]  # latest mode
        particle_ip1 = {}
        for m in range(self.mode_num):
            particle_ip1[m] = []
            particle = self.mode_particle_dict[m]
            for ptc in particle:
                p = self.step_particle(ptc, obs, mode_i0)
                particle_ip1[m].append(p)

        weight = [
            sum([p.weight for p in particle_ip1[m]]) for m in particle_ip1
        ]
        weight = [w / sum(weight) for w in weight]
        w_max = max(weight)  # maximal weight
        m_opt = weight.index(w_max)  # optimal mode

        new_particle_ip1 = {}
        for m in range(self.mode_num):
            if weight[m] == 0 or (w_max / weight[m] > self.r):
                copy_ptc = []
                for p in particle_ip1[m_opt]:
                    _p = p.clone()
                    _p.mode = m
                    copy_ptc.append(_p)
                new_particle_ip1[m] = copy_ptc
            else:
                new_particle_ip1[m] = particle_ip1[m]
        m_opt = m_opt if weight[mode_i0] == 0 or (
            weight[m_opt] / weight[mode_i0] > self.r) else mode_i0

        for m in range(self.mode_num):
            normalize(new_particle_ip1[m], 0.0001)
            new_particle_ip1[m] = resample(new_particle_ip1[m])

        ave_state = sum([p.weight * p.state for p in new_particle_ip1[m_opt]])
        self.tracjectory.append(new_particle_ip1[m_opt])
        self.state.append(ave_state)
        self.mode.append(m_opt)
        self.mode_particle_dict = new_particle_ip1
Example #18
def get_all_hough_lines(image, min_angle, max_angle, min_separation_distance,
                        min_separation_angle):

    angles = np.deg2rad(np.arange(min_angle, max_angle, 0.5))
    hough, angles, distances = hough_line(image, angles)

    Debug.save_image("hough", "accumulator", normalize(hough))

    _, peak_angles, peak_distances = \
      hough_line_peaks(hough, angles, distances,
                       num_peaks=150,
                       threshold=0.2*np.amax(hough),
                       min_distance=min_separation_distance,
                       min_angle=min_separation_angle)

    lines = [
        get_line_endpoints_in_image(image, angle, radius)
        for angle, radius in zip(peak_angles, peak_distances)
    ]

    if Debug.active:
        peak_angle_idxs = [
            np.where(angles == angle)[0][0] for angle in peak_angles
        ]
        peak_rho_idxs = [
            np.where(distances == distance)[0][0]
            for distance in peak_distances
        ]
        peak_coords = zip(peak_rho_idxs, peak_angle_idxs)
        peaks = np.zeros(hough.shape)
        for coord in peak_coords:
            peaks[coord] = 1
        Debug.save_image("hough", "accumulator_peaks", peaks)

    if Record.active:
        # Thought get_max_theta_idx might be a useful way to filter
        # real meanlines from spurious meanlines, but it's not
        # reliable when the image is saturated with incorrect
        # meanlines. Filtering lines based on the ROI angle
        # was more effective.

        # max_theta_idx = get_max_theta_idx(hough)
        # Record.record("theta_mode", angles[max_theta_idx])

        # in radians
        average_meanline_angle = np.mean(peak_angles)
        std_deviation_meanline_angle = np.std(peak_angles)

        Record.record("average_meanline_angle",
                      float("%.4f" % average_meanline_angle))
        Record.record("std_deviation_meanline_angle",
                      float("%.4f" % std_deviation_meanline_angle))

    return lines
Example #19
	def negotiate(self):
		group_decisions = []
		for i, key in enumerate(self.agents.keys()):
			agents = self.agents[key]
			routines = np.transpose(np.array([x.routine  for x in agents]))
			group_influences = np.array([x.intra_group_influence for x in agents])
			group_decision = routines.dot(group_influences)
			group_decision = utils.normalize(group_decision)
			self.data['decisions_{}'.format(key)] += [np.array(group_decision).ravel()]

			change = utils.calculate_tvd(np.array(self.group_decisions[i]).ravel(), \
			                             np.array(group_decision).ravel())
			self.data['decisions_change_{}'.format(key)] += [change]
			group_decisions += [[group_decision]]
		routine = np.transpose(np.array(group_decisions)).dot(self.inter_group_influences)
		change_routine = utils.calculate_tvd(np.array(self.routine).ravel(), np.array(routine).ravel())
		self.routine = utils.normalize(routine)
		self.data['organizational_routine'] += [np.array(self.routine).ravel()]
		self.data['organizational_routine_change'] += [change_routine]
		self.group_decisions = group_decisions
Example #20
    def train(self, X, epochs=40):
        X = utilities.normalize(X)

        (num_examples, visible_size) = X.shape

        self.weights = RBM.generate_weight_vector(
            visible_size * self.hidden_size).reshape(visible_size,
                                                     self.hidden_size)
        self.visible_biases = RBM.generate_weight_vector(visible_size).reshape(
            visible_size, 1)
        self.hidden_biases = np.zeros(self.hidden_size).reshape(
            1, self.hidden_size)

        self.weights_err_history = []
        self.visible_biases_err_history = []
        self.hidden_biases_err_history = []

        prev_updates = (0, 0, 0)

        start = time.time()
        for e in range(epochs):
            np.random.shuffle(X)
            print('EPOCH %d' % (e + 1))
            for i in range(num_examples):
                if i % 1000 == 0:
                    print('Trained %d examples in %d s' % (
                        e * num_examples + i, time.time() - start))
                prev_updates = self.train_example(X[i], prev_updates)

            bucket_size = 1000
            iterations = [
                k * bucket_size
                for k in range((e + 1) * num_examples // bucket_size)
            ]
            utilities.save_scatter(
                iterations,
                utilities.bucket(self.hidden_biases_err_history, bucket_size),
                'hidden_err')
            utilities.save_scatter(
                iterations,
                utilities.bucket(self.visible_biases_err_history, bucket_size),
                'visible_err')
            utilities.save_scatter(
                iterations,
                utilities.bucket(self.weights_err_history, bucket_size),
                'weights_err')

        utilities.save_image(self.visible_biases.reshape(28, 28),
                             'visible_biases')

        RBM.save_weights('weights', self.weights)
        RBM.save_weights('visible_biases', self.visible_biases)
        RBM.save_weights('hidden_biases', self.hidden_biases)
Example #21
 def random(
         cls,
         xlim=(-1.0, 1.0),
         ylim=(-1.0, 1.0),
 ):
     """Constructs a Landmark object with random position and orientation.
     The position is chosen within the given boundaries.
     """
     x = rdm.uniform(*xlim)
     y = rdm.uniform(*ylim)
     pos = np.array([x, y])
     ori = uts.normalize(np.random.rand(2) - 0.5)
     return cls(pos, ori)
Example #22
 def step(self, particles, obs, mode):
     '''
     particles: particle list
     '''
     self.t += self.hsw.step_len
     obs_conf = self.obs_conf if (self.fault_para_flag==0).all() else 0.0
     mode_i0 = self.mode0 if not self.state else self.mode[len(self.state)-1]
     self.latest_sp = self.latest_sp if mode_i0==mode else self.t
     particles_ip1 = []
     res = np.zeros(len(self.hsw.obs_sigma))
     for ptc in particles:
         p, r = self.step_particle(ptc, obs, mode_i0, mode)
         particles_ip1.append(p)
         res += r
     self.last_likelihood = sum([ptc.weight for ptc in particles_ip1])
     normalize(particles_ip1, obs_conf)
     re_particles_ip1 = resample(particles_ip1, self.N)
     ave_state = self.ave_state(re_particles_ip1)
     self.tracjectory.append(re_particles_ip1)
     self.state.append(ave_state)
     self.res.append(res)
     self.process_fault(res)
Example #23
def get_max_theta_idx(hough):
    '''
    Returns the column (theta) of the hough transform with the
    most above-threshold bins.
    '''
    thresh_hough = threshold_hough(hough, 0.2 * np.amax(hough))
    Debug.save_image("hough", "thresholded_accumulator",
                     normalize(thresh_hough))
    # find the column with the most above-threshold bins
    sum_thresh_hough = np.sum(thresh_hough, axis=0)
    max_theta_idx = np.argmax(sum_thresh_hough)
    return max_theta_idx
Example #24
def precalculate_superpixel_labels():
    if os.path.exists(SUPERPIXEL_FOLDER):
        return

    os.makedirs(SUPERPIXEL_FOLDER, exist_ok = True)

    flair_names, seg_names = get_flair_file_names()

    for name in tqdm(flair_names):
        flair_img = get_scan(os.path.join(BRATS_PARENT, name))
        flair_img = crop_resample_img(normalize(flair_img))
        label = get_superpixel_labels(flair_img)

        name = name.split("/")[1].split(".")[0]
        pickle.dump(label, open(os.path.join(SUPERPIXEL_FOLDER, name + ".p"), "wb"))
Example #25
def classify(image, model):
    """Classify the math digits or operators.

    Args:
        image: The input image.
        model: The trained model.
    Return:
        numpy.ndarray: The predicted probability.
        int: The predicted class.
    """
    image = 255. - normalize(image)
    image = image[np.newaxis, :, :, np.newaxis] / 255.
    prob = model.predict(image)
    pred = np.argmax(prob)

    return prob, pred
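A hypothetical call, assuming a Keras-style `model` trained on 28x28 grayscale digit crops (the input shape and the model are assumptions, not from this page):

import numpy as np

digit = np.zeros((28, 28))           # stand-in for a real cropped symbol image
prob, pred = classify(digit, model)  # model: a trained classifier with .predict()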
Example #26
    def get_screen_point(self, vector: np.ndarray, time_now: float,
                         eye_center: np.ndarray) -> np.ndarray:
        """Remember and smooth current gaze vector, get pixel on screen

        :param vector: shape [3], gaze vector
        :param time_now: current time (time.time())
        :param eye_center: shape [3]
        :return: shape [2], point on screen in pixels
        """
        vector = utilities.normalize(vector)

        point = self._screen.get_2d_cross_position(vector, eye_center)

        self._history.append([point[0], point[1], time_now])
        seen_point = utilities.smooth_n_cut(self._history, time_now)

        screen_point = self._translator.seen_to_screen(seen_point)
        return screen_point
Example #27
    def to_wav(self, filename, mono=False, norm=False, bitdepth=float):
        '''
        Save all the signals to wav files.

        Parameters
        ----------
        filename: str
            the name of the file
        mono: bool, optional
            if true, records only the center channel floor(M / 2) (default `False`)
        norm: bool, optional
            if true, normalize the signal to fit in the dynamic range (default `False`)
        bitdepth: int, optional
            the format of output samples [np.int8/16/32/64 or np.float (default)]
        '''
        from scipy.io import wavfile

        if mono is True:
            signal = self.signals[self.M // 2]
        else:
            signal = self.signals.T  # each column is a channel

        if bitdepth is float:
            bits = None
        elif bitdepth is np.int8:
            bits = 8
        elif bitdepth is np.int16:
            bits = 16
        elif bitdepth is np.int32:
            bits = 32
        elif bitdepth is np.int64:
            bits = 64
        else:
            raise NameError('No such type.')

        if norm is True:
            from utilities import normalize
            signal = normalize(signal, bits=bits)

        signal = np.array(signal, dtype=bitdepth)

        wavfile.write(filename, self.Fs, signal)
Example #28
    def MPRegression(self, train_data, ytrain, num):
        initial = np.random.rand(train_data.shape[1], 1)
        self.weights = utils.normalize(initial, 'l2', axis=0)

        residualT = float("inf")
        currentFeature = [0]

        while residualT > self.params['epsilon']:
            bestFeature = None
            corr = float("-inf")
            bestWeight = None

            for feature in list(
                    set(range(len(self.params['features']))) -
                    set(currentFeature)):
                newfeature = copy(currentFeature)
                newfeature.append(feature)

                prediction = train_data[:, newfeature].dot(
                    self.weights[newfeature, :])
                loss = np.subtract(prediction, ytrain.reshape(num, 1))
                gradient = train_data[:, newfeature].T.dot(loss) / num

                weightNew = self.weights[newfeature, :]
                weightNew -= self.alpha * gradient

                predNew = train_data[:, newfeature].dot(weightNew)
                residual = predNew - ytrain.reshape(num, 1)

                correlation = train_data[:, newfeature].T.dot(residual)
                pearson = np.linalg.norm(correlation) / num

                if pearson > corr:
                    bestFeature = feature
                    corr = pearson
                    residualT = np.linalg.norm(residual) / num
                    bestWeight = weightNew

            currentFeature.append(bestFeature)
            self.weights[currentFeature, :] = bestWeight

        return currentFeature
Example #29
def disparity_computation(left_img, right_img):

    minDisp = -10
    numDisp = 80
    bSize = 1
    speckleRange = 2
    speckleWindowSize = 4

    stereo = cv2.StereoSGBM_create(minDisparity=minDisp,
                                   numDisparities=numDisp,
                                   blockSize=bSize,
                                   speckleRange=speckleRange,
                                   speckleWindowSize=speckleWindowSize)

    disparity = stereo.compute(left_img, right_img)

    #plt.figure()
    #plt.title("%d, %d, %d, %d, %d" % (minDisp, numDisp, bSize, speckleRange, speckleWindowSize))
    #plt.imshow(disparity, 'gray')
    #plt.colorbar(orientation='horizontal')
    #plt.show()

    return normalize(disparity)
Example #30
		def binary_search(level, guess, lower_bound, upper_bound, tolerance=0.04):
			level = level + 1
			TVD_guess = utils.calculate_tvd(self.routine[0], guess)
			error = scaled_by_intertia - TVD_guess
			if level == 900:
				# print('type: {}, {} did not converge w/ error: {}'.format(self.type, self.id, error))
				self.convergence = 0
				return guess
			if abs(error) < tolerance:
				# print('type: {}, {} converged'.format(self.type, self.id))
				self.convergence = 1
				return guess
			else:
				routines = np.array(np.vstack([lower_bound, upper_bound]))
				new_guess = utils.normalize(np.mean(routines, 0))

				if utils.calculate_tvd(new_guess, guess) < 1e-6:
					# print(self.type, self.id, 'stable guesses')
					self.convergence = 0
					return guess 
				if error < 0:
					return binary_search(level, new_guess, lower_bound, guess)
				else:
					return binary_search(level, new_guess, guess, upper_bound)
Example #31
def work():
    global lin_vel, ang_vel
    global position, orientation
    global time
    new_time = rp.get_time()
    dt = new_time - time
    position += lin_vel*dt
    orientation += uts.ccws_perp(orientation)*ang_vel*dt
    orientation = uts.normalize(orientation)
    pose = cms.Pose(position=position, orientation=orientation)
    gms_pose = gms.Pose()
    gms_pose.position = gms.Point(x=position[0], y=position[1])
    matrix = np.eye(4)
    matrix[0:2,0:2] = orientation
    quaternion = tft.quaternion_from_matrix(matrix)
    gms_pose.orientation = gms.Quaternion(
        x = quaternion[0],
        y = quaternion[1],
        z = quaternion[2],
        w = quaternion[3]
    )
    pose_pub.publish(pose)
    gms_pose_pub.publish(gms_pose)
    time = new_time
Example #32
def perceptual_quality_evaluation(good_source, bad_source):
    '''
    Perceputal Quality evaluation simulation
    Inner Loop
    '''

    # Imports are done in the function so that it can be easily
    # parallelized
    import numpy as np
    from scipy.io import wavfile
    from scipy.signal import resample
    from os import getpid

    from Room import Room
    from beamforming import Beamformer, MicrophoneArray
    from trinicon import trinicon

    from utilities import normalize, to_16b, highpass
    from phat import time_align
    from metrics import snr, pesq
    
    # number of sources
    n_sources = np.arange(1,12)
    S = n_sources.shape[0]

    # the speech samples used
    speech_sample1 = 'samples/fq_sample1_8000.wav'
    speech_sample2 = 'samples/fq_sample2_8000.wav'

    # Some simulation parameters
    Fs = 8000
    t0 = 1./(Fs*np.pi*1e-2)  # starting time function of sinc decay in RIR response
    absorption = 0.90
    max_order_sim = 10
    SNR_at_mic = 20          # SNR at center of microphone array in dB

    # Room 1 : Shoe box
    room_dim = [4, 6]

    # microphone array design parameters
    mic1 = [2, 1.5]         # position
    M = 8                   # number of microphones
    d = 0.08                # distance between microphones
    phi = 0.                # angle from horizontal
    shape = 'Linear'        # array shape

    # create a microphone array
    if shape == 'Circular':
        mics = Beamformer.circular2D(Fs, mic1, M, phi, d*M/(2*np.pi)) 
    else:
        mics = Beamformer.linear2D(Fs, mic1, M, phi, d) 

    # create a single reference mic at center of array
    ref_mic = MicrophoneArray(mics.center, Fs)

    # define the array processing type
    L = 4096                # frame length
    hop = 2048              # hop between frames
    zp = 2048               # zero padding (front + back)
    mics.setProcessing('FrequencyDomain', L, hop, zp, zp)

    # data receptacles
    beamformer_names = ['Rake-DS',
                        'Rake-MaxSINR',
                        'Rake-MaxUDR']
    bf_weights_fun   = [mics.rakeDelayAndSumWeights,
                        mics.rakeMaxSINRWeights,
                        mics.rakeMaxUDRWeights]
    bf_fnames = ['1','2','3']
    NBF = len(beamformer_names)

    # receptacle arrays
    pesq_input = np.zeros(2)
    pesq_trinicon = np.zeros((2,2))
    pesq_bf = np.zeros((2,NBF,S))
    isinr = 0
    osinr_trinicon = np.zeros(2)
    osinr_bf = np.zeros((NBF,S))

    # since we run multiple thread, we need to uniquely identify filenames
    pid = str(getpid())

    file_ref  = 'output_samples/fqref' + pid + '.wav'
    file_suffix = '-' + pid + '.wav'
    files_tri = ['output_samples/fqt' + str(i+1) + file_suffix for i in range(2)]
    files_bf = ['output_samples/fq' + str(i+1) + file_suffix for i in range(NBF)]
    file_raw  = 'output_samples/fqraw' + pid + '.wav'

    # Read the two speech samples used
    rate, good_signal = wavfile.read(speech_sample1)
    good_signal = np.array(good_signal, dtype=float)
    good_signal = normalize(good_signal)
    good_signal = highpass(good_signal, rate)
    good_len = good_signal.shape[0]/float(Fs)

    rate, bad_signal = wavfile.read(speech_sample2)
    bad_signal = np.array(bad_signal, dtype=float)
    bad_signal = normalize(bad_signal)
    bad_signal = highpass(bad_signal, rate)
    bad_len = bad_signal.shape[0]/float(Fs)

    # variance of good signal
    good_sigma2 = np.mean(good_signal**2)

    # normalize interference signal to have equal power with desired signal
    bad_signal *= good_sigma2/np.mean(bad_signal**2)

    # pick good source position at random
    good_distance = np.linalg.norm(mics.center[:,0] - np.array(good_source))
    
    # pick bad source position at random
    bad_distance = np.linalg.norm(mics.center[:,0] - np.array(bad_source))

    if good_len > bad_len:
        good_delay = 0
        bad_delay = (good_len - bad_len)/2.
    else:
        bad_delay = 0
        good_delay = (bad_len - good_len)/2.

    # compute the noise variance at center of array wrt good signal and SNR
    sigma2_n = good_sigma2/(4*np.pi*good_distance)**2/10**(SNR_at_mic/10)

    # create the reference room for the free-space, noiseless, interference-free simulation
    ref_room = Room.shoeBox2D(
        [0,0],
        room_dim,
        Fs,
        t0 = t0,
        max_order=0,
        absorption=absorption,
        sigma2_awgn=0.)
    ref_room.addSource(good_source, signal=good_signal, delay=good_delay)
    ref_room.addMicrophoneArray(ref_mic)
    ref_room.compute_RIR()
    ref_room.simulate()
    reference = ref_mic.signals[0]
    reference_n = normalize(reference)

    # save the reference desired signal
    wavfile.write(file_ref, Fs, to_16b(reference_n))

    # create the 'real' room with sources and mics
    room1 = Room.shoeBox2D(
        [0,0],
        room_dim,
        Fs,
        t0 = t0,
        max_order=max_order_sim,
        absorption=absorption,
        sigma2_awgn=sigma2_n)

    # add sources to room
    room1.addSource(good_source, signal=good_signal, delay=good_delay)
    room1.addSource(bad_source, signal=bad_signal, delay=bad_delay)

    # Record first the degraded signal at reference mic (center of array)
    room1.addMicrophoneArray(ref_mic)
    room1.compute_RIR()
    room1.simulate()
    raw_n = normalize(highpass(ref_mic.signals[0], Fs))

    # save degraded reference signal
    wavfile.write(file_raw, Fs, to_16b(raw_n))

    # Compute PESQ and SINR of raw degraded reference signal
    isinr = snr(reference_n, raw_n[:reference_n.shape[0]])
    pesq_input[:] = pesq(file_ref, file_raw, Fs=Fs).T
        
    # Now record input of microphone array
    room1.addMicrophoneArray(mics)
    room1.compute_RIR()
    room1.simulate()

    # Run the Trinicon algorithm
    double_sig = mics.signals.copy()
    for i in range(2):
        double_sig = np.concatenate((double_sig, mics.signals), axis=1)
    sig_len = mics.signals.shape[1]
    output_trinicon = trinicon(double_sig)[:,-sig_len:]

    # normalize time-align and save to file
    output_tri1 = normalize(highpass(output_trinicon[0,:], Fs))
    output_tri1 = time_align(reference_n, output_tri1)
    wavfile.write(files_tri[0], Fs, to_16b(output_tri1))
    output_tri2 = normalize(highpass(output_trinicon[1,:], Fs))
    output_tri2 = time_align(reference_n, output_tri2)
    wavfile.write(files_tri[1], Fs, to_16b(output_tri2))

    # evaluate
    # Measure PESQ and SINR for both output signals, we'll sort out later
    pesq_trinicon = pesq(file_ref, files_tri, Fs=Fs)
    osinr_trinicon[0] = snr(reference_n, output_tri1)
    osinr_trinicon[1] = snr(reference_n, output_tri2)

    # Run all the beamformers
    for k,s in enumerate(n_sources):

        ''' 
        BEAMFORMING PART
        '''
        # Extract image sources locations and create noise covariance matrix
        good_sources = room1.sources[0].getImages(n_nearest=s, 
                                                    ref_point=mics.center)
        bad_sources = room1.sources[1].getImages(n_nearest=s,
                                                    ref_point=mics.center)
        Rn = sigma2_n*np.eye(mics.M)

        # run for all beamformers considered
        for i, bfr in enumerate(beamformer_names):

            # compute the beamforming weights
            bf_weights_fun[i](good_sources, bad_sources,
                                    R_n=Rn,
                                    attn=True, ff=False)

            output = mics.process()
            output = normalize(highpass(output, Fs))
            output = time_align(reference_n, output)

            # save files for PESQ evaluation
            wavfile.write(files_bf[i], Fs, to_16b(output))

            # compute output SINR
            osinr_bf[i,k] = snr(reference_n, output)

            # compute PESQ
            pesq_bf[:,i,k] = pesq(file_ref, files_bf[i], Fs=Fs).T

            # end of beamformers loop

        # end of number of sources loop

    return pesq_input, pesq_trinicon, pesq_bf, isinr, osinr_trinicon, osinr_bf
Example #33
# create a microphone array
if shape == 'Circular':
    mics = bf.Beamformer.circular2D(Fs, mic1, M, phi, d*M/(2*np.pi)) 
else:
    mics = bf.Beamformer.linear2D(Fs, mic1, M, phi, d) 

# define the array processing type
L = 4096                # frame length
hop = 2048              # hop between frames
zp = 2048               # zero padding (front + back)
mics.setProcessing('FrequencyDomain', L, hop, zp, zp)

# The first signal (of interest) is singing
rate1, signal1 = wavfile.read('samples/singing_'+str(Fs)+'.wav')
signal1 = np.array(signal1, dtype=float)
signal1 = u.normalize(signal1)
signal1 = u.highpass(signal1, Fs)
delay1 = 0.

# the second signal (interferer) is some german speech
rate2, signal2 = wavfile.read('samples/german_speech_'+str(Fs)+'.wav')
signal2 = np.array(signal2, dtype=float)
signal2 = u.normalize(signal2)
signal2 = u.highpass(signal2, Fs)
delay2 = 1.

# compute the noise variance at center of array wrt signal1 and SNR
sigma2_signal1 = np.mean(signal1**2)
distance = np.linalg.norm(mics.center[:,0] - np.array(good_source))
sigma2_n = sigma2_signal1/(4*np.pi*distance)**2/10**(SNR_at_mic/10)
Example #34
        "wires": [[]] * T
    }
    simulation = ParticleSim(system,
                             data,
                             env,
                             br=border_region,
                             sticky=allow_attachment)
    simname = env.name + "_N" + str(N) + "_T" + str(T) + "_R" + str(
        start) + "_A" + str(action)

    # create N particles at random locations in the polygon
    starting_poly = env
    # start_pts = uniform_sample_from_poly(starting_poly, N)
    start_pts = uniform_sample_along_circle(env, N, 2.0)
    for i in range(N):
        vel = normalize(np.array([random() - 0.5, random() - 0.5]))
        system.particle.append(
            Particle(position=start_pts[i],
                     velocity=list(vel),
                     radius=None,
                     species='A-free'))

    # run simulation for T steps
    simulation.run(T - 1)
    print("ran sim for ", T, "steps")

    write_data(simulation.db, simname)

    ANIMATE = True
    if ANIMATE:
        # make iterator to make animation easier
Example #35
# create a microphone array
if shape == 'Circular':
    mics = bf.Beamformer.circular2D(Fs, mic1, M, phi, d*M/(2*np.pi)) 
else:
    mics = bf.Beamformer.linear2D(Fs, mic1, M, phi, d) 

# define the array processing type
L = 4096                # frame length
hop = 2048              # hop between frames
zp = 2048               # zero padding (front + back)
mics.setProcessing('FrequencyDomain', L, hop, zp, zp)

# The first signal (of interest) is singing
rate1, signal1 = wavfile.read('samples/singing_'+str(Fs)+'.wav')
signal1 = np.array(signal1, dtype=float)
signal1 = u.normalize(signal1)
signal1 = u.highpass(signal1, Fs)
delay1 = 0.

# the second signal (interferer) is some german speech
rate2, signal2 = wavfile.read('samples/german_speech_'+str(Fs)+'.wav')
signal2 = np.array(signal2, dtype=float)
signal2 = u.normalize(signal2)
signal2 = u.highpass(signal2, Fs)
delay2 = 1.

# create the room with sources and mics
room1 = rg.Room.shoeBox2D(
    [0,0],
    room_dim,
    Fs,
Example #36
output_folder = sys.argv[2]

if output_folder[-1] != '/':
    output_folder += '/'

X, Y = util.wav_to_np(data_folder)
X = [util.sliding_window(x, 40, 20) for x in X]

X = np.vstack(X)
X = X[np.random.permutation(len(X))]
X_Kmeans = X[:KMeans_tr_size]
D = KMeans(n_clusters=D_atoms, init_size=D_atoms*3)
D.fit(X_Kmeans)
D = D.cluster_centers_

D = util.normalize(D)
X = util.normalize(X)
D_mean = np.mean(D, axis=0)
D = D - D_mean
X = X - D_mean
U, S_D, V = np.linalg.svd(D)
_, S_X, _ = np.linalg.svd(X[:M])
V = V.T
# V = np.random.randn(*V.shape)
# V = V / np.linalg.norm(V, axis=1)
VD = np.dot(V, D.T)
VX = np.dot(V, X.T)

N = 25
linewidth = 2
# x_idx = np.argmax(VX[0])
Example #37
                                 pipe.svm.intercept_, 13)])
    cl3.append(pipe.classify(X_normal[i]))


print(len([i for i, j in zip(cl1, cl3) if i == j]))

assert False

X_Kmeans = np.vstack(X)[:KMeans_tr_size]

# Train D using KMeans
D = KMeans(n_clusters=args['D_atoms'], init_size=args['D_atoms']*3)
D.fit(X_Kmeans)
D = D.cluster_centers_
np.savetxt('/home/brad/data/voice_D_200.csv', D.T, delimiter=',', fmt='%2.6f')
D = util.normalize(D)
D_mean = np.mean(D, axis=0)
D = D - D_mean

#TODO: update to use D
svm_X = []
for x in X:
    x = util.normalize(x)
    x = x - D_mean
    nbrs = np.argmax(np.abs(np.dot(D, x.T)), axis=0)
    svm_X.append(util.bow(nbrs, args['D_atoms']))

clf = SVC(kernel='linear')
clf.fit(svm_X, Y)

assert False